diff --git a/Makefile b/Makefile index 21b1085187..180c8dffa4 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ .PHONY: fmt dev_docker build_test_docker run_test_docker clean update -L2GETH_TAG=scroll-v5.3.0 +L2GETH_TAG=scroll-v5.5.1 help: ## Display this help message @grep -h \ diff --git a/bridge-history-api/go.mod b/bridge-history-api/go.mod index 02cf51f483..df8e8bba5c 100644 --- a/bridge-history-api/go.mod +++ b/bridge-history-api/go.mod @@ -8,10 +8,10 @@ require ( github.com/go-redis/redis/v8 v8.11.5 github.com/pressly/goose/v3 v3.16.0 github.com/prometheus/client_golang v1.19.0 - github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea + github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.25.7 - golang.org/x/sync v0.6.0 + golang.org/x/sync v0.7.0 gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde ) @@ -19,7 +19,7 @@ require ( dario.cat/mergo v1.0.0 // indirect github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.12.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/bytedance/sonic v1.10.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -36,7 +36,7 @@ require ( github.com/docker/docker v26.1.0+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect - github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect + github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect @@ -57,7 +57,7 @@ require ( github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huin/goupnp v1.3.0 // indirect - github.com/iden3/go-iden3-crypto v0.0.15 // indirect + github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/jackc/pgx/v5 v5.5.4 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jinzhu/inflection v1.0.0 // indirect @@ -65,7 +65,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.4 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect - github.com/kr/text v0.2.0 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -90,29 +89,28 @@ require ( github.com/rjeczalik/notify v0.9.1 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/scroll-tech/zktrie v0.8.2 // indirect + github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb // indirect + github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/sethvargo/go-retry v0.2.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect - github.com/supranational/blst v0.3.11 // indirect + github.com/supranational/blst v0.3.12 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect 
github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/ugorji/go/codec v1.2.11 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.5.0 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/bridge-history-api/go.sum b/bridge-history-api/go.sum index 6a5e2d80e3..5c279d0e60 100644 --- a/bridge-history-api/go.sum +++ b/bridge-history-api/go.sum @@ -23,8 +23,8 @@ github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= -github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -61,7 +61,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0q github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -87,8 +86,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA= 
-github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c= +github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= +github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -176,8 +175,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= -github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= +github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= @@ -309,10 +308,12 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA= -github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ= -github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= +github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= +github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= @@ -339,14 +340,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= +github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= @@ -369,8 +370,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dz github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA= github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= @@ -383,21 +384,21 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/crypto v0.24.0 
h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -414,20 +415,18 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= diff --git a/bridge-history-api/internal/logic/history_logic.go b/bridge-history-api/internal/logic/history_logic.go index 0d97b6f0a1..ee52ce3ac5 100644 --- a/bridge-history-api/internal/logic/history_logic.go +++ b/bridge-history-api/internal/logic/history_logic.go @@ -407,7 +407,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs [] return err } } else { - // The transactions are sorted, thus we set the score as their indices. + // The transactions are sorted, thus we set the score as their index. for _, tx := range txs { txBytes, err := json.Marshal(tx) if err != nil { diff --git a/common/forks/forks.go b/common/forks/forks.go index ae6e165c38..a65a49c1fa 100644 --- a/common/forks/forks.go +++ b/common/forks/forks.go @@ -1,89 +1,50 @@ package forks import ( - "math" "math/big" - "sort" + "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/params" ) -// CollectSortedForkHeights returns a sorted set of block numbers that one or more forks are activated on -func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool, map[string]uint64) { - type nameFork struct { - name string - block *big.Int +// GetHardforkName returns the name of the hardfork active at the given block height and timestamp. +// It checks the chain configuration to determine which hardfork is active. 
+func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uint64) string { + if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) { + return "homestead" + } else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) { + return "bernoulli" + } else if !config.IsDarwin(blockTimestamp) { + return "curie" + } else { + return "darwin" } - - forkHeightNameMap := make(map[uint64]string) - - for _, fork := range []nameFork{ - {name: "homestead", block: config.HomesteadBlock}, - {name: "daoFork", block: config.DAOForkBlock}, - {name: "eip150", block: config.EIP150Block}, - {name: "eip155", block: config.EIP155Block}, - {name: "eip158", block: config.EIP158Block}, - {name: "byzantium", block: config.ByzantiumBlock}, - {name: "constantinople", block: config.ConstantinopleBlock}, - {name: "petersburg", block: config.PetersburgBlock}, - {name: "istanbul", block: config.IstanbulBlock}, - {name: "muirGlacier", block: config.MuirGlacierBlock}, - {name: "berlin", block: config.BerlinBlock}, - {name: "london", block: config.LondonBlock}, - {name: "arrowGlacier", block: config.ArrowGlacierBlock}, - {name: "archimedes", block: config.ArchimedesBlock}, - {name: "shanghai", block: config.ShanghaiBlock}, - {name: "bernoulli", block: config.BernoulliBlock}, - {name: "curie", block: config.CurieBlock}, - } { - if fork.block == nil { - continue - } - height := fork.block.Uint64() - - // only keep latest fork for at each height, discard the rest - forkHeightNameMap[height] = fork.name - } - - forkHeightsMap := make(map[uint64]bool) - forkNameHeightMap := make(map[string]uint64) - - for height, name := range forkHeightNameMap { - forkHeightsMap[height] = true - forkNameHeightMap[name] = height - } - - var forkHeights []uint64 - for height := range forkHeightsMap { - forkHeights = append(forkHeights, height) - } - sort.Slice(forkHeights, func(i, j int) bool { - return forkHeights[i] < forkHeights[j] - }) - return forkHeights, forkHeightsMap, forkNameHeightMap } -// BlocksUntilFork returns the number of blocks until the next fork -// returns 0 if there is no fork scheduled for the future -func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 { - for _, forkHeight := range forkHeights { - if forkHeight > blockHeight { - return forkHeight - blockHeight - } +// GetCodecVersion returns the encoding codec version for the given block height and timestamp. +// It determines the appropriate codec version based on the active hardfork. +func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64) encoding.CodecVersion { + if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) { + return encoding.CodecV0 + } else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) { + return encoding.CodecV1 + } else if !config.IsDarwin(blockTimestamp) { + return encoding.CodecV2 + } else { + return encoding.CodecV3 } - return 0 } -// BlockRange returns the block range of the hard fork -// Need ensure the forkHeights is incremental -func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) { - to = math.MaxInt64 - for _, height := range forkHeights { - if currentForkHeight < height { - to = height - return - } - from = height +// GetMaxChunksPerBatch returns the maximum number of chunks allowed per batch for the given block height and timestamp. +// This value may change depending on the active hardfork. 
+func GetMaxChunksPerBatch(config *params.ChainConfig, blockHeight, blockTimestamp uint64) uint64 { + if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) { + return 15 + } else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) { + return 15 + } else if !config.IsDarwin(blockTimestamp) { + return 45 + } else { + return 45 } - return } diff --git a/common/forks/forks_test.go b/common/forks/forks_test.go deleted file mode 100644 index 73a75a7a0c..0000000000 --- a/common/forks/forks_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package forks - -import ( - "math" - "math/big" - "testing" - - "github.com/scroll-tech/go-ethereum/params" - "github.com/stretchr/testify/require" -) - -func TestCollectSortedForkBlocks(t *testing.T) { - l, m, n := CollectSortedForkHeights(¶ms.ChainConfig{ - ArchimedesBlock: big.NewInt(0), - ShanghaiBlock: big.NewInt(3), - BernoulliBlock: big.NewInt(3), - CurieBlock: big.NewInt(4), - }) - require.Equal(t, l, []uint64{ - 0, - 3, - 4, - }) - require.Equal(t, map[uint64]bool{ - 3: true, - 4: true, - 0: true, - }, m) - require.Equal(t, map[string]uint64{ - "archimedes": 0, - "bernoulli": 3, - "curie": 4, - }, n) -} - -func TestBlocksUntilFork(t *testing.T) { - tests := map[string]struct { - block uint64 - forks []uint64 - expected uint64 - }{ - "NoFork": { - block: 44, - forks: []uint64{}, - expected: 0, - }, - "BeforeFork": { - block: 0, - forks: []uint64{1, 5}, - expected: 1, - }, - "OnFork": { - block: 1, - forks: []uint64{1, 5}, - expected: 4, - }, - "OnLastFork": { - block: 5, - forks: []uint64{1, 5}, - expected: 0, - }, - "AfterFork": { - block: 5, - forks: []uint64{1, 5}, - expected: 0, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - require.Equal(t, test.expected, BlocksUntilFork(test.block, test.forks)) - }) - } -} - -func TestBlockRange(t *testing.T) { - tests := []struct { - name string - forkHeight uint64 - forkHeights []uint64 - expectedFrom uint64 - expectedTo uint64 - }{ - { - name: "ToInfinite", - forkHeight: 300, - forkHeights: []uint64{100, 200, 300}, - expectedFrom: 300, - expectedTo: math.MaxInt64, - }, - { - name: "To300", - forkHeight: 200, - forkHeights: []uint64{100, 200, 300}, - expectedFrom: 200, - expectedTo: 300, - }, - { - name: "To200", - forkHeight: 100, - forkHeights: []uint64{100, 200, 300}, - expectedFrom: 100, - expectedTo: 200, - }, - { - name: "To100", - forkHeight: 0, - forkHeights: []uint64{100, 200, 300}, - expectedFrom: 0, - expectedTo: 100, - }, - { - name: "To200-1", - forkHeight: 100, - forkHeights: []uint64{100, 200}, - expectedFrom: 100, - expectedTo: 200, - }, - { - name: "To2", - forkHeight: 1, - forkHeights: []uint64{1, 2}, - expectedFrom: 1, - expectedTo: 2, - }, - { - name: "ToInfinite-1", - forkHeight: 0, - forkHeights: []uint64{0}, - expectedFrom: 0, - expectedTo: math.MaxInt64, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - from, to := BlockRange(test.forkHeight, test.forkHeights) - require.Equal(t, test.expectedFrom, from) - require.Equal(t, test.expectedTo, to) - }) - } -} diff --git a/common/go.mod b/common/go.mod index ae7311e9d3..1b13e8f151 100644 --- a/common/go.mod +++ b/common/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 - github.com/bits-and-blooms/bitset v1.12.0 + github.com/bits-and-blooms/bitset v1.13.0 github.com/docker/docker v26.1.0+incompatible github.com/gin-contrib/pprof v1.4.0 github.com/gin-gonic/gin v1.9.1 @@ -13,7 +13,8 @@ require ( github.com/modern-go/reflect2 v1.0.2 
github.com/orcaman/concurrent-map v1.0.0 github.com/prometheus/client_golang v1.19.0 - github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea + github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb + github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/stretchr/testify v1.9.0 github.com/testcontainers/testcontainers-go v0.30.0 github.com/testcontainers/testcontainers-go/modules/compose v0.30.0 @@ -77,7 +78,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect + github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsevents v0.1.1 // indirect @@ -119,7 +120,7 @@ require ( github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huin/goupnp v1.3.0 // indirect - github.com/iden3/go-iden3-crypto v0.0.15 // indirect + github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/in-toto/in-toto-golang v0.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -182,7 +183,7 @@ require ( github.com/rjeczalik/notify v0.9.1 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/scroll-tech/zktrie v0.8.2 // indirect + github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect @@ -194,12 +195,12 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.4.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect - github.com/supranational/blst v0.3.11 // indirect + github.com/supranational/blst v0.3.12 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/theupdateframework/notary v0.7.0 // indirect github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect @@ -210,7 +211,7 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect @@ -229,17 +230,17 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/mock v0.4.0 // indirect golang.org/x/arch v0.5.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + 
golang.org/x/crypto v0.24.0 // indirect golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect diff --git a/common/go.sum b/common/go.sum index d01a56d8c4..961f3625f4 100644 --- a/common/go.sum +++ b/common/go.sum @@ -70,8 +70,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= -github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= @@ -212,8 +212,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c= +github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= +github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= @@ -384,8 +384,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp 
v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= -github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= +github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= @@ -633,10 +633,12 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA= -github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ= -github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= +github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= +github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ= @@ -700,8 +702,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= +github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E= @@ 
-714,10 +716,12 @@ github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4D github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw= github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA= github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw= github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI= @@ -754,8 +758,9 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o= github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w= github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ= @@ -815,8 +820,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod 
h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= @@ -826,8 +831,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -846,8 +851,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= @@ -859,8 +864,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -901,21 +906,21 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -930,8 +935,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/common/libzkp/impl/Cargo.lock b/common/libzkp/impl/Cargo.lock index a4ceafdbf8..c3fbb88a7f 100644 --- a/common/libzkp/impl/Cargo.lock +++ b/common/libzkp/impl/Cargo.lock @@ -39,9 +39,9 @@ dependencies = [ "ctor", "encoder", "env_logger 0.10.0", - "eth-types", + "eth-types 0.11.0", "ethers-core", - "gadgets", + "gadgets 0.11.0", "halo2-base", "halo2-ecc", "halo2_proofs", @@ -59,7 +59,41 @@ dependencies = [ "snark-verifier-sdk", "strum 0.25.0", "strum_macros 0.25.3", - "zkevm-circuits", + "zkevm-circuits 0.11.0", +] + 
+[[package]] +name = "aggregator" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "ark-std 0.3.0", + "bitstream-io", + "c-kzg", + "ctor", + "encoder", + "env_logger 0.10.0", + "eth-types 0.12.0", + "ethers-core", + "gadgets 0.12.0", + "halo2-base", + "halo2-ecc", + "halo2_proofs", + "hex", + "itertools 0.11.0", + "log", + "num-bigint", + "once_cell", + "rand", + "revm-precompile", + "revm-primitives", + "serde", + "serde_json", + "snark-verifier", + "snark-verifier-sdk", + "strum 0.25.0", + "strum_macros 0.25.3", + "zkevm-circuits 0.12.0", ] [[package]] @@ -539,18 +573,18 @@ name = "bus-mapping" version = "0.11.0" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" dependencies = [ - "eth-types", + "eth-types 0.11.0", "ethers-core", "ethers-providers", "ethers-signers", - "external-tracer", - "gadgets", + "external-tracer 0.11.0", + "gadgets 0.11.0", "halo2_proofs", "hex", "itertools 0.11.0", "log", - "mock", - "mpt-zktrie", + "mock 0.11.0", + "mpt-zktrie 0.11.0", "num", "poseidon-circuit", "rand", @@ -561,6 +595,31 @@ dependencies = [ "strum_macros 0.25.3", ] +[[package]] +name = "bus-mapping" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "eth-types 0.12.0", + "ethers-core", + "ethers-providers", + "ethers-signers", + "gadgets 0.12.0", + "halo2_proofs", + "hex", + "itertools 0.11.0", + "log", + "mock 0.12.0", + "mpt-zktrie 0.12.0", + "num", + "poseidon-circuit", + "revm-precompile", + "serde", + "serde_json", + "strum 0.25.0", + "strum_macros 0.25.3", +] + [[package]] name = "byte-slice-cast" version = "1.2.2" @@ -1152,6 +1211,34 @@ dependencies = [ "uint", ] +[[package]] +name = "eth-types" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "base64 0.13.1", + "ethers-core", + "ethers-signers", + "halo2curves", + "hex", + "itertools 0.11.0", + "log", + "num", + "num-bigint", + "poseidon-base", + "regex", + "revm-precompile", + "revm-primitives", + "serde", + "serde_json", + "serde_with", + "sha3 0.10.8", + "strum 0.25.0", + "strum_macros 0.25.3", + "subtle", + "uint", +] + [[package]] name = "ethabi" version = "18.0.0" @@ -1285,8 +1372,21 @@ name = "external-tracer" version = "0.11.0" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" dependencies = [ - "eth-types", - "geth-utils", + "eth-types 0.11.0", + "geth-utils 0.11.0", + "log", + "serde", + "serde_json", + "serde_stacker", +] + +[[package]] +name = "external-tracer" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "eth-types 0.12.0", + "geth-utils 0.12.0", "log", "serde", "serde_json", @@ -1467,7 +1567,19 @@ name = "gadgets" version = "0.11.0" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" dependencies = [ - "eth-types", + "eth-types 0.11.0", + "halo2_proofs", + "poseidon-base", + "sha3 0.10.8", + "strum 0.25.0", +] + +[[package]] +name = "gadgets" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies 
= [ + "eth-types 0.12.0", "halo2_proofs", "poseidon-base", "sha3 0.10.8", @@ -1495,6 +1607,16 @@ dependencies = [ "log", ] +[[package]] +name = "geth-utils" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "env_logger 0.10.0", + "gobuild", + "log", +] + [[package]] name = "getrandom" version = "0.2.10" @@ -2239,10 +2361,25 @@ name = "mock" version = "0.11.0" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" dependencies = [ - "eth-types", + "eth-types 0.11.0", "ethers-core", "ethers-signers", - "external-tracer", + "external-tracer 0.11.0", + "itertools 0.11.0", + "log", + "rand", + "rand_chacha", +] + +[[package]] +name = "mock" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "eth-types 0.12.0", + "ethers-core", + "ethers-signers", + "external-tracer 0.12.0", "itertools 0.11.0", "log", "rand", @@ -2254,7 +2391,21 @@ name = "mpt-zktrie" version = "0.11.0" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" dependencies = [ - "eth-types", + "eth-types 0.11.0", + "halo2curves", + "hex", + "log", + "num-bigint", + "poseidon-base", + "zktrie", +] + +[[package]] +name = "mpt-zktrie" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "eth-types 0.12.0", "halo2curves", "hex", "log", @@ -2726,14 +2877,48 @@ name = "prover" version = "0.11.0" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" dependencies = [ - "aggregator", + "aggregator 0.11.0", + "anyhow", + "base64 0.13.1", + "blake2", + "bus-mapping 0.11.0", + "chrono", + "dotenvy", + "eth-types 0.11.0", + "ethers-core", + "git-version", + "halo2_proofs", + "hex", + "itertools 0.11.0", + "log", + "log4rs", + "mpt-zktrie 0.11.0", + "num-bigint", + "rand", + "rand_xorshift", + "serde", + "serde_derive", + "serde_json", + "serde_stacker", + "sha2", + "snark-verifier", + "snark-verifier-sdk", + "zkevm-circuits 0.11.0", +] + +[[package]] +name = "prover" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "aggregator 0.12.0", "anyhow", "base64 0.13.1", "blake2", - "bus-mapping", + "bus-mapping 0.12.0", "chrono", "dotenvy", - "eth-types", + "eth-types 0.12.0", "ethers-core", "git-version", "halo2_proofs", @@ -2741,7 +2926,7 @@ dependencies = [ "itertools 0.11.0", "log", "log4rs", - "mpt-zktrie", + "mpt-zktrie 0.12.0", "num-bigint", "rand", "rand_xorshift", @@ -2752,7 +2937,7 @@ dependencies = [ "sha2", "snark-verifier", "snark-verifier-sdk", - "zkevm-circuits", + "zkevm-circuits 0.12.0", ] [[package]] @@ -2958,7 +3143,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "7.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30" +source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a" dependencies = [ "aurora-engine-modexp", "c-kzg", @@ -2974,7 +3159,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "4.0.0" -source = 
"git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30" +source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a" dependencies = [ "alloy-primitives", "auto_impl", @@ -3525,7 +3710,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "snark-verifier" version = "0.1.0" -source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829" +source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#58c46b74c73156b9e09dc27617369d2acfb4461b" dependencies = [ "bytes", "ethereum-types", @@ -3548,7 +3733,7 @@ dependencies = [ [[package]] name = "snark-verifier-sdk" version = "0.0.1" -source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829" +source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#58c46b74c73156b9e09dc27617369d2acfb4461b" dependencies = [ "bincode", "ethereum-types", @@ -4364,14 +4549,56 @@ version = "0.11.0" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" dependencies = [ "array-init", - "bus-mapping", + "bus-mapping 0.11.0", + "either", + "env_logger 0.10.0", + "eth-types 0.11.0", + "ethers-core", + "ethers-signers", + "ff", + "gadgets 0.11.0", + "halo2-base", + "halo2-ecc", + "halo2-mpt-circuits", + "halo2_gadgets", + "halo2_proofs", + "hex", + "itertools 0.11.0", + "log", + "misc-precompiled-circuit", + "mock 0.11.0", + "mpt-zktrie 0.11.0", + "num", + "num-bigint", + "poseidon-circuit", + "rand", + "rand_chacha", + "rand_xorshift", + "rayon", + "serde", + "serde_json", + "sha3 0.10.8", + "snark-verifier", + "snark-verifier-sdk", + "strum 0.25.0", + "strum_macros 0.25.3", + "subtle", +] + +[[package]] +name = "zkevm-circuits" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" +dependencies = [ + "array-init", + "bus-mapping 0.12.0", "either", "env_logger 0.10.0", - "eth-types", + "eth-types 0.12.0", "ethers-core", "ethers-signers", "ff", - "gadgets", + "gadgets 0.12.0", "halo2-base", "halo2-ecc", "halo2-mpt-circuits", @@ -4381,8 +4608,8 @@ dependencies = [ "itertools 0.11.0", "log", "misc-precompiled-circuit", - "mock", - "mpt-zktrie", + "mock 0.12.0", + "mpt-zktrie 0.12.0", "num", "num-bigint", "poseidon-circuit", @@ -4410,7 +4637,8 @@ dependencies = [ "libc", "log", "once_cell", - "prover", + "prover 0.11.0", + "prover 0.12.0", "serde", "serde_derive", "serde_json", diff --git a/common/libzkp/impl/Cargo.toml b/common/libzkp/impl/Cargo.toml index d12c7a31a6..99de99359d 100644 --- a/common/libzkp/impl/Cargo.toml +++ b/common/libzkp/impl/Cargo.toml @@ -13,8 +13,6 @@ halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0 ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } -#ethers-etherscan = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } -#ethers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } [patch."https://github.com/privacy-scaling-explorations/halo2.git"] halo2_proofs = { git = 
"https://github.com/scroll-tech/halo2.git", branch = "v1.1" } [patch."https://github.com/privacy-scaling-explorations/poseidon.git"] @@ -25,7 +23,11 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i [dependencies] halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" } snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] } -prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", default-features = false, features = ["parallel_syn", "scroll"] } + +# curie +prover_v3 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] } +# darwin +prover_v4 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.0", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] } base64 = "0.13.0" env_logger = "0.9.0" diff --git a/common/libzkp/impl/src/batch.rs b/common/libzkp/impl/src/batch.rs index 9d6a8a1199..9f1252c2cc 100644 --- a/common/libzkp/impl/src/batch.rs +++ b/common/libzkp/impl/src/batch.rs @@ -1,44 +1,14 @@ -use crate::{ - types::{CheckChunkProofsResponse, ProofResult}, - utils::{ - c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char, - OUTPUT_DIR, - }, -}; +use crate::utils::{c_char_to_str, c_char_to_vec, panic_catch}; use libc::c_char; -use prover::{ - aggregator::{Prover, Verifier}, - check_chunk_hashes, - consts::AGG_VK_FILENAME, - utils::{chunk_trace_to_witness_block, init_env_and_log}, - BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof, +use prover_v3::BatchProof as BatchProofLoVersion; +use prover_v4::{ + aggregator::Verifier as VerifierHiVersion, utils::init_env_and_log, + BatchProof as BatchProofHiVersion, BundleProof, }; use snark_verifier_sdk::verify_evm_calldata; -use std::{cell::OnceCell, env, ptr::null}; - -static mut PROVER: OnceCell = OnceCell::new(); -static mut VERIFIER: OnceCell = OnceCell::new(); - -/// # Safety -#[no_mangle] -pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) { - init_env_and_log("ffi_batch_prove"); - - let params_dir = c_char_to_str(params_dir); - let assets_dir = c_char_to_str(assets_dir); - - // TODO: add a settings in scroll-prover. - env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir); - - // VK file must exist, it is optional and logged as a warning in prover. - if !file_exists(assets_dir, &AGG_VK_FILENAME) { - panic!("{} must exist in folder {}", *AGG_VK_FILENAME, assets_dir); - } +use std::{cell::OnceCell, env}; - let prover = Prover::from_dirs(params_dir, assets_dir); - - PROVER.set(prover).unwrap(); -} +static mut VERIFIER: OnceCell = OnceCell::new(); /// # Safety #[no_mangle] @@ -50,106 +20,9 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d // TODO: add a settings in scroll-prover. 
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir); - let verifier = Verifier::from_dirs(params_dir, assets_dir); - - VERIFIER.set(verifier).unwrap(); -} - -/// # Safety -#[no_mangle] -pub unsafe extern "C" fn get_batch_vk() -> *const c_char { - let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk()); + let verifier_hi = VerifierHiVersion::from_dirs(params_dir, assets_dir); - vk_result - .ok() - .flatten() - .map_or(null(), |vk| string_to_c_char(base64::encode(vk))) -} - -/// # Safety -#[no_mangle] -pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char { - let check_result: Result = panic_catch(|| { - let chunk_proofs = c_char_to_vec(chunk_proofs); - let chunk_proofs = serde_json::from_slice::>(&chunk_proofs) - .map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?; - - if chunk_proofs.is_empty() { - return Err("provided chunk proofs are empty.".to_string()); - } - - let prover_ref = PROVER.get().expect("failed to get reference to PROVER."); - - let valid = prover_ref.check_protocol_of_chunks(&chunk_proofs); - Ok(valid) - }) - .unwrap_or_else(|e| Err(format!("unwind error: {e:?}"))); - - let r = match check_result { - Ok(valid) => CheckChunkProofsResponse { - ok: valid, - error: None, - }, - Err(err) => CheckChunkProofsResponse { - ok: false, - error: Some(err), - }, - }; - - serde_json::to_vec(&r).map_or(null(), vec_to_c_char) -} - -/// # Safety -#[no_mangle] -pub unsafe extern "C" fn gen_batch_proof( - chunk_hashes: *const c_char, - chunk_proofs: *const c_char, -) -> *const c_char { - let proof_result: Result, String> = panic_catch(|| { - let chunk_hashes = c_char_to_vec(chunk_hashes); - let chunk_proofs = c_char_to_vec(chunk_proofs); - - let chunk_hashes = serde_json::from_slice::>(&chunk_hashes) - .map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?; - let chunk_proofs = serde_json::from_slice::>(&chunk_proofs) - .map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?; - - if chunk_hashes.len() != chunk_proofs.len() { - return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}", - chunk_hashes.len(), chunk_proofs.len())); - } - - let chunk_hashes_proofs: Vec<(_,_)> = chunk_hashes - .into_iter() - .zip(chunk_proofs.clone()) - .collect(); - check_chunk_hashes("", &chunk_hashes_proofs).map_err(|e| format!("failed to check chunk info: {e:?}"))?; - - let batch = BatchProvingTask { - chunk_proofs - }; - let proof = PROVER - .get_mut() - .expect("failed to get mutable reference to PROVER.") - .gen_agg_evm_proof(batch, None, OUTPUT_DIR.as_deref()) - .map_err(|e| format!("failed to generate proof: {e:?}"))?; - - serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}")) - }) - .unwrap_or_else(|e| Err(format!("unwind error: {e:?}"))); - - let r = match proof_result { - Ok(proof_bytes) => ProofResult { - message: Some(proof_bytes), - error: None, - }, - Err(err) => ProofResult { - message: None, - error: Some(err), - }, - }; - - serde_json::to_vec(&r).map_or(null(), vec_to_c_char) + VERIFIER.set(verifier_hi).unwrap(); } /// # Safety @@ -159,40 +32,38 @@ pub unsafe extern "C" fn verify_batch_proof( fork_name: *const c_char, ) -> c_char { let proof = c_char_to_vec(proof); - let proof = serde_json::from_slice::(proof.as_slice()).unwrap(); let fork_name_str = c_char_to_str(fork_name); let fork_id = match fork_name_str { - "bernoulli" => 2, "curie" => 3, + "darwin" => 4, _ => { - log::warn!("unexpected fork_name 
{fork_name_str}, treated as curie"); - 3 + log::warn!("unexpected fork_name {fork_name_str}, treated as darwin"); + 4 } }; let verified = panic_catch(|| { - if fork_id == 2 { - // before upgrade#3(DA Compression) + if fork_id == 3 { + // As of upgrade #3 (Curie), we verify batch proofs on-chain (EVM). + let proof = serde_json::from_slice::(proof.as_slice()).unwrap(); verify_evm_calldata( - include_bytes!("plonk_verifier_0.10.3.bin").to_vec(), + include_bytes!("plonk_verifier_0.11.4.bin").to_vec(), proof.calldata(), ) } else { - VERIFIER.get().unwrap().verify_agg_evm_proof(proof) + // Post upgrade #4 (Darwin), batch proofs are not EVM-verifiable. Instead they are + // halo2 proofs meant to be bundled recursively. + let proof = serde_json::from_slice::(proof.as_slice()).unwrap(); + VERIFIER.get().unwrap().verify_batch_proof(&proof) } }); verified.unwrap_or(false) as c_char } -// This function is only used for debugging on Go side. /// # Safety #[no_mangle] -pub unsafe extern "C" fn block_traces_to_chunk_info(block_traces: *const c_char) -> *const c_char { - let block_traces = c_char_to_vec(block_traces); - let block_traces = serde_json::from_slice::>(&block_traces).unwrap(); - - let witness_block = chunk_trace_to_witness_block(block_traces).unwrap(); - let chunk_info = ChunkInfo::from_witness_block(&witness_block, false); - - let chunk_info_bytes = serde_json::to_vec(&chunk_info).unwrap(); - vec_to_c_char(chunk_info_bytes) +pub unsafe extern "C" fn verify_bundle_proof(proof: *const c_char) -> c_char { + let proof = c_char_to_vec(proof); + let proof = serde_json::from_slice::(proof.as_slice()).unwrap(); + let verified = panic_catch(|| VERIFIER.get().unwrap().verify_bundle_proof(proof)); + verified.unwrap_or(false) as c_char } diff --git a/common/libzkp/impl/src/chunk.rs b/common/libzkp/impl/src/chunk.rs index bbd24341d2..086b4f03a5 100644 --- a/common/libzkp/impl/src/chunk.rs +++ b/common/libzkp/impl/src/chunk.rs @@ -1,108 +1,63 @@ -use crate::{ - types::ProofResult, - utils::{ - c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char, - OUTPUT_DIR, - }, -}; +use crate::utils::{c_char_to_str, c_char_to_vec, panic_catch}; use libc::c_char; -use prover::{ - consts::CHUNK_VK_FILENAME, - utils::init_env_and_log, - zkevm::{Prover, Verifier}, - BlockTrace, ChunkProof, ChunkProvingTask, +use prover_v3::{zkevm::Verifier as VerifierLoVersion, ChunkProof as ChunkProofLoVersion}; +use prover_v4::{ + utils::init_env_and_log, zkevm::Verifier as VerifierHiVersion, + ChunkProof as ChunkProofHiVersion, }; -use std::{cell::OnceCell, env, ptr::null}; - -static mut PROVER: OnceCell = OnceCell::new(); -static mut VERIFIER: OnceCell = OnceCell::new(); - -/// # Safety -#[no_mangle] -pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) { - init_env_and_log("ffi_chunk_prove"); - - let params_dir = c_char_to_str(params_dir); - let assets_dir = c_char_to_str(assets_dir); +use std::{cell::OnceCell, env}; - // TODO: add a settings in scroll-prover. - env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir); - - // VK file must exist, it is optional and logged as a warning in prover. 
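
To make the reworked FFI surface easier to follow from the Go side: libzkp no longer exposes any proving entry points, only verifier initialization and verification, and verification is dispatched on the hard-fork name (Curie batch proofs are verified as EVM calldata against the embedded plonk_verifier_0.11.4.bin, while Darwin batch proofs are plain halo2 proofs that are later aggregated into an EVM-verifiable bundle proof). The sketch below is a minimal, hypothetical cgo wrapper over the updated libzkp.h declared later in this diff; the real bindings live in coordinator/internal/logic/verifier, and the package name, link flags, and helper names here are illustrative assumptions only.

package libzkpsketch // hypothetical package; the real cgo bindings live in the coordinator

/*
// Assumed include and link flags; the actual build wires libzkp via the coordinator Makefile.
#cgo LDFLAGS: -lzkp -lm -ldl
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// cstr copies a Go string into C memory; callers must free the result.
func cstr(s string) *C.char { return C.CString(s) }

// InitVerifiers wires up both verifier generations: the batch/bundle verifier
// uses the higher-version (Darwin) assets, while the chunk verifier now takes
// both the v3 (Curie) and v4 (Darwin) assets directories.
func InitVerifiers(paramsDir, loAssetsDir, hiAssetsDir string) {
	params, lo, hi := cstr(paramsDir), cstr(loAssetsDir), cstr(hiAssetsDir)
	defer C.free(unsafe.Pointer(params))
	defer C.free(unsafe.Pointer(lo))
	defer C.free(unsafe.Pointer(hi))
	C.init_batch_verifier(params, hi)
	C.init_chunk_verifier(params, lo, hi)
}

// VerifyBatchProof passes the fork name through so libzkp can pick the
// Curie (EVM calldata) or Darwin (halo2) verification path.
func VerifyBatchProof(proofJSON []byte, forkName string) bool {
	proof, fork := cstr(string(proofJSON)), cstr(forkName)
	defer C.free(unsafe.Pointer(proof))
	defer C.free(unsafe.Pointer(fork))
	return C.verify_batch_proof(proof, fork) != 0
}

// VerifyChunkProof mirrors VerifyBatchProof for chunk proofs.
func VerifyChunkProof(proofJSON []byte, forkName string) bool {
	proof, fork := cstr(string(proofJSON)), cstr(forkName)
	defer C.free(unsafe.Pointer(proof))
	defer C.free(unsafe.Pointer(fork))
	return C.verify_chunk_proof(proof, fork) != 0
}

// VerifyBundleProof is the new Darwin-only entry point; a bundle proof is the
// EVM-verifiable aggregate of several batch proofs.
func VerifyBundleProof(proofJSON []byte) bool {
	proof := cstr(string(proofJSON))
	defer C.free(unsafe.Pointer(proof))
	return C.verify_bundle_proof(proof) != 0
}
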
- if !file_exists(assets_dir, &CHUNK_VK_FILENAME) { - panic!("{} must exist in folder {}", *CHUNK_VK_FILENAME, assets_dir); - } - - let prover = Prover::from_dirs(params_dir, assets_dir); - - PROVER.set(prover).unwrap(); -} +static mut VERIFIER_LO_VERSION: OnceCell = OnceCell::new(); +static mut VERIFIER_HI_VERSION: OnceCell = OnceCell::new(); /// # Safety #[no_mangle] -pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_dir: *const c_char) { +pub unsafe extern "C" fn init_chunk_verifier( + params_dir: *const c_char, + v3_assets_dir: *const c_char, + v4_assets_dir: *const c_char, +) { init_env_and_log("ffi_chunk_verify"); let params_dir = c_char_to_str(params_dir); - let assets_dir = c_char_to_str(assets_dir); + let v3_assets_dir = c_char_to_str(v3_assets_dir); + let v4_assets_dir = c_char_to_str(v4_assets_dir); // TODO: add a settings in scroll-prover. - env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir); - let verifier = Verifier::from_dirs(params_dir, assets_dir); + env::set_var("SCROLL_PROVER_ASSETS_DIR", v3_assets_dir); + let verifier_lo = VerifierLoVersion::from_dirs(params_dir, v3_assets_dir); + env::set_var("SCROLL_PROVER_ASSETS_DIR", v4_assets_dir); + let verifier_hi = VerifierHiVersion::from_dirs(params_dir, v4_assets_dir); - VERIFIER.set(verifier).unwrap(); + VERIFIER_LO_VERSION.set(verifier_lo).unwrap(); + VERIFIER_HI_VERSION.set(verifier_hi).unwrap(); } /// # Safety #[no_mangle] -pub unsafe extern "C" fn get_chunk_vk() -> *const c_char { - let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk()); - - vk_result - .ok() - .flatten() - .map_or(null(), |vk| string_to_c_char(base64::encode(vk))) -} - -/// # Safety -#[no_mangle] -pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char { - let proof_result: Result, String> = panic_catch(|| { - let block_traces = c_char_to_vec(block_traces); - let block_traces = serde_json::from_slice::>(&block_traces) - .map_err(|e| format!("failed to deserialize block traces: {e:?}"))?; - let chunk = ChunkProvingTask::from(block_traces); - - let proof = PROVER - .get_mut() - .expect("failed to get mutable reference to PROVER.") - .gen_chunk_proof(chunk, None, None, OUTPUT_DIR.as_deref()) - .map_err(|e| format!("failed to generate proof: {e:?}"))?; - - serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}")) - }) - .unwrap_or_else(|e| Err(format!("unwind error: {e:?}"))); - - let r = match proof_result { - Ok(proof_bytes) => ProofResult { - message: Some(proof_bytes), - error: None, - }, - Err(err) => ProofResult { - message: None, - error: Some(err), - }, - }; - - serde_json::to_vec(&r).map_or(null(), vec_to_c_char) -} - -/// # Safety -#[no_mangle] -pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char { +pub unsafe extern "C" fn verify_chunk_proof( + proof: *const c_char, + fork_name: *const c_char, +) -> c_char { let proof = c_char_to_vec(proof); - let proof = serde_json::from_slice::(proof.as_slice()).unwrap(); - let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof)); + let fork_name_str = c_char_to_str(fork_name); + let fork_id = match fork_name_str { + "curie" => 3, + "darwin" => 4, + _ => { + log::warn!("unexpected fork_name {fork_name_str}, treated as darwin"); + 4 + } + }; + let verified = panic_catch(|| { + if fork_id == 3 { + let proof = serde_json::from_slice::(proof.as_slice()).unwrap(); + VERIFIER_LO_VERSION.get().unwrap().verify_chunk_proof(proof) + } else { + let proof = 
serde_json::from_slice::(proof.as_slice()).unwrap(); + VERIFIER_HI_VERSION.get().unwrap().verify_chunk_proof(proof) + } + }); verified.unwrap_or(false) as c_char } diff --git a/common/libzkp/impl/src/plonk_verifier_0.10.3.bin b/common/libzkp/impl/src/plonk_verifier_0.10.3.bin deleted file mode 100644 index 230473bc00..0000000000 Binary files a/common/libzkp/impl/src/plonk_verifier_0.10.3.bin and /dev/null differ diff --git a/common/libzkp/impl/src/plonk_verifier_0.11.4.bin b/common/libzkp/impl/src/plonk_verifier_0.11.4.bin new file mode 100644 index 0000000000..aaa8d7016c Binary files /dev/null and b/common/libzkp/impl/src/plonk_verifier_0.11.4.bin differ diff --git a/common/libzkp/impl/src/utils.rs b/common/libzkp/impl/src/utils.rs index b241555cf6..3091fa316a 100644 --- a/common/libzkp/impl/src/utils.rs +++ b/common/libzkp/impl/src/utils.rs @@ -1,29 +1,9 @@ -use once_cell::sync::Lazy; use std::{ - env, - ffi::{CStr, CString}, + ffi::CStr, os::raw::c_char, panic::{catch_unwind, AssertUnwindSafe}, - path::PathBuf, }; -// Only used for debugging. -pub(crate) static OUTPUT_DIR: Lazy> = - Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok()); - -/// # Safety -#[no_mangle] -pub extern "C" fn free_c_chars(ptr: *mut c_char) { - if ptr.is_null() { - log::warn!("Try to free an empty pointer!"); - return; - } - - unsafe { - let _ = CString::from_raw(ptr); - } -} - pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str { let cstr = unsafe { CStr::from_ptr(c) }; cstr.to_str().unwrap() @@ -34,21 +14,6 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec { cstr.to_bytes().to_vec() } -pub(crate) fn string_to_c_char(string: String) -> *const c_char { - CString::new(string).unwrap().into_raw() -} - -pub(crate) fn vec_to_c_char(bytes: Vec) -> *const c_char { - CString::new(bytes).unwrap().into_raw() -} - -pub(crate) fn file_exists(dir: &str, filename: &str) -> bool { - let mut path = PathBuf::from(dir); - path.push(filename); - - path.exists() -} - pub(crate) fn panic_catch R, R>(f: F) -> Result { catch_unwind(AssertUnwindSafe(f)).map_err(|err| { if let Some(s) = err.downcast_ref::() { diff --git a/common/libzkp/interface/libzkp.h b/common/libzkp/interface/libzkp.h index dab60b7bed..3b8c359a60 100644 --- a/common/libzkp/interface/libzkp.h +++ b/common/libzkp/interface/libzkp.h @@ -1,15 +1,11 @@ -void init_batch_prover(char* params_dir, char* assets_dir); +// BatchVerifier is used to: +// - Verify a batch proof +// - Verify a bundle proof void init_batch_verifier(char* params_dir, char* assets_dir); -char* get_batch_vk(); -char* check_chunk_proofs(char* chunk_proofs); -char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs); + char verify_batch_proof(char* proof, char* fork_name); -void init_chunk_prover(char* params_dir, char* assets_dir); -void init_chunk_verifier(char* params_dir, char* assets_dir); -char* get_chunk_vk(); -char* gen_chunk_proof(char* block_traces); -char verify_chunk_proof(char* proof); +char verify_bundle_proof(char* proof); -char* block_traces_to_chunk_info(char* block_traces); -void free_c_chars(char* ptr); +void init_chunk_verifier(char* params_dir, char* v3_assets_dir, char* v4_assets_dir); +char verify_chunk_proof(char* proof, char* fork_name); diff --git a/common/types/db.go b/common/types/db.go index 8766c24ad9..b05aea2eb3 100644 --- a/common/types/db.go +++ b/common/types/db.go @@ -196,6 +196,31 @@ func (s ChunkProofsStatus) String() string { } } +// BatchProofsStatus describes the proving status of batches that belong to a bundle. 
+type BatchProofsStatus int + +const ( + // BatchProofsStatusUndefined represents an undefined batch proofs status + BatchProofsStatusUndefined BatchProofsStatus = iota + + // BatchProofsStatusPending means that some batches that belong to this bundle have not been proven + BatchProofsStatusPending + + // BatchProofsStatusReady means that all batches that belong to this bundle have been proven + BatchProofsStatusReady +) + +func (s BatchProofsStatus) String() string { + switch s { + case BatchProofsStatusPending: + return "BatchProofsStatusPending" + case BatchProofsStatusReady: + return "BatchProofsStatusReady" + default: + return fmt.Sprintf("Undefined BatchProofsStatus (%d)", int32(s)) + } +} + // RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed) type RollupStatus int diff --git a/common/types/message/auth_msg.go b/common/types/message/auth_msg.go deleted file mode 100644 index 664f497ea3..0000000000 --- a/common/types/message/auth_msg.go +++ /dev/null @@ -1,91 +0,0 @@ -package message - -import ( - "crypto/ecdsa" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/common/hexutil" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/rlp" -) - -// AuthMsg is the first message exchanged from the Prover to the Sequencer. -// It effectively acts as a registration, and makes the Prover identification -// known to the Sequencer. -type AuthMsg struct { - // Message fields - Identity *Identity `json:"message"` - // Prover signature - Signature string `json:"signature"` -} - -// Identity contains all the fields to be signed by the prover. -type Identity struct { - // ProverName the prover name - ProverName string `json:"prover_name"` - // ProverVersion the prover version - ProverVersion string `json:"prover_version"` - // Challenge unique challenge generated by manager - Challenge string `json:"challenge"` - // HardForkName the hard fork name - HardForkName string `json:"hard_fork_name"` -} - -// SignWithKey auth message with private key and set public key in auth message's Identity -func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error { - // Hash identity content - hash, err := a.Identity.Hash() - if err != nil { - return err - } - - // Sign register message - sig, err := crypto.Sign(hash, priv) - if err != nil { - return err - } - a.Signature = hexutil.Encode(sig) - - return nil -} - -// Verify verifies the message of auth. -func (a *AuthMsg) Verify() (bool, error) { - hash, err := a.Identity.Hash() - if err != nil { - return false, err - } - sig := common.FromHex(a.Signature) - - pk, err := crypto.SigToPub(hash, sig) - if err != nil { - return false, err - } - return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil -} - -// PublicKey return public key from signature -func (a *AuthMsg) PublicKey() (string, error) { - hash, err := a.Identity.Hash() - if err != nil { - return "", err - } - sig := common.FromHex(a.Signature) - // recover public key - pk, err := crypto.SigToPub(hash, sig) - if err != nil { - return "", err - } - return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil -} - -// Hash returns the hash of the auth message, which should be the message used -// to construct the Signature. 
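
The BatchProofsStatus type added above captures whether every batch inside a bundle has been proven: a bundle only becomes eligible for a bundle-proof task once all of its batch proofs exist. A small hypothetical helper makes that aggregation explicit (the real decision is made by the coordinator when it collects batch proofs for a bundle; the package path and helper name below are assumptions for illustration):

// Hypothetical illustration of BatchProofsStatus semantics.
package statussketch

import "scroll-tech/common/types"

// bundleBatchProofsStatus reports BatchProofsStatusReady only when every
// batch that belongs to the bundle has been proven.
func bundleBatchProofsStatus(batchProven []bool) types.BatchProofsStatus {
	for _, proven := range batchProven {
		if !proven {
			return types.BatchProofsStatusPending
		}
	}
	return types.BatchProofsStatusReady
}
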
-func (i *Identity) Hash() ([]byte, error) { - byt, err := rlp.EncodeToBytes(i) - if err != nil { - return nil, err - } - hash := crypto.Keccak256Hash(byt) - return hash[:], nil -} diff --git a/common/types/message/legacy_auth_msg.go b/common/types/message/legacy_auth_msg.go deleted file mode 100644 index 1ba2b40cc7..0000000000 --- a/common/types/message/legacy_auth_msg.go +++ /dev/null @@ -1,89 +0,0 @@ -package message - -import ( - "crypto/ecdsa" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/common/hexutil" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/rlp" -) - -// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer. -// It effectively acts as a registration, and makes the Prover identification -// known to the Sequencer. -type LegacyAuthMsg struct { - // Message fields - Identity *LegacyIdentity `json:"message"` - // Prover signature - Signature string `json:"signature"` -} - -// LegacyIdentity contains all the fields to be signed by the prover. -type LegacyIdentity struct { - // ProverName the prover name - ProverName string `json:"prover_name"` - // ProverVersion the prover version - ProverVersion string `json:"prover_version"` - // Challenge unique challenge generated by manager - Challenge string `json:"challenge"` -} - -// SignWithKey auth message with private key and set public key in auth message's Identity -func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error { - // Hash identity content - hash, err := a.Identity.Hash() - if err != nil { - return err - } - - // Sign register message - sig, err := crypto.Sign(hash, priv) - if err != nil { - return err - } - a.Signature = hexutil.Encode(sig) - - return nil -} - -// Verify verifies the message of auth. -func (a *LegacyAuthMsg) Verify() (bool, error) { - hash, err := a.Identity.Hash() - if err != nil { - return false, err - } - sig := common.FromHex(a.Signature) - - pk, err := crypto.SigToPub(hash, sig) - if err != nil { - return false, err - } - return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil -} - -// PublicKey return public key from signature -func (a *LegacyAuthMsg) PublicKey() (string, error) { - hash, err := a.Identity.Hash() - if err != nil { - return "", err - } - sig := common.FromHex(a.Signature) - // recover public key - pk, err := crypto.SigToPub(hash, sig) - if err != nil { - return "", err - } - return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil -} - -// Hash returns the hash of the auth message, which should be the message used -// to construct the Signature. 
-func (i *LegacyIdentity) Hash() ([]byte, error) { - byt, err := rlp.EncodeToBytes(i) - if err != nil { - return nil, err - } - hash := crypto.Keccak256Hash(byt) - return hash[:], nil -} diff --git a/common/types/message/message.go b/common/types/message/message.go index 381fe1043d..00dd867dce 100644 --- a/common/types/message/message.go +++ b/common/types/message/message.go @@ -1,28 +1,11 @@ package message import ( - "crypto/ecdsa" - "crypto/rand" - "encoding/hex" "errors" "fmt" + "github.com/scroll-tech/da-codec/encoding/codecv3" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/common/hexutil" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/rlp" -) - -// ProofFailureType the proof failure type -type ProofFailureType int - -const ( - // ProofFailureUndefined the undefined type proof failure type - ProofFailureUndefined ProofFailureType = iota - // ProofFailurePanic proof failure for prover panic - ProofFailurePanic - // ProofFailureNoPanic proof failure for no prover panic - ProofFailureNoPanic ) // RespStatus represents status code from prover to scroll @@ -35,7 +18,7 @@ const ( StatusProofError ) -// ProofType represents the type of prover. +// ProofType represents the type of task. type ProofType uint8 func (r ProofType) String() string { @@ -44,6 +27,8 @@ func (r ProofType) String() string { return "proof type chunk" case ProofTypeBatch: return "proof type batch" + case ProofTypeBundle: + return "proof type bundle" default: return fmt.Sprintf("illegal proof type: %d", r) } @@ -52,93 +37,14 @@ func (r ProofType) String() string { const ( // ProofTypeUndefined is an unknown proof type ProofTypeUndefined ProofType = iota - // ProofTypeChunk is default prover, it only generates zk proof from traces. + // ProofTypeChunk generates a proof for a ZkEvm chunk, where the inputs are the execution traces for blocks contained in the chunk. ProofTypeChunk is the default proof type. ProofTypeChunk - // ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof. + // ProofTypeBatch generates zk proof from chunk proofs ProofTypeBatch + // ProofTypeBundle generates zk proof from batch proofs + ProofTypeBundle ) -// GenerateToken generates token -func GenerateToken() (string, error) { - b := make([]byte, 16) - if _, err := rand.Read(b); err != nil { - return "", err - } - return hex.EncodeToString(b), nil -} - -// ProofMsg is the data structure sent to the coordinator. -type ProofMsg struct { - *ProofDetail `json:"zkProof"` - // Prover signature - Signature string `json:"signature"` - - // Prover public key - publicKey string -} - -// Sign signs the ProofMsg. -func (a *ProofMsg) Sign(priv *ecdsa.PrivateKey) error { - hash, err := a.ProofDetail.Hash() - if err != nil { - return err - } - sig, err := crypto.Sign(hash, priv) - if err != nil { - return err - } - a.Signature = hexutil.Encode(sig) - return nil -} - -// Verify verifies ProofMsg.Signature. 
-func (a *ProofMsg) Verify() (bool, error) { - hash, err := a.ProofDetail.Hash() - if err != nil { - return false, err - } - sig := common.FromHex(a.Signature) - // recover public key - if a.publicKey == "" { - pk, err := crypto.SigToPub(hash, sig) - if err != nil { - return false, err - } - a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk)) - } - - return crypto.VerifySignature(common.FromHex(a.publicKey), hash, sig[:len(sig)-1]), nil -} - -// PublicKey return public key from signature -func (a *ProofMsg) PublicKey() (string, error) { - if a.publicKey == "" { - hash, err := a.ProofDetail.Hash() - if err != nil { - return "", err - } - sig := common.FromHex(a.Signature) - // recover public key - pk, err := crypto.SigToPub(hash, sig) - if err != nil { - return "", err - } - a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk)) - return a.publicKey, nil - } - - return a.publicKey, nil -} - -// TaskMsg is a wrapper type around db ProveTask type. -type TaskMsg struct { - UUID string `json:"uuid"` - ID string `json:"id"` - Type ProofType `json:"type,omitempty"` - BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"` - ChunkTaskDetail *ChunkTaskDetail `json:"chunk_task_detail,omitempty"` -} - // ChunkTaskDetail is a type containing ChunkTask detail. type ChunkTaskDetail struct { BlockHashes []common.Hash `json:"block_hashes"` @@ -146,30 +52,14 @@ type ChunkTaskDetail struct { // BatchTaskDetail is a type containing BatchTask detail. type BatchTaskDetail struct { - ChunkInfos []*ChunkInfo `json:"chunk_infos"` - ChunkProofs []*ChunkProof `json:"chunk_proofs"` + ChunkInfos []*ChunkInfo `json:"chunk_infos"` + ChunkProofs []*ChunkProof `json:"chunk_proofs"` + BatchHeader *codecv3.DABatch `json:"batch_header"` } -// ProofDetail is the message received from provers that contains zk proof, the status of -// the proof generation succeeded, and an error message if proof generation failed. -type ProofDetail struct { - ID string `json:"id"` - Type ProofType `json:"type,omitempty"` - Status RespStatus `json:"status"` - ChunkProof *ChunkProof `json:"chunk_proof,omitempty"` - BatchProof *BatchProof `json:"batch_proof,omitempty"` - Error string `json:"error,omitempty"` -} - -// Hash return proofMsg content hash. -func (z *ProofDetail) Hash() ([]byte, error) { - byt, err := rlp.EncodeToBytes(z) - if err != nil { - return nil, err - } - - hash := crypto.Keccak256Hash(byt) - return hash[:], nil +// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches. +type BundleTaskDetail struct { + BatchProofs []*BatchProof `json:"batch_proofs"` } // ChunkInfo is for calculating pi_hash for chunk @@ -204,15 +94,16 @@ type ChunkProof struct { // BatchProof includes the proof info that are required for batch verification and rollup. 
type BatchProof struct { + Protocol []byte `json:"protocol"` Proof []byte `json:"proof"` Instances []byte `json:"instances"` Vk []byte `json:"vk"` // cross-reference between cooridinator computation and prover compution - GitVersion string `json:"git_version,omitempty"` + BatchHash common.Hash `json:"batch_hash"` + GitVersion string `json:"git_version,omitempty"` } -// SanityCheck checks whether an BatchProof is in a legal format -// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4 +// SanityCheck checks whether a BatchProof is in a legal format func (ap *BatchProof) SanityCheck() error { if ap == nil { return errors.New("agg_proof is nil") @@ -221,8 +112,51 @@ func (ap *BatchProof) SanityCheck() error { if len(ap.Proof) == 0 { return errors.New("proof not ready") } + + if len(ap.Proof)%32 != 0 { + return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof)) + } + + if len(ap.Instances) == 0 { + return errors.New("instance not ready") + } + + if len(ap.Vk) == 0 { + return errors.New("vk not ready") + } + + return nil +} + +// BundleProof includes the proof info that are required for verification of a bundle of batch proofs. +type BundleProof struct { + Proof []byte `json:"proof"` + Instances []byte `json:"instances"` + Vk []byte `json:"vk"` + // cross-reference between cooridinator computation and prover compution + GitVersion string `json:"git_version,omitempty"` +} + +// SanityCheck checks whether a BundleProof is in a legal format +func (ap *BundleProof) SanityCheck() error { + if ap == nil { + return errors.New("agg_proof is nil") + } + + if len(ap.Proof) == 0 { + return errors.New("proof not ready") + } + if len(ap.Proof)%32 != 0 { - return fmt.Errorf("proof buffer has wrong length, expected: 32, got: %d", len(ap.Proof)) + return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof)) + } + + if len(ap.Instances) == 0 { + return errors.New("instance not ready") + } + + if len(ap.Vk) == 0 { + return errors.New("vk not ready") } return nil diff --git a/common/types/message/message_test.go b/common/types/message/message_test.go deleted file mode 100644 index cbc17e827b..0000000000 --- a/common/types/message/message_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package message - -import ( - "encoding/hex" - "testing" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/stretchr/testify/assert" -) - -func TestAuthMessageSignAndVerify(t *testing.T) { - privkey, err := crypto.GenerateKey() - assert.NoError(t, err) - - authMsg := &AuthMsg{ - Identity: &Identity{ - Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA", - ProverName: "test", - ProverVersion: "v1.0.0", - }, - } - assert.NoError(t, authMsg.SignWithKey(privkey)) - - // Check public key. - pk, err := authMsg.PublicKey() - assert.NoError(t, err) - assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk) - - ok, err := authMsg.Verify() - assert.NoError(t, err) - assert.Equal(t, true, ok) - - // Check public key is ok. 
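
A note on the tightened SanityCheck above: both BatchProof and the new BundleProof now reject an empty proof, a proof whose byte length is not a multiple of 32, empty instances, and an empty verifying key. A minimal, self-contained illustration (not part of the patch) using only the exported API defined above:

package main

import (
	"fmt"

	"scroll-tech/common/types/message"
)

func main() {
	// 64 bytes of proof data (a multiple of 32) plus non-empty instances and vk
	// passes the sanity check; truncating the proof by one byte fails it.
	proof := &message.BundleProof{
		Proof:     make([]byte, 64),
		Instances: make([]byte, 32),
		Vk:        []byte("vk-bytes"),
	}
	fmt.Println(proof.SanityCheck()) // <nil>

	proof.Proof = proof.Proof[:63] // 63 % 32 != 0
	fmt.Println(proof.SanityCheck()) // proof buffer length must be a multiple of 32, got: 63
}
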
- pub, err := authMsg.PublicKey() - assert.NoError(t, err) - pubkey := crypto.CompressPubkey(&privkey.PublicKey) - assert.Equal(t, pub, common.Bytes2Hex(pubkey)) -} - -func TestGenerateToken(t *testing.T) { - token, err := GenerateToken() - assert.NoError(t, err) - assert.Equal(t, 32, len(token)) -} - -func TestIdentityHash(t *testing.T) { - identity := &Identity{ - Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs", - ProverName: "test", - ProverVersion: "v1.0.0", - } - - hash, err := identity.Hash() - assert.NoError(t, err) - - expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2" - assert.Equal(t, expectedHash, hex.EncodeToString(hash)) -} - -func TestProofMessageSignVerifyPublicKey(t *testing.T) { - privkey, err := crypto.GenerateKey() - assert.NoError(t, err) - - proofMsg := &ProofMsg{ - ProofDetail: &ProofDetail{ - ID: "testID", - Type: ProofTypeChunk, - Status: StatusOk, - ChunkProof: &ChunkProof{ - StorageTrace: []byte("testStorageTrace"), - Protocol: []byte("testProtocol"), - Proof: []byte("testProof"), - Instances: []byte("testInstance"), - Vk: []byte("testVk"), - ChunkInfo: nil, - }, - Error: "testError", - }, - } - assert.NoError(t, proofMsg.Sign(privkey)) - - // Test when publicKey is not set. - ok, err := proofMsg.Verify() - assert.NoError(t, err) - assert.Equal(t, true, ok) - - // Test when publicKey is already set. - ok, err = proofMsg.Verify() - assert.NoError(t, err) - assert.Equal(t, true, ok) -} - -func TestProofDetailHash(t *testing.T) { - proofDetail := &ProofDetail{ - ID: "testID", - Type: ProofTypeChunk, - Status: StatusOk, - ChunkProof: &ChunkProof{ - StorageTrace: []byte("testStorageTrace"), - Protocol: []byte("testProtocol"), - Proof: []byte("testProof"), - Instances: []byte("testInstance"), - Vk: []byte("testVk"), - ChunkInfo: nil, - }, - Error: "testError", - } - hash, err := proofDetail.Hash() - assert.NoError(t, err) - expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1" - assert.Equal(t, expectedHash, hex.EncodeToString(hash)) -} - -func TestProveTypeString(t *testing.T) { - proofTypeChunk := ProofType(1) - assert.Equal(t, "proof type chunk", proofTypeChunk.String()) - - proofTypeBatch := ProofType(2) - assert.Equal(t, "proof type batch", proofTypeBatch.String()) - - illegalProof := ProofType(3) - assert.Equal(t, "illegal proof type: 3", illegalProof.String()) -} - -func TestProofMsgPublicKey(t *testing.T) { - privkey, err := crypto.GenerateKey() - assert.NoError(t, err) - - proofMsg := &ProofMsg{ - ProofDetail: &ProofDetail{ - ID: "testID", - Type: ProofTypeChunk, - Status: StatusOk, - ChunkProof: &ChunkProof{ - StorageTrace: []byte("testStorageTrace"), - Protocol: []byte("testProtocol"), - Proof: []byte("testProof"), - Instances: []byte("testInstance"), - Vk: []byte("testVk"), - ChunkInfo: nil, - }, - Error: "testError", - }, - } - assert.NoError(t, proofMsg.Sign(privkey)) - - // Test when publicKey is not set. - pk, err := proofMsg.PublicKey() - assert.NoError(t, err) - assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk) - - // Test when publicKey is already set. 
- proofMsg.publicKey = common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)) - pk, err = proofMsg.PublicKey() - assert.NoError(t, err) - assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk) -} diff --git a/common/version/version.go b/common/version/version.go index a9e6ea77d0..4cb7e5b9d1 100644 --- a/common/version/version.go +++ b/common/version/version.go @@ -5,7 +5,7 @@ import ( "runtime/debug" ) -var tag = "v4.4.36" +var tag = "v4.4.37" var commit = func() string { if info, ok := debug.ReadBuildInfo(); ok { diff --git a/coordinator/Makefile b/coordinator/Makefile index 5f7cde3d77..5980f40e4c 100644 --- a/coordinator/Makefile +++ b/coordinator/Makefile @@ -26,6 +26,9 @@ coordinator_api: libzkp ## Builds the Coordinator api instance. coordinator_cron: go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron +coordinator_tool: + go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool + coordinator_api_skip_libzkp: go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api @@ -54,4 +57,4 @@ docker: docker_push: docker push scrolltech/coordinator-api:${IMAGE_VERSION} - docker push scrolltech/coordinator-cron:${IMAGE_VERSION} \ No newline at end of file + docker push scrolltech/coordinator-cron:${IMAGE_VERSION} diff --git a/coordinator/cmd/tool/tool.go b/coordinator/cmd/tool/tool.go new file mode 100644 index 0000000000..ec96e9cf1c --- /dev/null +++ b/coordinator/cmd/tool/tool.go @@ -0,0 +1,101 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/scroll-tech/go-ethereum/log" + "github.com/urfave/cli/v2" + + "scroll-tech/common/database" + "scroll-tech/common/types/message" + "scroll-tech/common/utils" + "scroll-tech/common/version" + + "scroll-tech/coordinator/internal/config" + "scroll-tech/coordinator/internal/orm" + coordinatorType "scroll-tech/coordinator/internal/types" +) + +var app *cli.App + +func init() { + // Set up coordinator app info. + app = cli.NewApp() + app.Action = action + app.Name = "coordinator-tool" + app.Usage = "The Scroll L2 Coordinator Tool" + app.Version = version.Version + app.Flags = append(app.Flags, utils.CommonFlags...) 
+ app.Before = func(ctx *cli.Context) error { + return utils.LogSetup(ctx) + } +} + +func action(ctx *cli.Context) error { + cfgFile := ctx.String(utils.ConfigFileFlag.Name) + cfg, err := config.NewConfig(cfgFile) + if err != nil { + log.Crit("failed to load config file", "config file", cfgFile, "error", err) + } + db, err := database.InitDB(cfg.DB) + if err != nil { + log.Crit("failed to init db connection", "err", err) + } + defer func() { + if err = database.CloseDB(db); err != nil { + log.Error("can not close db connection", "error", err) + } + }() + + batchOrm := orm.NewBatch(db) + taskID := "fa9a290c8f1a46dc626fa67d626fadfe4803968ce776383996f3ae12504a2591" + batches, err := batchOrm.GetBatchesByBundleHash(ctx.Context, taskID) + if err != nil { + log.Error("failed to get batch proofs for batch", "task_id", taskID, "error", err) + return err + } + + if len(batches) == 0 { + log.Error("failed to get batch proofs for bundle, not found batch", "task_id", taskID) + return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID) + } + + var batchProofs []*message.BatchProof + for _, batch := range batches { + var proof message.BatchProof + if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil { + log.Error("failed to unmarshal batch proof") + return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash) + } + batchProofs = append(batchProofs, &proof) + } + + taskDetail := message.BundleTaskDetail{ + BatchProofs: batchProofs, + } + + batchProofsBytes, err := json.Marshal(taskDetail) + if err != nil { + log.Error("failed to marshal batch proof") + return fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", taskID, err) + } + + taskMsg := &coordinatorType.GetTaskSchema{ + TaskID: taskID, + TaskType: int(message.ProofTypeBundle), + TaskData: string(batchProofsBytes), + } + + log.Info("task_msg", "data", taskMsg) + return nil +} + +func main() { + // RunApp the coordinator. 
+ if err := app.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/coordinator/conf/config.json b/coordinator/conf/config.json index b5a09e25ad..15ae708f8d 100644 --- a/coordinator/conf/config.json +++ b/coordinator/conf/config.json @@ -2,13 +2,15 @@ "prover_manager": { "provers_per_session": 1, "session_attempts": 5, + "bundle_collection_time_sec": 180, "batch_collection_time_sec": 180, "chunk_collection_time_sec": 180, "verifier": { "fork_name": "bernoulli", "mock_mode": true, "params_path": "", - "assets_path": "" + "assets_path_lo": "", + "assets_path_hi": "" }, "max_verifier_workers": 4, "min_prover_version": "v1.0.0" @@ -24,7 +26,7 @@ }, "auth": { "secret": "prover secret key", - "challenge_expire_duration_sec": 10, + "challenge_expire_duration_sec": 3600, "login_expire_duration_sec": 3600 } } diff --git a/coordinator/go.mod b/coordinator/go.mod index ef37c7a2ac..72f8a08513 100644 --- a/coordinator/go.mod +++ b/coordinator/go.mod @@ -7,7 +7,7 @@ require ( github.com/gin-gonic/gin v1.9.1 github.com/go-resty/resty/v2 v2.7.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea + github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/shopspring/decimal v1.3.1 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.25.7 @@ -37,20 +37,20 @@ require ( github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.11 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect google.golang.org/protobuf v1.33.0 // indirect ) require ( github.com/google/uuid v1.6.0 github.com/prometheus/client_golang v1.19.0 - github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570 + github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb ) require ( github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.12.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/chenzhuoyu/iasm v0.9.0 // indirect github.com/consensys/bavard v0.1.13 // indirect @@ -58,27 +58,27 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect + github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/holiman/uint256 v1.2.4 // indirect - github.com/iden3/go-iden3-crypto v0.0.15 // indirect + github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/scroll-tech/zktrie v0.8.2 // indirect + github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/supranational/blst v0.3.11 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/supranational/blst v0.3.12 // 
indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/coordinator/go.sum b/coordinator/go.sum index 2fdec47e50..a1f4d00c88 100644 --- a/coordinator/go.sum +++ b/coordinator/go.sum @@ -7,8 +7,8 @@ github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= -github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -43,8 +43,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c= +github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= +github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= @@ -96,8 +96,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= -github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= +github.com/iden3/go-iden3-crypto v0.0.16/go.mod 
h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= @@ -173,12 +173,12 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570 h1:2oA2bAFPQXDZcUK8TA9qd5zj6AsURpHyBaAha5goP0c= -github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA= -github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ= -github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= +github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= +github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= @@ -196,8 +196,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= +github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= @@ -206,10 +206,10 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod 
h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= @@ -221,8 +221,8 @@ github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6S github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= @@ -232,8 +232,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -243,13 +243,13 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod 
h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -265,10 +265,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= @@ -277,8 +275,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/coordinator/internal/config/config.go b/coordinator/internal/config/config.go index 
55c5c68a83..cbe9ca02d8 100644 --- a/coordinator/internal/config/config.go +++ b/coordinator/internal/config/config.go @@ -21,6 +21,8 @@ type ProverManager struct { BatchCollectionTimeSec int `json:"batch_collection_time_sec"` // ChunkCollectionTimeSec chunk Proof collection time (in seconds). ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"` + // BundleCollectionTimeSec bundle Proof collection time (in seconds). + BundleCollectionTimeSec int `json:"bundle_collection_time_sec"` // Max number of workers in verifier worker pool MaxVerifierWorkers int `json:"max_verifier_workers"` // MinProverVersion is the minimum version of the prover that is required. @@ -50,10 +52,11 @@ type Config struct { // VerifierConfig load zk verifier config. type VerifierConfig struct { - ForkName string `json:"fork_name"` - MockMode bool `json:"mock_mode"` - ParamsPath string `json:"params_path"` - AssetsPath string `json:"assets_path"` + ForkName string `json:"fork_name"` + MockMode bool `json:"mock_mode"` + ParamsPath string `json:"params_path"` + AssetsPathLo string `json:"assets_path_lo"` // lower version Verifier + AssetsPathHi string `json:"assets_path_hi"` // higher version Verifier } // NewConfig returns a new instance of Config. diff --git a/coordinator/internal/controller/api/auth.go b/coordinator/internal/controller/api/auth.go index 56f8518d28..205807e676 100644 --- a/coordinator/internal/controller/api/auth.go +++ b/coordinator/internal/controller/api/auth.go @@ -8,9 +8,9 @@ import ( "github.com/gin-gonic/gin" "gorm.io/gorm" - "scroll-tech/common/types/message" - + "scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/logic/auth" + "scroll-tech/coordinator/internal/logic/verifier" "scroll-tech/coordinator/internal/types" ) @@ -20,9 +20,9 @@ type AuthController struct { } // NewAuthController returns an LoginController instance -func NewAuthController(db *gorm.DB) *AuthController { +func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController { return &AuthController{ - loginLogic: auth.NewLoginLogic(db), + loginLogic: auth.NewLoginLogic(db, cfg, vf), } } @@ -40,6 +40,10 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) { return "", errors.New("check challenge failure for the not equal challenge string") } + if err := a.loginLogic.Check(&login); err != nil { + return "", fmt.Errorf("check the login parameter failure: %w", err) + } + // check the challenge is used, if used, return failure if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil { return "", fmt.Errorf("login insert challenge string failure:%w", err) @@ -54,44 +58,10 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims { return jwt.MapClaims{} } - var publicKey string - var err error - if v.Message.HardForkName != "" { - authMsg := message.AuthMsg{ - Identity: &message.Identity{ - Challenge: v.Message.Challenge, - ProverName: v.Message.ProverName, - ProverVersion: v.Message.ProverVersion, - HardForkName: v.Message.HardForkName, - }, - Signature: v.Signature, - } - publicKey, err = authMsg.PublicKey() - } else { - authMsg := message.LegacyAuthMsg{ - Identity: &message.LegacyIdentity{ - Challenge: v.Message.Challenge, - ProverName: v.Message.ProverName, - ProverVersion: v.Message.ProverVersion, - }, - Signature: v.Signature, - } - publicKey, err = authMsg.PublicKey() - } - - if err != nil { - return jwt.MapClaims{} - } - - if v.Message.HardForkName == "" { - v.Message.HardForkName = "shanghai" - } - return 
jwt.MapClaims{ - types.PublicKey: publicKey, + types.PublicKey: v.PublicKey, types.ProverName: v.Message.ProverName, types.ProverVersion: v.Message.ProverVersion, - types.HardForkName: v.Message.HardForkName, } } @@ -109,9 +79,5 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} { if proverVersion, ok := claims[types.ProverVersion]; ok { c.Set(types.ProverVersion, proverVersion) } - - if hardForkName, ok := claims[types.HardForkName]; ok { - c.Set(types.HardForkName, hardForkName) - } return nil } diff --git a/coordinator/internal/controller/api/controller.go b/coordinator/internal/controller/api/controller.go index a1bf61fd7d..e20a2d911d 100644 --- a/coordinator/internal/controller/api/controller.go +++ b/coordinator/internal/controller/api/controller.go @@ -28,7 +28,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap) - Auth = NewAuthController(db) - GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg) - SubmitProof = NewSubmitProofController(cfg, db, vf, reg) + Auth = NewAuthController(db, cfg, vf) + GetTask = NewGetTaskController(cfg, chainCfg, db, reg) + SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg) } diff --git a/coordinator/internal/controller/api/get_task.go b/coordinator/internal/controller/api/get_task.go index 45ddd0c694..c6fd3d5085 100644 --- a/coordinator/internal/controller/api/get_task.go +++ b/coordinator/internal/controller/api/get_task.go @@ -17,7 +17,6 @@ import ( "scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/logic/provertask" - "scroll-tech/coordinator/internal/logic/verifier" coordinatorType "scroll-tech/coordinator/internal/types" ) @@ -29,9 +28,10 @@ type GetTaskController struct { } // NewGetTaskController create a get prover task controller -func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController { - chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg) - batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg) +func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController { + chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg) + batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg) + bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg) ptc := &GetTaskController{ proverTasks: make(map[message.ProofType]provertask.ProverTask), @@ -43,7 +43,7 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db * ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask ptc.proverTasks[message.ProofTypeBatch] = batchProverTask - + ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask return ptc } @@ -107,18 +107,25 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) { } func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType { - proofType := message.ProofType(para.TaskType) + var proofTypes []message.ProofType + if para.TaskType != 0 { + proofTypes = append(proofTypes, message.ProofType(para.TaskType)) + } - proofTypes := []message.ProofType{ - message.ProofTypeChunk, - message.ProofTypeBatch, + for _, proofType := range para.TaskTypes { + proofTypes = append(proofTypes, message.ProofType(proofType)) } - 
if proofType == message.ProofTypeUndefined { - rand.Shuffle(len(proofTypes), func(i, j int) { - proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i] - }) - proofType = proofTypes[0] + if len(proofTypes) == 0 { + proofTypes = []message.ProofType{ + message.ProofTypeChunk, + message.ProofTypeBatch, + message.ProofTypeBundle, + } } - return proofType + + rand.Shuffle(len(proofTypes), func(i, j int) { + proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i] + }) + return proofTypes[0] } diff --git a/coordinator/internal/controller/api/submit_proof.go b/coordinator/internal/controller/api/submit_proof.go index d4ac0c7091..e746cc256c 100644 --- a/coordinator/internal/controller/api/submit_proof.go +++ b/coordinator/internal/controller/api/submit_proof.go @@ -1,15 +1,14 @@ package api import ( - "encoding/json" "fmt" "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" + "github.com/scroll-tech/go-ethereum/params" "gorm.io/gorm" "scroll-tech/common/types" - "scroll-tech/common/types/message" "scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/logic/submitproof" @@ -23,9 +22,9 @@ type SubmitProofController struct { } // NewSubmitProofController create the submit proof api controller instance -func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController { +func NewSubmitProofController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController { return &SubmitProofController{ - submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, vf, reg), + submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, chainCfg, db, vf, reg), } } @@ -38,36 +37,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) { return } - proofMsg := message.ProofMsg{ - ProofDetail: &message.ProofDetail{ - ID: spp.TaskID, - Type: message.ProofType(spp.TaskType), - Status: message.RespStatus(spp.Status), - }, - } - - if spp.Status == int(message.StatusOk) { - switch message.ProofType(spp.TaskType) { - case message.ProofTypeChunk: - var tmpChunkProof message.ChunkProof - if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil { - nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err) - types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr) - return - } - proofMsg.ChunkProof = &tmpChunkProof - case message.ProofTypeBatch: - var tmpBatchProof message.BatchProof - if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil { - nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err) - types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr) - return - } - proofMsg.BatchProof = &tmpBatchProof - } - } - - if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil { + if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, spp); err != nil { nerr := fmt.Errorf("handle zk proof failure, err:%w", err) types.RenderFailure(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr) return diff --git a/coordinator/internal/controller/cron/collect_proof.go b/coordinator/internal/controller/cron/collect_proof.go index 7fb95d4abd..92ff0576f4 100644 --- a/coordinator/internal/controller/cron/collect_proof.go +++ b/coordinator/internal/controller/cron/collect_proof.go @@ -23,38 +23,55 @@ type Collector struct { db *gorm.DB ctx context.Context - 
stopChunkTimeoutChan chan struct{} - stopBatchTimeoutChan chan struct{} - stopBatchAllChunkReadyChan chan struct{} - stopCleanChallengeChan chan struct{} + stopBundleTimeoutChan chan struct{} + stopChunkTimeoutChan chan struct{} + stopBatchTimeoutChan chan struct{} + stopBatchAllChunkReadyChan chan struct{} + stopBundleAllBatchReadyChan chan struct{} + stopCleanChallengeChan chan struct{} proverTaskOrm *orm.ProverTask + bundleOrm *orm.Bundle chunkOrm *orm.Chunk batchOrm *orm.Batch challenge *orm.Challenge - timeoutBatchCheckerRunTotal prometheus.Counter - batchProverTaskTimeoutTotal prometheus.Counter - timeoutChunkCheckerRunTotal prometheus.Counter - chunkProverTaskTimeoutTotal prometheus.Counter - checkBatchAllChunkReadyRunTotal prometheus.Counter + timeoutBundleCheckerRunTotal prometheus.Counter + bundleProverTaskTimeoutTotal prometheus.Counter + timeoutBatchCheckerRunTotal prometheus.Counter + batchProverTaskTimeoutTotal prometheus.Counter + timeoutChunkCheckerRunTotal prometheus.Counter + chunkProverTaskTimeoutTotal prometheus.Counter + checkBatchAllChunkReadyRunTotal prometheus.Counter + checkBundleAllBatchReadyRunTotal prometheus.Counter } // NewCollector create a collector to cron collect the data to send to prover func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector { c := &Collector{ - cfg: cfg, - db: db, - ctx: ctx, - stopChunkTimeoutChan: make(chan struct{}), - stopBatchTimeoutChan: make(chan struct{}), - stopBatchAllChunkReadyChan: make(chan struct{}), - stopCleanChallengeChan: make(chan struct{}), - proverTaskOrm: orm.NewProverTask(db), - chunkOrm: orm.NewChunk(db), - batchOrm: orm.NewBatch(db), - challenge: orm.NewChallenge(db), - + cfg: cfg, + db: db, + ctx: ctx, + stopBundleTimeoutChan: make(chan struct{}), + stopChunkTimeoutChan: make(chan struct{}), + stopBatchTimeoutChan: make(chan struct{}), + stopBatchAllChunkReadyChan: make(chan struct{}), + stopBundleAllBatchReadyChan: make(chan struct{}), + stopCleanChallengeChan: make(chan struct{}), + proverTaskOrm: orm.NewProverTask(db), + chunkOrm: orm.NewChunk(db), + batchOrm: orm.NewBatch(db), + bundleOrm: orm.NewBundle(db), + challenge: orm.NewChallenge(db), + + timeoutBundleCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "coordinator_bundle_timeout_checker_run_total", + Help: "Total number of bundle timeout checker run.", + }), + bundleProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "coordinator_bundle_prover_task_timeout_total", + Help: "Total number of bundle timeout prover task.", + }), timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "coordinator_batch_timeout_checker_run_total", Help: "Total number of batch timeout checker run.", @@ -75,11 +92,17 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom Name: "coordinator_check_batch_all_chunk_ready_run_total", Help: "Total number of check batch all chunks ready total", }), + checkBundleAllBatchReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "coordinator_check_bundle_all_batch_ready_run_total", + Help: "Total number of check bundle all batches ready total", + }), } + go c.timeoutBundleProofTask() go c.timeoutBatchProofTask() go c.timeoutChunkProofTask() go c.checkBatchAllChunkReady() + go c.checkBundleAllBatchReady() go c.cleanupChallenge() log.Info("Start coordinator cron successfully.") @@ -91,10 +114,45 @@ func NewCollector(ctx context.Context, db 
*gorm.DB, cfg *config.Config, reg prom func (c *Collector) Stop() { c.stopChunkTimeoutChan <- struct{}{} c.stopBatchTimeoutChan <- struct{}{} + c.stopBundleTimeoutChan <- struct{}{} c.stopBatchAllChunkReadyChan <- struct{}{} c.stopCleanChallengeChan <- struct{}{} } +// timeoutBundleProofTask cron checks the send task is timeout. if timeout reached, restore the +// bundle task to unassigned. then the bundle collector can retry it. +func (c *Collector) timeoutBundleProofTask() { + defer func() { + if err := recover(); err != nil { + nerr := fmt.Errorf("timeout bundle proof task panic error:%v", err) + log.Warn(nerr.Error()) + } + }() + + ticker := time.NewTicker(time.Second * 2) + for { + select { + case <-ticker.C: + c.timeoutBundleCheckerRunTotal.Inc() + timeout := time.Duration(c.cfg.ProverManager.BundleCollectionTimeSec) * time.Second + assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeBundle, timeout) + if err != nil { + log.Error("get unassigned session info failure", "error", err) + break + } + c.check(assignedProverTasks, c.bundleProverTaskTimeoutTotal) + case <-c.ctx.Done(): + if c.ctx.Err() != nil { + log.Error("manager context canceled with error", "error", c.ctx.Err()) + } + return + case <-c.stopBundleTimeoutChan: + log.Info("the coordinator timeoutBundleProofTask run loop exit") + return + } + } +} + // timeoutBatchProofTask cron check the send task is timeout. if timeout reached, restore the // chunk/batch task to unassigned. then the batch/chunk collector can retry it. func (c *Collector) timeoutBatchProofTask() { @@ -202,6 +260,16 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err) return err } + case message.ProofTypeBundle: + if err := c.bundleOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil { + log.Error("decrease bundle active attempts failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err) + return err + } + + if err := c.bundleOrm.UpdateProvingStatusFailed(c.ctx, assignedProverTask.TaskID, c.cfg.ProverManager.SessionAttempts, tx); err != nil { + log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err) + return err + } } return nil @@ -268,3 +336,60 @@ func (c *Collector) checkBatchAllChunkReady() { } } } + +func (c *Collector) checkBundleAllBatchReady() { + defer func() { + if err := recover(); err != nil { + nerr := fmt.Errorf("check bundle all batches ready panic error:%v", err) + log.Warn(nerr.Error()) + } + }() + + ticker := time.NewTicker(time.Second * 10) + for { + select { + case <-ticker.C: + c.checkBundleAllBatchReadyRunTotal.Inc() + page := 1 + pageSize := 50 + for { + offset := (page - 1) * pageSize + bundles, err := c.bundleOrm.GetUnassignedAndBatchesUnreadyBundles(c.ctx, offset, pageSize) + if err != nil { + log.Warn("checkBundleAllBatchReady GetUnassignedAndBatchesUnreadyBundles", "error", err) + break + } + + for _, bundle := range bundles { + allReady, checkErr := c.batchOrm.CheckIfBundleBatchProofsAreReady(c.ctx, bundle.Hash) + if checkErr != nil { + log.Warn("checkBundleAllBatchReady CheckIfBundleBatchProofsAreReady failure", "error", checkErr, "hash", bundle.Hash) +
continue + } + + if !allReady { + continue + } + + if updateErr := c.bundleOrm.UpdateBatchProofsStatusByBundleHash(c.ctx, bundle.Hash, types.BatchProofsStatusReady); updateErr != nil { + log.Warn("checkBundleAllBatchReady UpdateBatchProofsStatusByBundleHash failure", "error", updateErr, "hash", bundle.Hash) + } + } + + if len(bundles) < pageSize { + break + } + page++ + } + + case <-c.ctx.Done(): + if c.ctx.Err() != nil { + log.Error("manager context canceled with error", "error", c.ctx.Err()) + } + return + case <-c.stopBundleAllBatchReadyChan: + log.Info("the coordinator checkBundleAllBatchReady run loop exit") + return + } + } +} diff --git a/coordinator/internal/logic/auth/login.go b/coordinator/internal/logic/auth/login.go index 85aef04006..1d7d5e113a 100644 --- a/coordinator/internal/logic/auth/login.go +++ b/coordinator/internal/logic/auth/login.go @@ -1,25 +1,104 @@ package auth import ( + "errors" + "fmt" + "github.com/gin-gonic/gin" + "github.com/scroll-tech/go-ethereum/log" "gorm.io/gorm" + "scroll-tech/common/version" + + "scroll-tech/coordinator/internal/config" + "scroll-tech/coordinator/internal/logic/verifier" "scroll-tech/coordinator/internal/orm" + "scroll-tech/coordinator/internal/types" ) // LoginLogic the auth logic type LoginLogic struct { + cfg *config.Config challengeOrm *orm.Challenge + chunkVks map[string]struct{} + batchVKs map[string]struct{} + bundleVks map[string]struct{} } // NewLoginLogic new a LoginLogic -func NewLoginLogic(db *gorm.DB) *LoginLogic { - return &LoginLogic{ +func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic { + l := &LoginLogic{ + cfg: cfg, + chunkVks: make(map[string]struct{}), + batchVKs: make(map[string]struct{}), + bundleVks: make(map[string]struct{}), challengeOrm: orm.NewChallenge(db), } + + for _, vk := range vf.ChunkVKMap { + l.chunkVks[vk] = struct{}{} + } + + for _, vk := range vf.BatchVKMap { + l.batchVKs[vk] = struct{}{} + } + + for _, vk := range vf.BundleVkMap { + l.bundleVks[vk] = struct{}{} + } + + return l } // InsertChallengeString insert and check the challenge string is existed func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error { return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge) } + +func (l *LoginLogic) Check(login *types.LoginParameter) error { + if login.PublicKey != "" { + verify, err := login.Verify() + if err != nil || !verify { + return errors.New("auth message verify failure") + } + } + + if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.MinProverVersion) { + return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", + l.cfg.ProverManager.MinProverVersion, login.Message.ProverVersion) + } + + if len(login.Message.ProverTypes) > 0 { + vks := make(map[string]struct{}) + for _, proverType := range login.Message.ProverTypes { + switch proverType { + case types.ProverTypeChunk: + for vk := range l.chunkVks { + vks[vk] = struct{}{} + } + case types.ProverTypeBatch: + for vk := range l.batchVKs { + vks[vk] = struct{}{} + } + for vk := range l.bundleVks { + vks[vk] = struct{}{} + } + default: + log.Error("invalid prover_type", "value", proverType) + } + } + + for _, vk := range login.Message.VKs { + if _, ok := vks[vk]; !ok { + log.Error("vk inconsistency", "prover vk", vk) + if !version.CheckScrollProverVersion(login.Message.ProverVersion) { + return fmt.Errorf("incompatible prover version.
please upgrade your prover, expect version: %s, actual version: %s", + version.Version, login.Message.ProverVersion) + } + // if the prover reports a same prover version + return errors.New("incompatible vk. please check your params files or config files") + } + } + } + return nil +} diff --git a/coordinator/internal/logic/provertask/batch_prover_task.go b/coordinator/internal/logic/provertask/batch_prover_task.go index 79cc4f8aea..cfc649c029 100644 --- a/coordinator/internal/logic/provertask/batch_prover_task.go +++ b/coordinator/internal/logic/provertask/batch_prover_task.go @@ -4,12 +4,12 @@ import ( "context" "encoding/json" "fmt" - "math" "time" "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/scroll-tech/da-codec/encoding/codecv3" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" @@ -35,18 +35,13 @@ type BatchProverTask struct { } // NewBatchProverTask new a batch collector -func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask { - forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg) - log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap) - +func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask { bp := &BatchProverTask{ BaseProverTask: BaseProverTask{ - vkMap: vkMap, - reverseVkMap: reverseMap(vkMap), db: db, cfg: cfg, - nameForkMap: nameForkMap, - forkHeights: forkHeights, + chainCfg: chainCfg, + blockOrm: orm.NewL2Block(db), chunkOrm: orm.NewChunk(db), batchOrm: orm.NewBatch(db), proverTaskOrm: orm.NewProverTask(db), @@ -65,38 +60,20 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go return bp } -type chunkIndexRange struct { - start uint64 - end uint64 -} - -func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange { - var start, end = r.start, r.end - if o.start < r.start { - start = o.start - } - if o.end > r.end { - end = o.end +// Assign load and assign batch tasks +func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { + taskCtx, err := bp.checkParameter(ctx) + if err != nil || taskCtx == nil { + return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) } - return &chunkIndexRange{start, end} -} - -func (r *chunkIndexRange) contains(start, end uint64) bool { - return r.start <= start && r.end > end -} - -type getHardForkNameByBatchFunc func(*orm.Batch) (string, error) -func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext, - chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) { - startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts var batchTask *orm.Batch for i := 0; i < 5; i++ { var getTaskError error var tmpBatchTask *orm.Batch - tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts) + tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), 
maxActiveAttempts, maxTotalAttempts) if getTaskError != nil { log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) return nil, ErrCoordinatorInternalFailure @@ -105,7 +82,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt // Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned` // batch to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql. if tmpBatchTask == nil { - tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts) + tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts) if getTaskError != nil { log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) return nil, ErrCoordinatorInternalFailure @@ -138,17 +115,12 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt } log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName) - var ( - proverVersion = taskCtx.ProverVersion - hardForkName = taskCtx.HardForkName - ) - var err error - if getHardForkName != nil { - hardForkName, err = getHardForkName(batchTask) - if err != nil { - log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error()) - return nil, ErrCoordinatorInternalFailure - } + + hardForkName, getHardForkErr := bp.hardForkName(ctx, batchTask) + if getHardForkErr != nil { + bp.recoverActiveAttempts(ctx, batchTask) + log.Error("retrieve hard fork name by batch failed", "task_id", batchTask.Hash, "err", getHardForkErr) + return nil, ErrCoordinatorInternalFailure } proverTask := orm.ProverTask{ @@ -156,10 +128,10 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt ProverPublicKey: taskCtx.PublicKey, TaskType: int16(message.ProofTypeBatch), ProverName: taskCtx.ProverName, - ProverVersion: proverVersion, + ProverVersion: taskCtx.ProverVersion, ProvingStatus: int16(types.ProverAssigned), FailureType: int16(types.ProverTaskFailureTypeUndefined), - // here why need use UTC time. see scroll/common/databased/db.go + // here why need use UTC time. 
see scroll/common/database/db.go AssignedAt: utils.NowUTC(), } @@ -170,7 +142,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt return nil, ErrCoordinatorInternalFailure } - taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask) + taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName) if err != nil { bp.recoverActiveAttempts(ctx, batchTask) log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err) @@ -187,115 +159,21 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt return taskMsg, nil } -func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) { - hardForkNumber, err := bp.getHardForkNumberByName(hardForkName) - if err != nil { - log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName) - return nil, err +func (bp *BatchProverTask) hardForkName(ctx *gin.Context, batchTask *orm.Batch) (string, error) { + startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, batchTask.StartChunkHash) + if getChunkErr != nil { + return "", getChunkErr } - // if the hard fork number set, rollup relayer must generate the chunk from hard fork number, - // so the hard fork chunk's start_block_number must be ForkBlockNumber - var startChunkIndex uint64 = 0 - var endChunkIndex uint64 = math.MaxInt64 - fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights) - if fromBlockNum != 0 { - startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum) - if chunkErr != nil { - log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr) - return nil, ErrCoordinatorInternalFailure - } - if startChunk == nil { - return nil, nil - } - startChunkIndex = startChunk.Index + l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber) + if getBlockErr != nil { + return "", getBlockErr } - if toBlockNum != math.MaxInt64 { - toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum) - if chunkErr != nil { - log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr) - return nil, ErrCoordinatorInternalFailure - } - if toChunk != nil { - // toChunk being nil only indicates that we haven't yet reached the fork boundary - // don't need change the endChunkIndex of math.MaxInt64 - endChunkIndex = toChunk.Index - } - } - return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil + hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp) + return hardForkName, nil } -func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { - chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName) - if err != nil { - return nil, err - } - if chunkRange == nil { - return nil, nil - } - return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil) -} - -func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { - var ( - hardForkNames [2]string - chunkRanges [2]*chunkIndexRange - err error - ) - var chunkRange *chunkIndexRange - for i := 0; i < 2; i++ { - 
hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]] - chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i]) - if err != nil { - return nil, err - } - if chunkRanges[i] != nil { - if chunkRange == nil { - chunkRange = chunkRanges[i] - } else { - chunkRange = chunkRange.merge(*chunkRanges[i]) - } - } - } - if chunkRange == nil { - return nil, nil - } - var hardForkName string - getHardForkName := func(batch *orm.Batch) (string, error) { - for i := 0; i < 2; i++ { - if chunkRanges[i] != nil && chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) { - hardForkName = hardForkNames[i] - break - } - } - if hardForkName == "" { - log.Warn("get batch not belongs to any hard fork name", "batch id", batch.Index) - return "", fmt.Errorf("get batch not belongs to any hard fork name, batch id: %d", batch.Index) - } - return hardForkName, nil - } - schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName) - if schema != nil && err == nil { - schema.HardForkName = hardForkName - return schema, nil - } - return schema, err -} - -// Assign load and assign batch tasks -func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { - taskCtx, err := bp.checkParameter(ctx, getTaskParameter) - if err != nil || taskCtx == nil { - return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) - } - - if len(getTaskParameter.VKs) > 0 { - return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter) - } - return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter) -} - -func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) { +func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, batch *orm.Batch, hardForkName string) (*coordinatorType.GetTaskSchema, error) { // get chunk from db chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID) if err != nil { @@ -303,6 +181,10 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove return nil, err } + if len(chunks) == 0 { + return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID) + } + var chunkProofs []*message.ChunkProof var chunkInfos []*message.ChunkInfo for _, chunk := range chunks { @@ -331,16 +213,25 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove ChunkProofs: chunkProofs, } + if hardForkName == "darwin" { + batchHeader, decodeErr := codecv3.NewDABatchFromBytes(batch.BatchHeader) + if decodeErr != nil { + return nil, fmt.Errorf("failed to decode batch header, taskID:%s err:%w", task.TaskID, decodeErr) + } + taskDetail.BatchHeader = batchHeader + } + chunkProofsBytes, err := json.Marshal(taskDetail) if err != nil { return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err) } taskMsg := &coordinatorType.GetTaskSchema{ - UUID: task.UUID.String(), - TaskID: task.TaskID, - TaskType: int(message.ProofTypeBatch), - TaskData: string(chunkProofsBytes), + UUID: task.UUID.String(), + TaskID: task.TaskID, + TaskType: int(message.ProofTypeBatch), + TaskData: string(chunkProofsBytes), + HardForkName: hardForkName, } return taskMsg, nil } diff --git a/coordinator/internal/logic/provertask/bundle_prover_task.go b/coordinator/internal/logic/provertask/bundle_prover_task.go new file mode 100644 index 0000000000..d244d2280b --- /dev/null +++ 
b/coordinator/internal/logic/provertask/bundle_prover_task.go @@ -0,0 +1,225 @@ +package provertask + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" + "gorm.io/gorm" + + "scroll-tech/common/forks" + "scroll-tech/common/types" + "scroll-tech/common/types/message" + "scroll-tech/common/utils" + + "scroll-tech/coordinator/internal/config" + "scroll-tech/coordinator/internal/orm" + coordinatorType "scroll-tech/coordinator/internal/types" +) + +// BundleProverTask is prover task implement for bundle proof +type BundleProverTask struct { + BaseProverTask + + bundleAttemptsExceedTotal prometheus.Counter + bundleTaskGetTaskTotal *prometheus.CounterVec + bundleTaskGetTaskProver *prometheus.CounterVec +} + +// NewBundleProverTask new a bundle collector +func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask { + bp := &BundleProverTask{ + BaseProverTask: BaseProverTask{ + db: db, + chainCfg: chainCfg, + cfg: cfg, + blockOrm: orm.NewL2Block(db), + chunkOrm: orm.NewChunk(db), + batchOrm: orm.NewBatch(db), + bundleOrm: orm.NewBundle(db), + proverTaskOrm: orm.NewProverTask(db), + proverBlockListOrm: orm.NewProverBlockList(db), + }, + bundleAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "coordinator_bundle_attempts_exceed_total", + Help: "Total number of bundle attempts exceed.", + }), + bundleTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "coordinator_bundle_get_task_total", + Help: "Total number of bundle get task.", + }, []string{"fork_name"}), + bundleTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "bundle"), + } + return bp +} + +// Assign load and assign batch tasks +func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { + taskCtx, err := bp.checkParameter(ctx) + if err != nil || taskCtx == nil { + return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) + } + + maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession + maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts + var bundleTask *orm.Bundle + for i := 0; i < 5; i++ { + var getTaskError error + var tmpBundleTask *orm.Bundle + tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts) + if getTaskError != nil { + log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) + return nil, ErrCoordinatorInternalFailure + } + + // Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned` + // bundle to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql. 
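For orientation, the assigned-then-unassigned lookup that this comment motivates boils down to the sketch below. It is an illustration only, not part of the patch: the bundleStore interface, pickBundle helper and fakeStore are hypothetical stand-ins for the gorm-backed Bundle ORM methods used in this file.

package main

import "fmt"

type bundle struct{ Hash string }

// bundleStore stands in for the Bundle ORM: one lookup per proving_status
// value, so each query can use the index on that column.
type bundleStore interface {
	GetAssignedBundle(maxActive, maxTotal int) (*bundle, error)
	GetUnassignedBundle(maxActive, maxTotal int) (*bundle, error)
}

// pickBundle mirrors the surrounding retry loop's two queries: try already-assigned
// work first (a task may be handed to several provers), then fall back to unassigned
// work, instead of issuing a single `proving_status IN (1, 2)` query.
func pickBundle(s bundleStore, maxActive, maxTotal int) (*bundle, error) {
	b, err := s.GetAssignedBundle(maxActive, maxTotal)
	if err != nil || b != nil {
		return b, err
	}
	return s.GetUnassignedBundle(maxActive, maxTotal)
}

// fakeStore is a tiny in-memory stand-in so the sketch runs on its own.
type fakeStore struct{ assigned, unassigned *bundle }

func (f fakeStore) GetAssignedBundle(_, _ int) (*bundle, error)   { return f.assigned, nil }
func (f fakeStore) GetUnassignedBundle(_, _ int) (*bundle, error) { return f.unassigned, nil }

func main() {
	s := fakeStore{unassigned: &bundle{Hash: "0xabc"}}
	if b, err := pickBundle(s, 1, 5); err == nil && b != nil {
		fmt.Println("picked bundle", b.Hash) // falls back to the unassigned bundle
	}
}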
+ if tmpBundleTask == nil { + tmpBundleTask, getTaskError = bp.bundleOrm.GetUnassignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts) + if getTaskError != nil { + log.Error("failed to get unassigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) + return nil, ErrCoordinatorInternalFailure + } + } + + if tmpBundleTask == nil { + log.Debug("get empty bundle", "height", getTaskParameter.ProverHeight) + return nil, nil + } + + rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts) + if updateAttemptsErr != nil { + log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr) + return nil, ErrCoordinatorInternalFailure + } + + if rowsAffected == 0 { + time.Sleep(100 * time.Millisecond) + continue + } + + bundleTask = tmpBundleTask + break + } + + if bundleTask == nil { + log.Debug("get empty unassigned bundle after retry 5 times", "height", getTaskParameter.ProverHeight) + return nil, nil + } + + log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName) + + hardForkName, getHardForkErr := bp.hardForkName(ctx, bundleTask) + if getHardForkErr != nil { + bp.recoverActiveAttempts(ctx, bundleTask) + log.Error("retrieve hard fork name by bundle failed", "task_id", bundleTask.Hash, "err", getHardForkErr) + return nil, ErrCoordinatorInternalFailure + } + + proverTask := orm.ProverTask{ + TaskID: bundleTask.Hash, + ProverPublicKey: taskCtx.PublicKey, + TaskType: int16(message.ProofTypeBundle), + ProverName: taskCtx.ProverName, + ProverVersion: taskCtx.ProverVersion, + ProvingStatus: int16(types.ProverAssigned), + FailureType: int16(types.ProverTaskFailureTypeUndefined), + // here why need use UTC time. see scroll/common/database/db.go + AssignedAt: utils.NowUTC(), + } + + // Store session info. 
+ if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil { + bp.recoverActiveAttempts(ctx, bundleTask) + log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err) + return nil, ErrCoordinatorInternalFailure + } + + taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName) + if err != nil { + bp.recoverActiveAttempts(ctx, bundleTask) + log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err) + return nil, ErrCoordinatorInternalFailure + } + + bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc() + bp.bundleTaskGetTaskProver.With(prometheus.Labels{ + coordinatorType.LabelProverName: proverTask.ProverName, + coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey, + coordinatorType.LabelProverVersion: proverTask.ProverVersion, + }).Inc() + + return taskMsg, nil +} + +func (bp *BundleProverTask) hardForkName(ctx *gin.Context, bundleTask *orm.Bundle) (string, error) { + startBatch, getBatchErr := bp.batchOrm.GetBatchByHash(ctx, bundleTask.StartBatchHash) + if getBatchErr != nil { + return "", getBatchErr + } + + startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash) + if getChunkErr != nil { + return "", getChunkErr + } + + l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber) + if getBlockErr != nil { + return "", getBlockErr + } + + hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp) + return hardForkName, nil +} + +func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) { + // get bundle from db + batches, err := bp.batchOrm.GetBatchesByBundleHash(ctx, task.TaskID) + if err != nil { + err = fmt.Errorf("failed to get batch proofs for batch task id:%s err:%w ", task.TaskID, err) + return nil, err + } + + if len(batches) == 0 { + return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID) + } + + var batchProofs []*message.BatchProof + for _, batch := range batches { + var proof message.BatchProof + if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil { + return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash) + } + batchProofs = append(batchProofs, &proof) + } + + taskDetail := message.BundleTaskDetail{ + BatchProofs: batchProofs, + } + + batchProofsBytes, err := json.Marshal(taskDetail) + if err != nil { + return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err) + } + + taskMsg := &coordinatorType.GetTaskSchema{ + UUID: task.UUID.String(), + TaskID: task.TaskID, + TaskType: int(message.ProofTypeBundle), + TaskData: string(batchProofsBytes), + HardForkName: hardForkName, + } + return taskMsg, nil +} + +func (bp *BundleProverTask) recoverActiveAttempts(ctx *gin.Context, bundleTask *orm.Bundle) { + if err := bp.bundleOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), bundleTask.Hash); err != nil { + log.Error("failed to recover bundle active attempts", "hash", bundleTask.Hash, "error", err) + } +} diff --git a/coordinator/internal/logic/provertask/chunk_prover_task.go b/coordinator/internal/logic/provertask/chunk_prover_task.go index 78bbbe61a5..b5527ff495 100644 --- a/coordinator/internal/logic/provertask/chunk_prover_task.go +++ 
b/coordinator/internal/logic/provertask/chunk_prover_task.go @@ -3,7 +3,6 @@ package provertask import ( "context" "encoding/json" - "errors" "fmt" "time" @@ -34,17 +33,12 @@ type ChunkProverTask struct { } // NewChunkProverTask new a chunk prover task -func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask { - forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg) - log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap) +func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask { cp := &ChunkProverTask{ BaseProverTask: BaseProverTask{ - vkMap: vkMap, - reverseVkMap: reverseMap(vkMap), db: db, cfg: cfg, - nameForkMap: nameForkMap, - forkHeights: forkHeights, + chainCfg: chainCfg, chunkOrm: orm.NewChunk(db), blockOrm: orm.NewL2Block(db), proverTaskOrm: orm.NewProverTask(db), @@ -63,13 +57,11 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go return cp } -type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error) - -func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext, - blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) { - fromBlockNum, toBlockNum := blockRange.from, blockRange.to - if toBlockNum > getTaskParameter.ProverHeight { - toBlockNum = getTaskParameter.ProverHeight + 1 +// Assign the chunk proof which need to prove +func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { + taskCtx, err := cp.checkParameter(ctx) + if err != nil || taskCtx == nil { + return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) } maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession @@ -78,7 +70,7 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt for i := 0; i < 5; i++ { var getTaskError error var tmpChunkTask *orm.Chunk - tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts) + tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts) if getTaskError != nil { log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) return nil, ErrCoordinatorInternalFailure @@ -87,7 +79,7 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt // Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned` // chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql. 
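The chunk path above follows the same assigned-then-unassigned pattern. The other change running through this file is that the hard fork is now resolved from the task's start L2 block via forks.GetHardforkName (see the hardForkName helper added further down), rather than from per-fork VK maps and block ranges. A simplified, self-contained stand-in for that lookup is sketched below; the forkConfig fields and the fork names other than "darwin" and "shanghai" are assumptions for illustration, not the real params.ChainConfig API.

package main

import "fmt"

// forkConfig is an illustrative stand-in for the chain config consulted by forks.GetHardforkName.
type forkConfig struct {
	bernoulliBlock uint64 // assumed: first block of the bernoulli fork
	curieBlock     uint64 // assumed: first block of the curie fork
	darwinTime     uint64 // assumed: activation timestamp of the darwin fork
}

// hardforkName picks the latest fork active at the given block number and timestamp,
// which is how the new hardForkName helpers resolve the fork from a task's start block.
func hardforkName(cfg forkConfig, blockNumber, blockTime uint64) string {
	switch {
	case blockTime >= cfg.darwinTime:
		return "darwin"
	case blockNumber >= cfg.curieBlock:
		return "curie"
	case blockNumber >= cfg.bernoulliBlock:
		return "bernoulli"
	default:
		return "shanghai"
	}
}

func main() {
	cfg := forkConfig{bernoulliBlock: 100, curieBlock: 200, darwinTime: 1_700_000_000}
	fmt.Println(hardforkName(cfg, 250, 1_699_999_000)) // curie: past the curie block, before darwin time
	fmt.Println(hardforkName(cfg, 250, 1_700_000_123)) // darwin: past the darwin activation time
}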
if tmpChunkTask == nil { - tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts) + tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts) if getTaskError != nil { log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) return nil, ErrCoordinatorInternalFailure @@ -120,17 +112,12 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt } log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName) - var ( - proverVersion = taskCtx.ProverVersion - hardForkName = taskCtx.HardForkName - err error - ) - if getHardForkName != nil { - hardForkName, err = getHardForkName(chunkTask) - if err != nil { - log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error()) - return nil, ErrCoordinatorInternalFailure - } + + hardForkName, getHardForkErr := cp.hardForkName(ctx, chunkTask) + if getHardForkErr != nil { + cp.recoverActiveAttempts(ctx, chunkTask) + log.Error("retrieve hard fork name by chunk failed", "task_id", chunkTask.Hash, "err", getHardForkErr) + return nil, ErrCoordinatorInternalFailure } proverTask := orm.ProverTask{ @@ -138,10 +125,10 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt ProverPublicKey: taskCtx.PublicKey, TaskType: int16(message.ProofTypeChunk), ProverName: taskCtx.ProverName, - ProverVersion: proverVersion, + ProverVersion: taskCtx.ProverVersion, ProvingStatus: int16(types.ProverAssigned), FailureType: int16(types.ProverTaskFailureTypeUndefined), - // here why need use UTC time. see scroll/common/databased/db.go + // here why need use UTC time. 
see scroll/common/database/db.go AssignedAt: utils.NowUTC(), } @@ -151,7 +138,7 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt return nil, ErrCoordinatorInternalFailure } - taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask) + taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName) if err != nil { cp.recoverActiveAttempts(ctx, chunkTask) log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err) @@ -168,96 +155,16 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt return taskMsg, nil } -func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { - blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName) - if err != nil { - return nil, err - } - return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil) -} - -func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { - var ( - hardForkNames [2]string - blockRanges [2]*blockRange - err error - ) - for i := 0; i < 2; i++ { - hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]] - blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i]) - if err != nil { - return nil, err - } - } - blockRange, err := blockRanges[0].merge(*blockRanges[1]) - if err != nil { - return nil, err - } - var hardForkName string - getHardForkName := func(chunk *orm.Chunk) (string, error) { - for i := 0; i < 2; i++ { - if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) { - hardForkName = hardForkNames[i] - break - } - } - if hardForkName == "" { - log.Warn("get chunk not belongs to any hard fork name", "chunk id", chunk.Index) - return "", fmt.Errorf("get chunk not belongs to any hard fork name, chunk id: %d", chunk.Index) - } - return hardForkName, nil - } - schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName) - if schema != nil && err == nil { - schema.HardForkName = hardForkName - return schema, nil - } - return schema, err -} - -type blockRange struct { - from uint64 - to uint64 -} - -func (r *blockRange) merge(o blockRange) (*blockRange, error) { - if r.from == o.to { - return &blockRange{o.from, r.to}, nil - } else if r.to == o.from { - return &blockRange{r.from, o.to}, nil - } - return nil, errors.New("two ranges are not adjacent") -} - -func (r *blockRange) contains(start, end uint64) bool { - return r.from <= start && r.to > end -} - -func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) { - hardForkNumber, err := cp.getHardForkNumberByName(hardForkName) - if err != nil { - log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName) - return nil, err - } - - fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights) - return &blockRange{fromBlockNum, toBlockNum}, nil -} - -// Assign the chunk proof which need to prove -func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { - taskCtx, err := cp.checkParameter(ctx, getTaskParameter) - if err != nil || taskCtx == nil { - return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) - } - - if len(getTaskParameter.VKs) > 0 { 
- return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter) +func (cp *ChunkProverTask) hardForkName(ctx *gin.Context, chunkTask *orm.Chunk) (string, error) { + l2Block, getBlockErr := cp.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunkTask.StartBlockNumber) + if getBlockErr != nil { + return "", getBlockErr } - return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter) + hardForkName := forks.GetHardforkName(cp.chainCfg, l2Block.Number, l2Block.BlockTimestamp) + return hardForkName, nil } -func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) { +func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) { // Get block hashes. blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID) if dbErr != nil || len(blockHashes) == 0 { @@ -273,10 +180,11 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove } proverTaskSchema := &coordinatorType.GetTaskSchema{ - UUID: task.UUID.String(), - TaskID: task.TaskID, - TaskType: int(message.ProofTypeChunk), - TaskData: string(blockHashesBytes), + UUID: task.UUID.String(), + TaskID: task.TaskID, + TaskType: int(message.ProofTypeChunk), + TaskData: string(blockHashesBytes), + HardForkName: hardForkName, } return proverTaskSchema, nil diff --git a/coordinator/internal/logic/provertask/prover_task.go b/coordinator/internal/logic/provertask/prover_task.go index 10da144847..de3a6f3552 100644 --- a/coordinator/internal/logic/provertask/prover_task.go +++ b/coordinator/internal/logic/provertask/prover_task.go @@ -8,11 +8,9 @@ import ( "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" "gorm.io/gorm" - "scroll-tech/common/version" - "scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/orm" coordinatorType "scroll-tech/coordinator/internal/types" @@ -21,8 +19,11 @@ import ( var ( // ErrCoordinatorInternalFailure coordinator internal db failure ErrCoordinatorInternalFailure = errors.New("coordinator internal error") - // ErrHardForkName indicates client request with the wrong hard fork name - ErrHardForkName = errors.New("wrong hard fork name") +) + +var ( + getTaskCounterInitOnce sync.Once + getTaskCounterVec *prometheus.CounterVec = nil ) // ProverTask the interface of a collector who send data to prover @@ -30,30 +31,15 @@ type ProverTask interface { Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) } -func reverseMap(input map[string]string) map[string]string { - output := make(map[string]string, len(input)) - for k, v := range input { - if k != "" { - output[v] = k - } - } - return output -} - // BaseProverTask a base prover task which contain series functions type BaseProverTask struct { - cfg *config.Config - db *gorm.DB - - // key is hardForkName, value is vk - vkMap map[string]string - // key is vk, value is hardForkName - reverseVkMap map[string]string - nameForkMap map[string]uint64 - forkHeights []uint64 + cfg *config.Config + chainCfg *params.ChainConfig + db *gorm.DB batchOrm *orm.Batch chunkOrm *orm.Chunk + bundleOrm *orm.Bundle blockOrm *orm.L2Block proverTaskOrm *orm.ProverTask proverBlockListOrm *orm.ProverBlockList @@ -63,11 +49,10 @@ type proverTaskContext struct { 
PublicKey string ProverName string ProverVersion string - HardForkName string } // checkParameter check the prover task parameter illegal -func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*proverTaskContext, error) { +func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, error) { var ptc proverTaskContext publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey) @@ -88,44 +73,6 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor } ptc.ProverVersion = proverVersion.(string) - if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) { - return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string)) - } - - // signals that the prover is multi-circuits version - if len(getTaskParameter.VKs) > 0 { - if len(getTaskParameter.VKs) != 2 { - return nil, errors.New("parameter vks length must be 2") - } - for _, vk := range getTaskParameter.VKs { - if _, exists := b.reverseVkMap[vk]; !exists { - return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk) - } - } - } else { - hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName) - if !hardForkNameExist { - return nil, errors.New("get hard fork name from context failed") - } - ptc.HardForkName = hardForkName.(string) - - vk, vkExist := b.vkMap[ptc.HardForkName] - if !vkExist { - return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap) - } - - // if the prover has a different vk - if getTaskParameter.VK != vk { - log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName) - // if the prover reports a different prover version - if !version.CheckScrollProverVersion(proverVersion.(string)) { - return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string)) - } - // if the prover reports a same prover version - return nil, errors.New("incompatible vk. please check your params files or config files") - } - } - isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string)) if err != nil { return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion) @@ -145,26 +92,6 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor return &ptc, nil } -func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error) { - // when the first hard fork upgrade, the prover don't pass the fork_name to coordinator. - // so coordinator need to be compatible. 
- if forkName == "" { - return 0, nil - } - - hardForkNumber, exist := b.nameForkMap[forkName] - if !exist { - return 0, ErrHardForkName - } - - return hardForkNumber, nil -} - -var ( - getTaskCounterInitOnce sync.Once - getTaskCounterVec *prometheus.CounterVec = nil -) - func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec { getTaskCounterInitOnce.Do(func() { getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{ diff --git a/coordinator/internal/logic/submitproof/proof_receiver.go b/coordinator/internal/logic/submitproof/proof_receiver.go index b437799dd3..c8450a7d54 100644 --- a/coordinator/internal/logic/submitproof/proof_receiver.go +++ b/coordinator/internal/logic/submitproof/proof_receiver.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "strings" "time" @@ -12,8 +11,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" "gorm.io/gorm" + "scroll-tech/common/forks" "scroll-tech/common/types" "scroll-tech/common/types/message" @@ -38,6 +39,8 @@ var ( ErrValidatorFailureVerifiedFailed = errors.New("verification failed, verifier returns error") // ErrValidatorSuccessInvalidProof successful verified and the proof is invalid ErrValidatorSuccessInvalidProof = errors.New("verification succeeded, it's an invalid proof") + // ErrGetHardForkNameFailed failed to get hard fork name + ErrGetHardForkNameFailed = errors.New("failed to get hard fork name") // ErrCoordinatorInternalFailure coordinator internal db failure ErrCoordinatorInternalFailure = errors.New("coordinator internal error") ) @@ -46,10 +49,13 @@ var ( type ProofReceiverLogic struct { chunkOrm *orm.Chunk batchOrm *orm.Batch + bundleOrm *orm.Bundle + blockOrm *orm.L2Block proverTaskOrm *orm.ProverTask - db *gorm.DB - cfg *config.ProverManager + db *gorm.DB + cfg *config.ProverManager + chainCfg *params.ChainConfig verifier *verifier.Verifier @@ -66,14 +72,17 @@ type ProofReceiverLogic struct { } // NewSubmitProofReceiverLogic create a proof receiver logic -func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic { +func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic { return &ProofReceiverLogic{ chunkOrm: orm.NewChunk(db), batchOrm: orm.NewBatch(db), + bundleOrm: orm.NewBundle(db), + blockOrm: orm.NewL2Block(db), proverTaskOrm: orm.NewProverTask(db), - cfg: cfg, - db: db, + cfg: cfg, + chainCfg: chainCfg, + db: db, verifier: vf, @@ -124,7 +133,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *ver // HandleZkProof handle a ZkProof submitted from a prover. // For now only proving/verifying error will lead to setting status as skipped. // db/unmarshal errors will not because they are errors on the business logic side. 
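// Editor's note (sketch): NewSubmitProofReceiverLogic now also takes the L2 chain config,
// so the receiver can resolve the hard fork name itself instead of trusting a
// hard_fork_name sent by the prover. A minimal wiring sketch under assumptions: the
// surrounding setup code providing cfg, chainCfg, db and vf is not shown in this diff,
// and prometheus.DefaultRegisterer is used only as an example registerer.
package sketch

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/params"
	"gorm.io/gorm"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/logic/submitproof"
	"scroll-tech/coordinator/internal/logic/verifier"
)

func newProofReceiver(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier) *submitproof.ProofReceiverLogic {
	// The chain config is what later lets the receiver map a task back to a fork name.
	return submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, chainCfg, db, vf, prometheus.DefaultRegisterer)
}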
-func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) error { +func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error { m.proofReceivedTotal.Inc() pk := ctx.GetString(coordinatorType.PublicKey) if len(pk) == 0 { @@ -134,37 +143,20 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P if len(pv) == 0 { return errors.New("get ProverVersion from context failed") } - // use hard_fork_name from parameter first - // if prover support multi hard_forks, the real hard_fork_name is not set to the gin context - hardForkName := proofParameter.HardForkName - if hardForkName == "" { - hardForkName = ctx.GetString(coordinatorType.HardForkName) - } - var proverTask *orm.ProverTask - var err error - if proofParameter.UUID != "" { - proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk) - if proverTask == nil || err != nil { - log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err) - return ErrValidatorFailureProverTaskEmpty - } - } else { - // TODO When prover all have upgrade, need delete this logic - proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv) - if proverTask == nil || err != nil { - log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err) - return ErrValidatorFailureProverTaskEmpty - } + proverTask, err := m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk) + if proverTask == nil || err != nil { + log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofParameter.TaskID, "error", err) + return ErrValidatorFailureProverTaskEmpty } proofTime := time.Since(proverTask.CreatedAt) proofTimeSec := uint64(proofTime.Seconds()) - log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName, - "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName) + log.Info("handling zk proof", "proofID", proofParameter.TaskID, "proverName", proverTask.ProverName, + "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec) - if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil { + if err = m.validator(ctx.Copy(), proverTask, pk, proofParameter); err != nil { return err } @@ -172,18 +164,39 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P success := true var verifyErr error - // only verify batch proof. 
chunk proof verifier have been disabled after Bernoulli - if proofMsg.Type == message.ProofTypeBatch { - success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName) + hardForkName, getHardForkErr := m.hardForkName(ctx, proofParameter.TaskID, proofParameter.TaskType) + if getHardForkErr != nil { + return ErrGetHardForkNameFailed + } + + switch message.ProofType(proofParameter.TaskType) { + case message.ProofTypeChunk: + var chunkProof message.ChunkProof + if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil { + return unmarshalErr + } + success, verifyErr = m.verifier.VerifyChunkProof(&chunkProof, hardForkName) + case message.ProofTypeBatch: + var batchProof message.BatchProof + if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil { + return unmarshalErr + } + success, verifyErr = m.verifier.VerifyBatchProof(&batchProof, hardForkName) + case message.ProofTypeBundle: + var bundleProof message.BundleProof + if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil { + return unmarshalErr + } + success, verifyErr = m.verifier.VerifyBundleProof(&bundleProof) } if verifyErr != nil || !success { m.verifierFailureTotal.WithLabelValues(pv).Inc() - m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg) + m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofParameter) - log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName, - "prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr) + log.Info("proof verified by coordinator failed", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName, + "prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec, "error", verifyErr) if verifyErr != nil { return ErrValidatorFailureVerifiedFailed @@ -193,13 +206,13 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds()) - log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName, - "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName) + log.Info("proof verified and valid", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName, + "prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec) - if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil { + if err := m.closeProofTask(ctx.Copy(), proverTask, proofParameter, proofTimeSec); err != nil { m.proofSubmitFailure.Inc() - m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg) + m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofParameter) return ErrCoordinatorInternalFailure } @@ -212,7 +225,6 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch if err != nil { return err } - allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batch.BatchHash) if err != nil { return err @@ -226,7 +238,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch return nil } -func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, 
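// Editor's note (sketch): proof verification is now dispatched on the submitted task
// type, with the proof arriving as a JSON string in SubmitProofParameter.Proof. The
// helper below condenses that switch into one place; it mirrors the logic added above
// and uses the verifier methods (VerifyChunkProof, VerifyBatchProof, VerifyBundleProof)
// shown elsewhere in this diff.
package sketch

import (
	"encoding/json"
	"fmt"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/logic/verifier"
)

func verifySubmittedProof(v *verifier.Verifier, taskType int, rawProof, hardForkName string) (bool, error) {
	switch message.ProofType(taskType) {
	case message.ProofTypeChunk:
		var proof message.ChunkProof
		if err := json.Unmarshal([]byte(rawProof), &proof); err != nil {
			return false, err
		}
		return v.VerifyChunkProof(&proof, hardForkName)
	case message.ProofTypeBatch:
		var proof message.BatchProof
		if err := json.Unmarshal([]byte(rawProof), &proof); err != nil {
			return false, err
		}
		return v.VerifyBatchProof(&proof, hardForkName)
	case message.ProofTypeBundle:
		var proof message.BundleProof
		if err := json.Unmarshal([]byte(rawProof), &proof); err != nil {
			return false, err
		}
		// Bundle proofs are verified without a fork name, per the new verifier API.
		return v.VerifyBundleProof(&proof)
	default:
		return false, fmt.Errorf("unknown proof type: %d", taskType)
	}
}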
proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) { +func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofParameter coordinatorType.SubmitProofParameter) (err error) { defer func() { if err != nil { m.validateFailureTotal.Inc() @@ -243,9 +255,9 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov // (ii) set the maximum failure retry times log.Warn( "cannot submit valid proof for a prover task twice", - "taskType", proverTask.TaskType, "hash", proofMsg.ID, + "taskType", proverTask.TaskType, "hash", proofParameter.TaskID, "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion, - "proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName, + "proverPublicKey", proverTask.ProverPublicKey, ) return ErrValidatorFailureProverTaskCannotSubmitTwice } @@ -253,61 +265,60 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov proofTime := time.Since(proverTask.CreatedAt) proofTimeSec := uint64(proofTime.Seconds()) - if proofMsg.Status != message.StatusOk { + if proofParameter.Status != int(message.StatusOk) { // Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs. failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1) - m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofMsg) + m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofParameter) m.validateFailureProverTaskStatusNotOk.Inc() log.Info("proof generated by prover failed", - "taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName, + "taskType", proofParameter.TaskType, "hash", proofParameter.TaskID, "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType, - "failureMessage", failureMsg, "forkName", forkName) + "failureMessage", failureMsg) return ErrValidatorFailureProofMsgStatusNotOk } // if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout { m.validateFailureProverTaskTimeout.Inc() - log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType, - "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName) + log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType, + "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec) return ErrValidatorFailureProofTimeout } // store the proof to prover task - if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil { - log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName, + if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil { + log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk, "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr) } // if the batch/chunk have proved and verifier success, need skip this submit proof - if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) { - m.proofRecover(ctx, 
proverTask, types.ProverTaskFailureTypeObjectAlreadyVerified, proofMsg) - + if m.checkIsTaskSuccess(ctx, proofParameter.TaskID, message.ProofType(proofParameter.TaskType)) { + m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeObjectAlreadyVerified, proofParameter) m.validateFailureProverTaskHaveVerifier.Inc() - log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID, - "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName) + log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofParameter.TaskID, + "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk) return ErrValidatorFailureTaskHaveVerifiedSuccess } return nil } -func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofMsg *message.ProofMsg) { +func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofParameter coordinatorType.SubmitProofParameter) { log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskUnassigned.String()) - if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofInvalid, failureType, 0); err != nil { + if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofInvalid, failureType, 0); err != nil { log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err) } } -func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, proofTimeSec uint64) error { +func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter, proofTimeSec uint64) error { log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String()) - if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil { + if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil { log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err) return err } @@ -316,14 +327,14 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm // UpdateProofStatus update the chunk/batch task and session info status func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask, - proofMsg *message.ProofMsg, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error { + proofParameter coordinatorType.SubmitProofParameter, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error { err := m.db.Transaction(func(tx *gorm.DB) error { if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatusAndFailureType(ctx, proverTask.UUID, status, failureType, tx); 
updateErr != nil { log.Error("failed to update prover task proving status and failure type", "uuid", proverTask.UUID, "error", updateErr) return updateErr } - switch proofMsg.Type { + switch message.ProofType(proofParameter.TaskType) { case message.ProofTypeChunk: if err := m.chunkOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil { log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err) @@ -334,21 +345,28 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask * log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err) return err } + case message.ProofTypeBundle: + if err := m.bundleOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil { + log.Error("failed to update bundle proving_status as failed", "hash", proverTask.TaskID, "error", err) + return err + } } // if the block batch has proof verified, so the failed status not update block batch proving status - if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) { + if m.checkIsTaskSuccess(ctx, proverTask.TaskID, message.ProofType(proofParameter.TaskType)) { log.Info("update proof status skip because this chunk/batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey) return nil } if status == types.ProverProofValid { var storeProofErr error - switch proofMsg.Type { + switch message.ProofType(proofParameter.TaskType) { case message.ProofTypeChunk: - storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, types.ProvingTaskVerified, proofTimeSec, tx) + storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx) case message.ProofTypeBatch: - storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.BatchProof, types.ProvingTaskVerified, proofTimeSec, tx) + storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx) + case message.ProofTypeBundle: + storeProofErr = m.bundleOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx) } if storeProofErr != nil { log.Error("failed to store chunk/batch proof and proving status", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr) @@ -362,7 +380,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask * return err } - if status == types.ProverProofValid && proofMsg.Type == message.ProofTypeChunk { + if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk { if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil { log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr) return checkReadyErr @@ -387,24 +405,63 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string if err != nil { return false } + case message.ProofTypeBundle: + provingStatus, err = m.bundleOrm.GetProvingStatusByHash(ctx, hash) + if err != nil { + return false + } } - return provingStatus == types.ProvingTaskVerified } -func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) error { - // store the 
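// Editor's note (sketch): proofs are now persisted exactly as the JSON string submitted
// by the prover ([]byte(proofParameter.Proof)) instead of being re-marshalled from a
// typed message, so a reader of the stored proof column can decode it back into the
// message type. A minimal sketch, assuming the stored bytes are the chunk-proof JSON:
package sketch

import (
	"encoding/json"

	"scroll-tech/common/types/message"
)

func decodeStoredChunkProof(storedProof []byte) (*message.ChunkProof, error) {
	var proof message.ChunkProof
	if err := json.Unmarshal(storedProof, &proof); err != nil {
		return nil, err
	}
	return &proof, nil
}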
proof to prover task - var proofBytes []byte - var marshalErr error - switch proofMsg.Type { +func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter) error { + return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, []byte(proofParameter.Proof)) +} + +func (m *ProofReceiverLogic) hardForkName(ctx *gin.Context, hash string, proofType int) (string, error) { + var ( + bundle *orm.Bundle + batch *orm.Batch + chunk *orm.Chunk + err error + ) + + switch message.ProofType(proofType) { case message.ProofTypeChunk: - proofBytes, marshalErr = json.Marshal(proofMsg.ChunkProof) + chunk, err = m.chunkOrm.GetChunkByHash(ctx, hash) case message.ProofTypeBatch: - proofBytes, marshalErr = json.Marshal(proofMsg.BatchProof) + batch, err = m.batchOrm.GetBatchByHash(ctx, hash) + case message.ProofTypeBundle: + bundle, err = m.bundleOrm.GetBundleByHash(ctx, hash) + } + + if err != nil { + return "", err + } + + if bundle != nil { + batch, err = m.batchOrm.GetBatchByHash(ctx, bundle.StartBatchHash) + if err != nil { + return "", err + } } - if len(proofBytes) == 0 || marshalErr != nil { - return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr) + if batch != nil { + chunk, err = m.chunkOrm.GetChunkByHash(ctx, batch.StartChunkHash) + if err != nil { + return "", err + } } - return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, proofBytes) + + if chunk == nil { + return "", errors.New("failed to find chunk") + } + + l2Block, getBlockErr := m.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunk.StartBlockNumber) + if getBlockErr != nil { + return "", getBlockErr + } + + hardForkName := forks.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp) + return hardForkName, nil } diff --git a/coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey b/coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey index f6a905f183..0b4ab2657c 100644 Binary files a/coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey and b/coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey differ diff --git a/coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey b/coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey index 3bccfcd986..7318195ddf 100644 Binary files a/coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey and b/coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey differ diff --git a/coordinator/internal/logic/verifier/legacy_vk/upgrade_vks.sh b/coordinator/internal/logic/verifier/legacy_vk/upgrade_vks.sh new file mode 100755 index 0000000000..bc5fad9bd4 --- /dev/null +++ b/coordinator/internal/logic/verifier/legacy_vk/upgrade_vks.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +work_dir="$(dirname -- "${BASH_SOURCE[0]}")" +work_dir="$(cd -- "$work_dir" && pwd)" +echo $work_dir + +rm $work_dir/*.vkey + +version=release-v0.11.4 +wget https://circuit-release.s3.us-west-2.amazonaws.com/${version}/chunk_vk.vkey -O $work_dir/chunk_vk.vkey +wget https://circuit-release.s3.us-west-2.amazonaws.com/${version}/agg_vk.vkey -O $work_dir/agg_vk.vkey \ No newline at end of file diff --git a/coordinator/internal/logic/verifier/mock.go b/coordinator/internal/logic/verifier/mock.go index 114a452399..7615c51b6e 100644 --- a/coordinator/internal/logic/verifier/mock.go +++ b/coordinator/internal/logic/verifier/mock.go @@ -10,29 +10,13 @@ import ( // NewVerifier Sets up a mock verifier. 
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) { - batchVKMap := map[string]string{ - "shanghai": "", - "bernoulli": "", - "london": "", - "istanbul": "", - "homestead": "", - "eip155": "", - } - chunkVKMap := map[string]string{ - "shanghai": "", - "bernoulli": "", - "london": "", - "istanbul": "", - "homestead": "", - "eip155": "", - } - batchVKMap[cfg.ForkName] = "" - chunkVKMap[cfg.ForkName] = "" + batchVKMap := map[string]string{cfg.ForkName: "mock_vk"} + chunkVKMap := map[string]string{cfg.ForkName: "mock_vk"} return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil } // VerifyChunkProof return a mock verification result for a ChunkProof. -func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) { +func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) { if string(proof.Proof) == InvalidTestProof { return false, nil } @@ -46,3 +30,11 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) } return true, nil } + +// VerifyBundleProof return a mock verification result for a BundleProof. +func (v *Verifier) VerifyBundleProof(proof *message.BundleProof) (bool, error) { + if string(proof.Proof) == InvalidTestProof { + return false, nil + } + return true, nil +} diff --git a/coordinator/internal/logic/verifier/types.go b/coordinator/internal/logic/verifier/types.go index 854e926fb5..9d4fdba931 100644 --- a/coordinator/internal/logic/verifier/types.go +++ b/coordinator/internal/logic/verifier/types.go @@ -9,7 +9,8 @@ const InvalidTestProof = "this is a invalid proof" // Verifier represents a rust ffi to a halo2 verifier. type Verifier struct { - cfg *config.VerifierConfig - ChunkVKMap map[string]string - BatchVKMap map[string]string + cfg *config.VerifierConfig + ChunkVKMap map[string]string + BatchVKMap map[string]string + BundleVkMap map[string]string } diff --git a/coordinator/internal/logic/verifier/verifier.go b/coordinator/internal/logic/verifier/verifier.go index 36a3d47c48..b394bfba92 100644 --- a/coordinator/internal/logic/verifier/verifier.go +++ b/coordinator/internal/logic/verifier/verifier.go @@ -22,59 +22,51 @@ import ( "github.com/scroll-tech/go-ethereum/log" - "scroll-tech/coordinator/internal/config" - "scroll-tech/common/types/message" + + "scroll-tech/coordinator/internal/config" ) // NewVerifier Sets up a rust ffi to call verify. 
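// Editor's note (sketch): with the simplified mock maps, a mock verifier only knows the
// single fork named in its config, and any proof other than InvalidTestProof verifies.
// A minimal test sketch for the mock bundle path; "curie" is just an example fork name,
// and BundleProof.Proof is assumed to be a byte slice, as the string() conversion in the
// mock suggests.
package sketch

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/logic/verifier"
)

func TestMockBundleVerify(t *testing.T) {
	v, err := verifier.NewVerifier(&config.VerifierConfig{MockMode: true, ForkName: "curie"})
	assert.NoError(t, err)

	// Any proof body other than the sentinel value passes in mock mode.
	ok, verifyErr := v.VerifyBundleProof(&message.BundleProof{Proof: []byte("any proof bytes")})
	assert.NoError(t, verifyErr)
	assert.True(t, ok)

	// The sentinel InvalidTestProof is rejected without an error.
	ok, verifyErr = v.VerifyBundleProof(&message.BundleProof{Proof: []byte(verifier.InvalidTestProof)})
	assert.NoError(t, verifyErr)
	assert.False(t, ok)
}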
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) { if cfg.MockMode { - batchVKMap := map[string]string{ - "shanghai": "", - "bernoulli": "", - "london": "", - "istanbul": "", - "homestead": "", - "eip155": "", - } - chunkVKMap := map[string]string{ - "shanghai": "", - "bernoulli": "", - "london": "", - "istanbul": "", - "homestead": "", - "eip155": "", - } - - batchVKMap[cfg.ForkName] = "" - chunkVKMap[cfg.ForkName] = "" - return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil + chunkVKMap := map[string]string{cfg.ForkName: "mock_vk"} + batchVKMap := map[string]string{cfg.ForkName: "mock_vk"} + bundleVKMap := map[string]string{cfg.ForkName: "mock_vk"} + return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap, BundleVkMap: bundleVKMap}, nil } paramsPathStr := C.CString(cfg.ParamsPath) - assetsPathStr := C.CString(cfg.AssetsPath) + assetsPathLoStr := C.CString(cfg.AssetsPathLo) + assetsPathHiStr := C.CString(cfg.AssetsPathHi) defer func() { C.free(unsafe.Pointer(paramsPathStr)) - C.free(unsafe.Pointer(assetsPathStr)) + C.free(unsafe.Pointer(assetsPathLoStr)) + C.free(unsafe.Pointer(assetsPathHiStr)) }() - C.init_batch_verifier(paramsPathStr, assetsPathStr) - C.init_chunk_verifier(paramsPathStr, assetsPathStr) + C.init_batch_verifier(paramsPathStr, assetsPathHiStr) + C.init_chunk_verifier(paramsPathStr, assetsPathLoStr, assetsPathHiStr) v := &Verifier{ - cfg: cfg, - ChunkVKMap: make(map[string]string), - BatchVKMap: make(map[string]string), + cfg: cfg, + ChunkVKMap: make(map[string]string), + BatchVKMap: make(map[string]string), + BundleVkMap: make(map[string]string), } - batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey")) + bundleVK, err := v.readVK(path.Join(cfg.AssetsPathHi, "vk_bundle.vkey")) if err != nil { return nil, err } - chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey")) + batchVK, err := v.readVK(path.Join(cfg.AssetsPathHi, "vk_batch.vkey")) if err != nil { return nil, err } + chunkVK, err := v.readVK(path.Join(cfg.AssetsPathHi, "vk_chunk.vkey")) + if err != nil { + return nil, err + } + v.BundleVkMap[cfg.ForkName] = bundleVK v.BatchVKMap[cfg.ForkName] = batchVK v.ChunkVKMap[cfg.ForkName] = chunkVK @@ -112,7 +104,34 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) } // VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier. -func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) { +func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) { + if v.cfg.MockMode { + log.Info("Mock mode, verifier disabled") + if string(proof.Proof) == InvalidTestProof { + return false, nil + } + return true, nil + + } + buf, err := json.Marshal(proof) + if err != nil { + return false, err + } + + log.Info("Start to verify chunk proof", "forkName", forkName) + proofStr := C.CString(string(buf)) + forkNameStr := C.CString(forkName) + defer func() { + C.free(unsafe.Pointer(proofStr)) + C.free(unsafe.Pointer(forkNameStr)) + }() + + verified := C.verify_chunk_proof(proofStr, forkNameStr) + return verified != 0, nil +} + +// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier. 
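// Editor's note (sketch): the verifier config now carries two circuit-asset directories,
// a "lo" and a "hi" set; the chunk verifier is initialised with both, the batch verifier
// with the "hi" set only, and the vk_chunk.vkey / vk_batch.vkey / vk_bundle.vkey files are
// read from the "hi" directory. The path values below are placeholders, not values taken
// from this diff.
package sketch

import "scroll-tech/coordinator/internal/config"

func exampleVerifierConfig() *config.VerifierConfig {
	return &config.VerifierConfig{
		MockMode:     false,
		ParamsPath:   "/assets/params",    // placeholder
		AssetsPathLo: "/assets/assets_lo", // placeholder: assets for the lower fork's circuits
		AssetsPathHi: "/assets/assets_hi", // placeholder: assets for the current fork's circuits
		ForkName:     "curie",             // example fork name
	}
}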
+func (v *Verifier) VerifyBundleProof(proof *message.BundleProof) (bool, error) { if v.cfg.MockMode { log.Info("Mock mode, verifier disabled") if string(proof.Proof) == InvalidTestProof { @@ -131,8 +150,8 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) { C.free(unsafe.Pointer(proofStr)) }() - log.Info("Start to verify chunk proof ...") - verified := C.verify_chunk_proof(proofStr) + log.Info("Start to verify bundle proof ...") + verified := C.verify_bundle_proof(proofStr) return verified != 0, nil } @@ -164,7 +183,7 @@ func (v *Verifier) loadEmbedVK() error { return err } - v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes) - v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes) + v.BatchVKMap["curie"] = base64.StdEncoding.EncodeToString(batchVKBytes) + v.ChunkVKMap["curie"] = base64.StdEncoding.EncodeToString(chunkVkBytes) return nil } diff --git a/coordinator/internal/logic/verifier/verifier_test.go b/coordinator/internal/logic/verifier/verifier_test.go index 0e79b9c4b9..4b0be9fcb4 100644 --- a/coordinator/internal/logic/verifier/verifier_test.go +++ b/coordinator/internal/logic/verifier/verifier_test.go @@ -18,7 +18,8 @@ import ( var ( paramsPath = flag.String("params", "/assets/test_params", "params dir") - assetsPath = flag.String("assets", "/assets/test_assets", "assets dir") + assetsPathLo = flag.String("assets", "/assets/test_assets_lo", "assets dir") + assetsPathHi = flag.String("assets", "/assets/test_assets_hi", "assets dir") batchProofPath = flag.String("batch_proof", "/assets/proof_data/batch_proof", "batch proof file path") chunkProofPath1 = flag.String("chunk_proof1", "/assets/proof_data/chunk_proof1", "chunk proof file path 1") chunkProofPath2 = flag.String("chunk_proof2", "/assets/proof_data/chunk_proof2", "chunk proof file path 2") @@ -28,9 +29,10 @@ func TestFFI(t *testing.T) { as := assert.New(t) cfg := &config.VerifierConfig{ - MockMode: false, - ParamsPath: *paramsPath, - AssetsPath: *assetsPath, + MockMode: false, + ParamsPath: *paramsPath, + AssetsPathLo: *assetsPathLo, + AssetsPathHi: *assetsPathHi, } v, err := NewVerifier(cfg) diff --git a/coordinator/internal/orm/batch.go b/coordinator/internal/orm/batch.go index 0e70f97444..a4f8bd77dc 100644 --- a/coordinator/internal/orm/batch.go +++ b/coordinator/internal/orm/batch.go @@ -2,7 +2,6 @@ package orm import ( "context" - "encoding/json" "errors" "fmt" "time" @@ -13,7 +12,6 @@ import ( "gorm.io/gorm" "scroll-tech/common/types" - "scroll-tech/common/types/message" "scroll-tech/common/utils" ) @@ -59,6 +57,9 @@ type Batch struct { BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"` BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"` + // bundle + BundleHash string `json:"bundle_hash" gorm:"column:bundle_hash"` + // metadata CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` @@ -76,12 +77,12 @@ func (*Batch) TableName() string { } // GetUnassignedBatch retrieves unassigned batch based on the specified limit. -// The returned batch are sorted in ascending order by their index. -func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { +// The returned batches are sorted in ascending order by their index. 
+func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { var batch Batch db := o.db.WithContext(ctx) - sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;", - int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex) + sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;", + int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady)) err := db.Raw(sql).Scan(&batch).Error if err != nil { return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err) @@ -93,12 +94,12 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun } // GetAssignedBatch retrieves assigned batch based on the specified limit. -// The returned batch are sorted in ascending order by their index. -func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { +// The returned batches are sorted in ascending order by their index. +func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { var batch Batch db := o.db.WithContext(ctx) - sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;", - int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex) + sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;", + int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady)) err := db.Raw(sql).Scan(&batch).Error if err != nil { return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err) @@ -184,6 +185,46 @@ func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int1 return batch.ActiveAttempts, batch.TotalAttempts, nil } +// CheckIfBundleBatchProofsAreReady checks if all proofs for all batches of a given bundleHash are collected. +func (o *Batch) CheckIfBundleBatchProofsAreReady(ctx context.Context, bundleHash string) (bool, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("bundle_hash = ? AND proving_status != ?", bundleHash, types.ProvingTaskVerified) + + var count int64 + if err := db.Count(&count).Error; err != nil { + return false, fmt.Errorf("Chunk.CheckIfBundleBatchProofsAreReady error: %w, bundle hash: %v", err, bundleHash) + } + return count == 0, nil +} + +// GetBatchByHash retrieves the given batch. 
+func (o *Batch) GetBatchByHash(ctx context.Context, hash string) (*Batch, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("hash = ?", hash) + + var batch Batch + if err := db.First(&batch).Error; err != nil { + return nil, fmt.Errorf("Batch.GetBatchByHash error: %w, batch hash: %v", err, hash) + } + return &batch, nil +} + +// GetBatchesByBundleHash retrieves the given batch. +func (o *Batch) GetBatchesByBundleHash(ctx context.Context, bundleHash string) ([]*Batch, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("bundle_hash = ?", bundleHash) + db = db.Order("index ASC") + + var batches []*Batch + if err := db.Find(&batches).Error; err != nil { + return nil, fmt.Errorf("Batch.GetBatchesByBundleHash error: %w, bundle hash: %v", err, bundleHash) + } + return batches, nil +} + // InsertBatch inserts a new batch into the database. func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) { if batch == nil { @@ -317,18 +358,14 @@ func (o *Batch) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA } // UpdateProofAndProvingStatusByHash updates the batch proof and proving status by hash. -func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BatchProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { +func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { db := o.db if len(dbTX) > 0 && dbTX[0] != nil { db = dbTX[0] } - proofBytes, err := json.Marshal(proof) - if err != nil { - return err - } updateFields := make(map[string]interface{}) - updateFields["proof"] = proofBytes + updateFields["proof"] = proof updateFields["proving_status"] = provingStatus updateFields["proof_time_sec"] = proofTimeSec updateFields["proved_at"] = utils.NowUTC() diff --git a/coordinator/internal/orm/bundle.go b/coordinator/internal/orm/bundle.go new file mode 100644 index 0000000000..5deeff1114 --- /dev/null +++ b/coordinator/internal/orm/bundle.go @@ -0,0 +1,228 @@ +package orm + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/scroll-tech/go-ethereum/log" + "gorm.io/gorm" + + "scroll-tech/common/types" + "scroll-tech/common/utils" +) + +// Bundle represents a bundle of batches. 
+type Bundle struct { + db *gorm.DB `gorm:"column:-"` + + Index uint64 `json:"index" gorm:"column:index"` + Hash string `json:"hash" gorm:"column:hash"` + StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"` + StartBatchHash string `json:"start_batch_hash" gorm:"column:start_batch_hash"` + EndBatchIndex uint64 `json:"end_batch_index" gorm:"column:end_batch_index"` + EndBatchHash string `json:"end_batch_hash" gorm:"column:end_batch_hash"` + + // proof + BatchProofsStatus int16 `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"` + ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` + Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` + ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` + ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` + TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"` + ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"` + + // rollup + RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"` + FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"` + FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"` + + // metadata + CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` + UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` + DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"` +} + +// NewBundle creates a new Bundle database instance. +func NewBundle(db *gorm.DB) *Bundle { + return &Bundle{db: db} +} + +// TableName returns the table name for the Bundle model. +func (*Bundle) TableName() string { + return "bundle" +} + +// GetUnassignedBundle retrieves unassigned bundle based on the specified limit. +// The returned batch sorts in ascending order by their index. +func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) { + var bundle Bundle + db := o.db.WithContext(ctx) + sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;", + int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady)) + err := db.Raw(sql).Scan(&bundle).Error + if err != nil { + return nil, fmt.Errorf("Batch.GetUnassignedBundle error: %w", err) + } + if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" { + return nil, nil + } + return &bundle, nil +} + +// GetAssignedBundle retrieves assigned bundle based on the specified limit. +// The returned bundle sorts in ascending order by their index. 
+func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) { + var bundle Bundle + db := o.db.WithContext(ctx) + sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;", + int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady)) + err := db.Raw(sql).Scan(&bundle).Error + if err != nil { + return nil, fmt.Errorf("Bundle.GetAssignedBatch error: %w", err) + } + if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" { + return nil, nil + } + return &bundle, nil +} + +// GetProvingStatusByHash retrieves the proving status of a bundle given its hash. +func (o *Bundle) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Select("proving_status") + db = db.Where("hash = ?", hash) + + var bundle Bundle + if err := db.Find(&bundle).Error; err != nil { + return types.ProvingStatusUndefined, fmt.Errorf("Bundle.GetProvingStatusByHash error: %w, batch hash: %v", err, hash) + } + return types.ProvingStatus(bundle.ProvingStatus), nil +} + +// GetBundleByHash retrieves the given +func (o *Bundle) GetBundleByHash(ctx context.Context, bundleHash string) (*Bundle, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash = ?", bundleHash) + + var bundle Bundle + if err := db.First(&bundle).Error; err != nil { + return nil, fmt.Errorf("Bundle.GetBundleByHash error: %w, bundle hash: %v", err, bundleHash) + } + return &bundle, nil +} + +// GetUnassignedAndBatchesUnreadyBundles get the bundles which is unassigned and batches are not ready +func (o *Bundle) GetUnassignedAndBatchesUnreadyBundles(ctx context.Context, offset, limit int) ([]*Bundle, error) { + if offset < 0 || limit < 0 { + return nil, errors.New("limit and offset must not be smaller than 0") + } + + db := o.db.WithContext(ctx) + db = db.Where("proving_status = ?", types.ProvingTaskUnassigned) + db = db.Where("batch_proofs_status = ?", types.BatchProofsStatusPending) + db = db.Order("index ASC") + db = db.Offset(offset) + db = db.Limit(limit) + + var bundles []*Bundle + if err := db.Find(&bundles).Error; err != nil { + return nil, fmt.Errorf("Bundle.GetUnassignedAndBatchesUnreadyBundles error: %w", err) + } + return bundles, nil +} + +// UpdateBatchProofsStatusByBundleHash updates the status of batch_proofs_status field for a given bundle hash. +func (o *Bundle) UpdateBatchProofsStatusByBundleHash(ctx context.Context, bundleHash string, status types.BatchProofsStatus) error { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash = ?", bundleHash) + + if err := db.Update("batch_proofs_status", status).Error; err != nil { + return fmt.Errorf("Bundle.UpdateBatchProofsStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String()) + } + return nil +} + +// UpdateProvingStatusFailed updates the proving status failed of a bundle. 
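// Editor's note (sketch): a bundle becomes provable only once every batch in it is
// verified. The sketch below shows how two helpers added in this diff compose:
// Batch.CheckIfBundleBatchProofsAreReady counts unverified batches for the bundle, and
// Bundle.UpdateBatchProofsStatusByBundleHash flips the readiness flag that
// GetUnassignedBundle filters on. The surrounding collector/worker that would call this
// is assumed and not shown here.
package sketch

import (
	"context"

	"scroll-tech/common/types"
	"scroll-tech/coordinator/internal/orm"
)

func markBundleReadyIfComplete(ctx context.Context, batchOrm *orm.Batch, bundleOrm *orm.Bundle, bundleHash string) error {
	ready, err := batchOrm.CheckIfBundleBatchProofsAreReady(ctx, bundleHash)
	if err != nil || !ready {
		return err
	}
	// All batch proofs for this bundle are verified; mark its batch_proofs_status
	// as ready so the bundle can be assigned to a prover.
	return bundleOrm.UpdateBatchProofsStatusByBundleHash(ctx, bundleHash, types.BatchProofsStatusReady)
}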
+func (o *Bundle) UpdateProvingStatusFailed(ctx context.Context, bundleHash string, maxAttempts uint8, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", bundleHash) + db = db.Where("total_attempts >= ?", maxAttempts) + db = db.Where("proving_status != ?", int(types.ProvingTaskVerified)) + if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil { + return fmt.Errorf("Bundle.UpdateProvingStatus error: %w, bundle hash: %v, status: %v", err, bundleHash, types.ProvingTaskFailed.String()) + } + return nil +} + +// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash. +func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + updateFields := make(map[string]interface{}) + updateFields["proof"] = proof + updateFields["proving_status"] = provingStatus + updateFields["proof_time_sec"] = proofTimeSec + updateFields["proved_at"] = utils.NowUTC() + + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash) + } + return nil +} + +// UpdateBundleAttempts atomically increments the attempts count for the earliest available bundle that meets the conditions. +func (o *Bundle) UpdateBundleAttempts(ctx context.Context, hash string, curActiveAttempts, curTotalAttempts int16) (int64, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash = ?", hash) + db = db.Where("active_attempts = ?", curActiveAttempts) + db = db.Where("total_attempts = ?", curTotalAttempts) + result := db.Updates(map[string]interface{}{ + "proving_status": types.ProvingTaskAssigned, + "total_attempts": gorm.Expr("total_attempts + 1"), + "active_attempts": gorm.Expr("active_attempts + 1"), + }) + + if result.Error != nil { + return 0, fmt.Errorf("failed to update bundle, err:%w", result.Error) + } + return result.RowsAffected, nil +} + +// DecreaseActiveAttemptsByHash decrements the active_attempts of a bundle given its hash. +func (o *Bundle) DecreaseActiveAttemptsByHash(ctx context.Context, bundleHash string, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash = ?", bundleHash) + db = db.Where("proving_status != ?", int(types.ProvingTaskVerified)) + db = db.Where("active_attempts > ?", 0) + result := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - 1")) + if result.Error != nil { + return fmt.Errorf("Bundle.DecreaseActiveAttemptsByHash error: %w, bundle hash: %v", result.Error, bundleHash) + } + if result.RowsAffected == 0 { + log.Warn("No rows were affected in DecreaseActiveAttemptsByHash", "bundle hash", bundleHash) + } + return nil +} diff --git a/coordinator/internal/orm/chunk.go b/coordinator/internal/orm/chunk.go index 36045356cf..a0d701b937 100644 --- a/coordinator/internal/orm/chunk.go +++ b/coordinator/internal/orm/chunk.go @@ -74,11 +74,11 @@ func (*Chunk) TableName() string { // GetUnassignedChunk retrieves unassigned chunk based on the specified limit. // The returned chunks are sorted in ascending order by their index. 
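// Editor's note (sketch): bundle assignment reuses the attempt accounting pattern of
// chunks and batches: UpdateBundleAttempts only bumps the counters if they still match
// the values the caller read, so concurrent assigners cannot both claim the same bundle,
// and DecreaseActiveAttemptsByHash releases a slot when a task finishes or fails. A
// sketch of the assign-side check follows; error handling beyond the ORM call is trimmed.
package sketch

import (
	"context"

	"scroll-tech/coordinator/internal/orm"
)

func tryAssignBundle(ctx context.Context, bundleOrm *orm.Bundle, bundle *orm.Bundle) (bool, error) {
	rows, err := bundleOrm.UpdateBundleAttempts(ctx, bundle.Hash, bundle.ActiveAttempts, bundle.TotalAttempts)
	if err != nil {
		return false, err
	}
	// rows == 0 means another assigner already bumped the counters for this bundle;
	// the caller should move on to a different task.
	return rows > 0, nil
}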
-func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) { +func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) { var chunk Chunk db := o.db.WithContext(ctx) - sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;", - int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum) + sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;", + int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts) err := db.Raw(sql).Scan(&chunk).Error if err != nil { return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err) @@ -91,11 +91,11 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum // GetAssignedChunk retrieves assigned chunk based on the specified limit. // The returned chunks are sorted in ascending order by their index. -func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) { +func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) { var chunk Chunk db := o.db.WithContext(ctx) - sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;", - int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum) + sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;", + int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts) err := db.Raw(sql).Scan(&chunk).Error if err != nil { return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err) @@ -340,18 +340,14 @@ func (o *Chunk) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA } // UpdateProofAndProvingStatusByHash updates the chunk proof and proving_status by hash. 
-func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.ChunkProof, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { +func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { db := o.db if len(dbTX) > 0 && dbTX[0] != nil { db = dbTX[0] } - proofBytes, err := json.Marshal(proof) - if err != nil { - return err - } updateFields := make(map[string]interface{}) - updateFields["proof"] = proofBytes + updateFields["proof"] = proof updateFields["proving_status"] = int(status) updateFields["proof_time_sec"] = proofTimeSec updateFields["proved_at"] = utils.NowUTC() diff --git a/coordinator/internal/orm/l2_block.go b/coordinator/internal/orm/l2_block.go index 68489f8ce7..f3790c879c 100644 --- a/coordinator/internal/orm/l2_block.go +++ b/coordinator/internal/orm/l2_block.go @@ -74,6 +74,19 @@ func (o *L2Block) GetL2BlockHashesByChunkHash(ctx context.Context, chunkHash str return blockHashes, nil } +// GetL2BlockByNumber retrieves the L2 block by l2 block number +func (o *L2Block) GetL2BlockByNumber(ctx context.Context, blockNumber uint64) (*L2Block, error) { + db := o.db.WithContext(ctx) + db = db.Model(&L2Block{}) + db = db.Where("number = ?", blockNumber) + + var l2Block L2Block + if err := db.First(&l2Block).Error; err != nil { + return nil, fmt.Errorf("L2Block.GetL2BlockByNumber error: %w, chunk block number: %v", err, blockNumber) + } + return &l2Block, nil +} + // InsertL2Blocks inserts l2 blocks into the "l2_block" table. // for unit test func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block) error { diff --git a/coordinator/internal/types/auth.go b/coordinator/internal/types/auth.go index e89e25c185..faa980fa8a 100644 --- a/coordinator/internal/types/auth.go +++ b/coordinator/internal/types/auth.go @@ -1,6 +1,15 @@ package types -import "time" +import ( + "crypto/ecdsa" + "encoding/hex" + "time" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/common/hexutil" + "github.com/scroll-tech/go-ethereum/crypto" + "github.com/scroll-tech/go-ethereum/rlp" +) const ( // PublicKey the public key for context @@ -9,26 +18,88 @@ const ( ProverName = "prover_name" // ProverVersion the prover version for context ProverVersion = "prover_version" - // HardForkName the fork name for context - HardForkName = "hard_fork_name" ) +// LoginSchema for /login response +type LoginSchema struct { + Time time.Time `json:"time"` + Token string `json:"token"` +} + // Message the login message struct type Message struct { - Challenge string `form:"challenge" json:"challenge" binding:"required"` - ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"` - ProverName string `form:"prover_name" json:"prover_name" binding:"required"` - HardForkName string `form:"hard_fork_name" json:"hard_fork_name"` + Challenge string `form:"challenge" json:"challenge" binding:"required"` + ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"` + ProverName string `form:"prover_name" json:"prover_name" binding:"required"` + ProverTypes []ProverType `form:"prover_types" json:"prover_types"` + VKs []string `form:"vks" json:"vks"` } // LoginParameter for /login api type LoginParameter struct { Message Message `form:"message" json:"message" binding:"required"` + PublicKey string `form:"public_key" json:"public_key"` Signature string 
`form:"signature" json:"signature" binding:"required"` } -// LoginSchema for /login response -type LoginSchema struct { - Time time.Time `json:"time"` - Token string `json:"token"` +// SignWithKey auth message with private key and set public key in auth message's Identity +func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error { + // Hash identity content + hash, err := a.Message.Hash() + if err != nil { + return err + } + + // Sign register message + sig, err := crypto.Sign(hash, priv) + if err != nil { + return err + } + + a.Signature = hexutil.Encode(sig) + return nil +} + +// Verify verifies the message of auth. +func (a *LoginParameter) Verify() (bool, error) { + hash, err := a.Message.Hash() + if err != nil { + return false, err + } + + expectedPubKey, err := a.Message.DecodeAndUnmarshalPubkey(a.PublicKey) + if err != nil { + return false, err + } + + sig := common.FromHex(a.Signature) + isValid := crypto.VerifySignature(crypto.CompressPubkey(expectedPubKey), hash, sig[:len(sig)-1]) + return isValid, nil +} + +// Hash returns the hash of the auth message, which should be the message used +// to construct the Signature. +func (i *Message) Hash() ([]byte, error) { + byt, err := rlp.EncodeToBytes(i) + if err != nil { + return nil, err + } + hash := crypto.Keccak256Hash(byt) + return hash[:], nil +} + +// DecodeAndUnmarshalPubkey decodes a hex-encoded public key and unmarshal it into an ecdsa.PublicKey +func (i *Message) DecodeAndUnmarshalPubkey(pubKeyHex string) (*ecdsa.PublicKey, error) { + // Decode hex string to bytes + byteKey, err := hex.DecodeString(pubKeyHex) + if err != nil { + return nil, err + } + + // Unmarshal bytes to ECDSA public key + pubKey, err := crypto.DecompressPubkey(byteKey) + if err != nil { + return nil, err + } + return pubKey, nil } diff --git a/coordinator/internal/types/auth_test.go b/coordinator/internal/types/auth_test.go new file mode 100644 index 0000000000..ce4cd29ede --- /dev/null +++ b/coordinator/internal/types/auth_test.go @@ -0,0 +1,78 @@ +package types + +import ( + "encoding/hex" + "testing" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto" + "github.com/stretchr/testify/assert" +) + +func TestAuthMessageSignAndVerify(t *testing.T) { + privateKey, err := crypto.GenerateKey() + assert.NoError(t, err) + publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&privateKey.PublicKey)) + + var authMsg LoginParameter + t.Run("sign", func(t *testing.T) { + authMsg = LoginParameter{ + Message: Message{ + ProverName: "test1", + ProverVersion: "v0.0.1", + Challenge: "abcdef", + ProverTypes: []ProverType{ProverTypeBatch}, + VKs: []string{"vk1", "vk2"}, + }, + PublicKey: publicKeyHex, + } + + err = authMsg.SignWithKey(privateKey) + assert.NoError(t, err) + }) + + t.Run("valid verify", func(t *testing.T) { + ok, verifyErr := authMsg.Verify() + assert.True(t, ok) + assert.NoError(t, verifyErr) + }) + + t.Run("invalid verify", func(t *testing.T) { + authMsg.Message.Challenge = "abcdefgh" + ok, verifyErr := authMsg.Verify() + assert.False(t, ok) + assert.NoError(t, verifyErr) + }) +} + +// TestGenerateSignature this unit test isn't for test, just generate the signature for manually test. 
+func TestGenerateSignature(t *testing.T) { + privateKeyHex := "8b8df68fddf7ee2724b79ccbd07799909d59b4dd4f4df3f6ecdc4fb8d56bdf4c" + privateKeyBytes, err := hex.DecodeString(privateKeyHex) + assert.Nil(t, err) + privateKey, err := crypto.ToECDSA(privateKeyBytes) + assert.NoError(t, err) + assert.NoError(t, err) + publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&privateKey.PublicKey)) + + t.Log("publicKey: ", publicKeyHex) + + authMsg := LoginParameter{ + Message: Message{ + ProverName: "test", + ProverVersion: "v4.4.32-37af5ef5-38a68e2-1c5093c", + Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjEzMjc5MTIsIm9yaWdfaWF0IjoxNzIxMzI0MzEyLCJyYW5kb20iOiJWMVFlT19yNEV5eGRmYUtDalprVExEa0ZIemEyNTdQRG93dTV4SnVxYTdZPSJ9.x-B_TnkTUvs8-hiMfJXejxetAP6rXfeRUmyZ3S0uBiM", + ProverTypes: []ProverType{ProverTypeBatch}, + VKs: []string{"AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA=", + "AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD1DEjW4Kell67H07wazT5DdzrSh4+amh+cmosQHp9p9snFypyoBGt3UHtoJGQBZlywZWDS9ht5pnaEoGBdaKcQk+lFb+WxTiId0KOAa0mafTZTQw8yToy57Jple64qzlRu1dux30tZZGuerLN1CKzg5Xl2iOpMK+l87jCINwVp5cUtF/XrvhBbU7onKh3KBiy99iUqVyA3Y6iiIZhGKWBSuSA4bNgDYIoVkqjHpdL35aEShoRO6pNXt7rDzxFoPzH0JuPI54nE4OhVrzZXwtkAEosxVa/fszcE092FH+HhhtxZBYe/KEzwdISU9TOPdId3UF/UMYC0MiYOlqffVTgAg="}, + }, + PublicKey: publicKeyHex, + } + err = authMsg.SignWithKey(privateKey) + assert.NoError(t, err) + t.Log("signature: ", authMsg.Signature) + + verify, err := authMsg.Verify() + assert.NoError(t, err) + assert.True(t, verify) +} diff --git a/coordinator/internal/types/get_task.go b/coordinator/internal/types/get_task.go index 744643f21c..aba15007db 100644 --- a/coordinator/internal/types/get_task.go +++ b/coordinator/internal/types/get_task.go @@ -2,10 +2,9 @@ package types // GetTaskParameter for ProverTasks request parameter type GetTaskParameter struct { - ProverHeight uint64 `form:"prover_height" json:"prover_height"` - TaskType int `form:"task_type" json:"task_type"` - VK string `form:"vk" json:"vk"` // will be deprecated after all go_prover offline - VKs []string `form:"vks" json:"vks"` // for rust_prover that supporting multi-circuits + ProverHeight uint64 `form:"prover_height" json:"prover_height"` + TaskType int `form:"task_type" json:"task_type"` + TaskTypes []int `form:"task_types" json:"task_types"` } // GetTaskSchema the schema data return to prover for get prover task diff --git a/coordinator/internal/types/prover.go b/coordinator/internal/types/prover.go new file mode 100644 index 0000000000..9dcc9fbeb9 --- /dev/null +++ b/coordinator/internal/types/prover.go @@ -0,0 +1,42 @@ +package types + +import ( + "fmt" + + "scroll-tech/common/types/message" +) + +// ProverType represents the type of prover. 
+type ProverType uint8 + +func (r ProverType) String() string { + switch r { + case ProverTypeChunk: + return "prover type chunk" + case ProverTypeBatch: + return "prover type batch" + default: + return fmt.Sprintf("illegal prover type: %d", r) + } +} + +const ( + // ProverTypeUndefined is an unknown prover type + ProverTypeUndefined ProverType = iota + // ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks + ProverTypeChunk + // ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks + ProverTypeBatch +) + +// MakeProverType make ProverType from ProofType +func MakeProverType(proof_type message.ProofType) ProverType { + switch proof_type { + case message.ProofTypeChunk: + return ProverTypeChunk + case message.ProofTypeBatch, message.ProofTypeBundle: + return ProverTypeBatch + default: + return ProverTypeUndefined + } +} diff --git a/coordinator/internal/types/submit_proof.go b/coordinator/internal/types/submit_proof.go index de3720a70c..cbf325e225 100644 --- a/coordinator/internal/types/submit_proof.go +++ b/coordinator/internal/types/submit_proof.go @@ -2,13 +2,11 @@ package types // SubmitProofParameter the SubmitProof api request parameter type SubmitProofParameter struct { - // TODO when prover have upgrade, need change this field to required - UUID string `form:"uuid" json:"uuid"` - TaskID string `form:"task_id" json:"task_id" binding:"required"` - TaskType int `form:"task_type" json:"task_type" binding:"required"` - Status int `form:"status" json:"status"` - Proof string `form:"proof" json:"proof"` - FailureType int `form:"failure_type" json:"failure_type"` - FailureMsg string `form:"failure_msg" json:"failure_msg"` - HardForkName string `form:"hard_fork_name" json:"hard_fork_name"` + UUID string `form:"uuid" json:"uuid"` + TaskID string `form:"task_id" json:"task_id" binding:"required"` + TaskType int `form:"task_type" json:"task_type" binding:"required"` + Status int `form:"status" json:"status"` + Proof string `form:"proof" json:"proof"` + FailureType int `form:"failure_type" json:"failure_type"` + FailureMsg string `form:"failure_msg" json:"failure_msg"` } diff --git a/coordinator/test/api_test.go b/coordinator/test/api_test.go index 06af3a7dc8..58a444ce9f 100644 --- a/coordinator/test/api_test.go +++ b/coordinator/test/api_test.go @@ -34,10 +34,9 @@ import ( ) const ( - forkNumberFour = 4 - forkNumberThree = 3 - forkNumberTwo = 2 - forkNumberOne = 1 + forkNumberTwo = 2 + forkNumberOne = 1 + minProverVersion = "v2.0.0" ) var ( @@ -52,17 +51,10 @@ var ( proverTaskOrm *orm.ProverTask proverBlockListOrm *orm.ProverBlockList - block1 *encoding.Block - block2 *encoding.Block - - chunk *encoding.Chunk - hardForkChunk1 *encoding.Chunk - hardForkChunk2 *encoding.Chunk - - batch *encoding.Batch - hardForkBatch1 *encoding.Batch - hardForkBatch2 *encoding.Batch - + block1 *encoding.Block + block2 *encoding.Block + chunk *encoding.Chunk + batch *encoding.Batch tokenTimeout int ) @@ -89,7 +81,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri assert.NoError(t, err) assert.NoError(t, migrate.ResetDB(sqlDB)) - tokenTimeout = 6 + tokenTimeout = 60 conf = &config.Config{ L2: &config.L2{ ChainID: 111, @@ -99,11 +91,12 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri Verifier: &config.VerifierConfig{ MockMode: true, }, - BatchCollectionTimeSec: 10, - ChunkCollectionTimeSec: 10, - MaxVerifierWorkers: 10, - SessionAttempts: 5, - MinProverVersion: version.Version, + 
BatchCollectionTimeSec: 10, + ChunkCollectionTimeSec: 10, + BundleCollectionTimeSec: 10, + MaxVerifierWorkers: 10, + SessionAttempts: 5, + MinProverVersion: minProverVersion, }, Auth: &config.Auth{ ChallengeExpireDurationSec: tokenTimeout, @@ -152,7 +145,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri func setEnv(t *testing.T) { var err error - version.Version = "v4.1.98" + version.Version = "v4.2.0" glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat())) glogger.Verbosity(log.LvlInfo) @@ -186,14 +179,9 @@ func setEnv(t *testing.T) { assert.NoError(t, err) chunk = &encoding.Chunk{Blocks: []*encoding.Block{block1, block2}} - hardForkChunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}} - hardForkChunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}} - assert.NoError(t, err) - batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}} - hardForkBatch1 = &encoding.Batch{Index: 1, Chunks: []*encoding.Chunk{hardForkChunk1}} - hardForkBatch2 = &encoding.Batch{Index: 2, Chunks: []*encoding.Chunk{hardForkChunk2}} + } func TestApis(t *testing.T) { @@ -208,7 +196,6 @@ func TestApis(t *testing.T) { t.Run("TestInvalidProof", testInvalidProof) t.Run("TestProofGeneratedFailed", testProofGeneratedFailed) t.Run("TestTimeoutProof", testTimeoutProof) - t.Run("TestHardFork", testHardForkAssignTask) } func testHandshake(t *testing.T) { @@ -261,12 +248,12 @@ func testGetTaskBlocked(t *testing.T) { assert.NoError(t, err) expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion) - code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead") + code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk) assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, expectedErr, errors.New(errMsg)) expectedErr = errors.New("get empty prover task") - code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead") + code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) assert.Equal(t, types.ErrCoordinatorEmptyProofData, code) assert.Equal(t, expectedErr, errors.New(errMsg)) @@ -277,12 +264,12 @@ func testGetTaskBlocked(t *testing.T) { assert.NoError(t, err) expectedErr = errors.New("get empty prover task") - code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead") + code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk) assert.Equal(t, types.ErrCoordinatorEmptyProofData, code) assert.Equal(t, expectedErr, errors.New(errMsg)) expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. 
ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion) - code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead") + code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, expectedErr, errors.New(errMsg)) } @@ -301,249 +288,17 @@ func testOutdatedProverVersion(t *testing.T) { batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999") assert.True(t, chunkProver.healthCheckSuccess(t)) - expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion) - code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead") - assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) + expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", minProverVersion, chunkProver.proverVersion) + code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk) + assert.Equal(t, types.ErrJWTCommonErr, code) assert.Equal(t, expectedErr, errors.New(errMsg)) - expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion) - code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead") - assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) + expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. 
please upgrade your prover, minimum allowed version: %s, actual version: %s", minProverVersion, batchProver.proverVersion) + code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) + assert.Equal(t, types.ErrJWTCommonErr, code) assert.Equal(t, expectedErr, errors.New(errMsg)) } -func testHardForkAssignTask(t *testing.T) { - tests := []struct { - name string - proofType message.ProofType - forkNumbers map[string]int64 - proverForkNames []string - exceptTaskNumber int - exceptGetTaskErrCodes []int - exceptGetTaskErrMsgs []string - }{ - { // hard fork 4, prover 4 block [2-3] - name: "noTaskForkChunkProverVersionLargeOrEqualThanHardFork", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"bernoulli": forkNumberFour}, - exceptTaskNumber: 0, - proverForkNames: []string{"bernoulli", "bernoulli"}, - exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"}, - }, - { - name: "noTaskForkBatchProverVersionLargeOrEqualThanHardFork", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"bernoulli": forkNumberFour}, - exceptTaskNumber: 0, - proverForkNames: []string{"bernoulli", "bernoulli"}, - exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"}, - }, - { // hard fork 1, prover 1 block [2-3] - name: "noTaskForkChunkProverVersionLessThanHardFork", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne}, - exceptTaskNumber: 0, - proverForkNames: []string{"homestead", "homestead"}, - exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"}, - }, - { - name: "noTaskForkBatchProverVersionLessThanHardFork", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne}, - exceptTaskNumber: 0, - proverForkNames: []string{"homestead", "homestead"}, - exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"}, - }, - { - name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree}, - exceptTaskNumber: 0, - proverForkNames: []string{"", ""}, - exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"}, - }, - { // hard fork 3, prover 3 block [2-3] - name: "oneTaskForkChunkProverVersionLargeOrEqualThanHardFork", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"london": forkNumberThree}, - exceptTaskNumber: 1, - proverForkNames: []string{"london", "london"}, - exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"", "get empty prover task"}, - }, - { - name: "oneTaskForkBatchProverVersionLargeOrEqualThanHardFork", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"london": forkNumberThree}, - exceptTaskNumber: 1, - proverForkNames: []string{"london", "london"}, - 
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"", "get empty prover task"}, - }, - { // hard fork 2, prover 2 block [2-3] - name: "oneTaskForkChunkProverVersionLessThanHardFork", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree}, - exceptTaskNumber: 1, - proverForkNames: []string{"istanbul", "istanbul"}, - exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"", "get empty prover task"}, - }, - { - name: "oneTaskForkBatchProverVersionLessThanHardFork", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree}, - exceptTaskNumber: 1, - proverForkNames: []string{"istanbul", "istanbul"}, - exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"", "get empty prover task"}, - }, - { // hard fork 2, prover 2 block [2-3] - name: "twoTaskForkChunkProverVersionLargeOrEqualThanHardFork", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo}, - exceptTaskNumber: 2, - proverForkNames: []string{"istanbul", "istanbul"}, - exceptGetTaskErrCodes: []int{types.Success, types.Success}, - exceptGetTaskErrMsgs: []string{"", ""}, - }, - { - name: "twoTaskForkBatchProverVersionLargeOrEqualThanHardFork", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo}, - exceptTaskNumber: 2, - proverForkNames: []string{"istanbul", "istanbul"}, - exceptGetTaskErrCodes: []int{types.Success, types.Success}, - exceptGetTaskErrMsgs: []string{"", ""}, - }, - { // hard fork 4, prover 3 block [2-3] - name: "twoTaskForkChunkProverVersionLessThanHardFork", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"bernoulli": forkNumberFour, "istanbul": forkNumberTwo}, - exceptTaskNumber: 2, - proverForkNames: []string{"istanbul", "istanbul"}, - exceptGetTaskErrCodes: []int{types.Success, types.Success}, - exceptGetTaskErrMsgs: []string{"", ""}, - }, - { // hard fork 3, prover1:2 prover2:3 block [2-3] - name: "twoTaskForkChunkProverVersionMiddleHardFork", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree}, - exceptTaskNumber: 2, - proverForkNames: []string{"istanbul", "london"}, - exceptGetTaskErrCodes: []int{types.Success, types.Success}, - exceptGetTaskErrMsgs: []string{"", ""}, - }, - { - name: "twoTaskForkBatchProverVersionMiddleHardFork", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree}, - exceptTaskNumber: 2, - proverForkNames: []string{"istanbul", "london"}, - exceptGetTaskErrCodes: []int{types.Success, types.Success}, - exceptGetTaskErrMsgs: []string{"", ""}, - }, - { // hard fork 3, prover1:2 prover2:3 block [2-3] - name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree}, - exceptTaskNumber: 2, - proverForkNames: []string{"", "london"}, - exceptGetTaskErrCodes: []int{types.Success, types.Success}, - exceptGetTaskErrMsgs: []string{"", ""}, - }, - { - name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0", - proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": 
forkNumberThree}, - exceptTaskNumber: 2, - proverForkNames: []string{"", "london"}, - exceptGetTaskErrCodes: []int{types.Success, types.Success}, - exceptGetTaskErrMsgs: []string{"", ""}, - }, - { // hard fork 2, prover 2 block [2-3] - name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0", - proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree}, - exceptTaskNumber: 1, - proverForkNames: []string{"", ""}, - exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData}, - exceptGetTaskErrMsgs: []string{"", "get empty prover task"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - coordinatorURL := randomURL() - collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, tt.forkNumbers) - defer func() { - collector.Stop() - assert.NoError(t, httpHandler.Shutdown(context.Background())) - }() - - chunkProof := &message.ChunkProof{ - StorageTrace: []byte("testStorageTrace"), - Protocol: []byte("testProtocol"), - Proof: []byte("testProof"), - Instances: []byte("testInstance"), - Vk: []byte("testVk"), - ChunkInfo: nil, - } - - // the insert block number is 2 and 3 - // chunk1 batch1 contains block number 2 - // chunk2 batch2 contains block number 3 - err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) - - dbHardForkChunk1, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk1) - assert.NoError(t, err) - err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 2, dbHardForkChunk1.Hash) - assert.NoError(t, err) - err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk1.Hash, chunkProof, types.ProvingTaskUnassigned, 1) - assert.NoError(t, err) - dbHardForkBatch1, err := batchOrm.InsertBatch(context.Background(), hardForkBatch1) - assert.NoError(t, err) - err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, dbHardForkBatch1.Hash) - assert.NoError(t, err) - err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch1.Hash, types.ChunkProofsStatusReady) - assert.NoError(t, err) - - dbHardForkChunk2, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk2) - assert.NoError(t, err) - err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 3, 100, dbHardForkChunk2.Hash) - assert.NoError(t, err) - err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk2.Hash, chunkProof, types.ProvingTaskUnassigned, 1) - assert.NoError(t, err) - dbHardForkBatch2, err := batchOrm.InsertBatch(context.Background(), hardForkBatch2) - assert.NoError(t, err) - err = chunkOrm.UpdateBatchHashInRange(context.Background(), 1, 1, dbHardForkBatch2.Hash) - assert.NoError(t, err) - err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch2.Hash, types.ChunkProofsStatusReady) - assert.NoError(t, err) - - getTaskNumber := 0 - for i := 0; i < 2; i++ { - mockProver := newMockProver(t, fmt.Sprintf("mock_prover_%d", i), coordinatorURL, tt.proofType, version.Version) - proverTask, errCode, errMsg := mockProver.getProverTask(t, tt.proofType, tt.proverForkNames[i]) - assert.Equal(t, tt.exceptGetTaskErrCodes[i], errCode) - assert.Equal(t, tt.exceptGetTaskErrMsgs[i], errMsg) - if errCode != types.Success { - continue - } - getTaskNumber++ - mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i]) - } - assert.Equal(t, getTaskNumber, 
tt.exceptTaskNumber) - }) - } -} - func testValidProof(t *testing.T) { coordinatorURL := randomURL() collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo}) @@ -575,12 +330,12 @@ func testValidProof(t *testing.T) { provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version) - proofStatus := verifiedSuccess - proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul") - assert.Equal(t, errCode, types.Success) - assert.Equal(t, errMsg, "") + exceptProofStatus := verifiedSuccess + proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType) + assert.Equal(t, types.Success, errCode) + assert.Equal(t, "", errMsg) assert.NotNil(t, proverTask) - provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul") + provers[i].submitProof(t, proverTask, exceptProofStatus, types.Success) } // verify proof status @@ -641,39 +396,69 @@ func testInvalidProof(t *testing.T) { assert.NoError(t, err) err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash) assert.NoError(t, err) - batch, err := batchOrm.InsertBatch(context.Background(), batch) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch) assert.NoError(t, err) - err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady) + err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, dbBatch.Hash) + assert.NoError(t, err) + err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbBatch.Hash, types.ChunkProofsStatusReady) assert.NoError(t, err) - proofType := message.ProofTypeBatch - provingStatus := verifiedFailed - expectErrCode := types.ErrCoordinatorHandleZkProofFailure - prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version) - proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul") - assert.NotNil(t, proverTask) - assert.Equal(t, errCode, types.Success) - assert.Equal(t, errMsg, "") - prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul") + // create mock provers. 
+ provers := make([]*mockProver, 2) + for i := 0; i < len(provers); i++ { + var ( + proofType message.ProofType + provingStatus proofStatus + exceptCode int + ) + + if i%2 == 0 { + proofType = message.ProofTypeChunk + provingStatus = verifiedSuccess + exceptCode = types.Success + } else { + proofType = message.ProofTypeBatch + provingStatus = verifiedFailed + exceptCode = types.ErrCoordinatorHandleZkProofFailure + } + + provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version) + proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType) + assert.Equal(t, types.Success, errCode) + assert.Equal(t, "", errMsg) + assert.NotNil(t, proverTask) + provers[i].submitProof(t, proverTask, provingStatus, exceptCode) + } // verify proof status var ( tick = time.Tick(1500 * time.Millisecond) tickStop = time.Tick(time.Minute) + chunkProofStatus types.ProvingStatus batchProofStatus types.ProvingStatus batchActiveAttempts int16 batchMaxAttempts int16 + chunkActiveAttempts int16 + chunkMaxAttempts int16 ) for { select { case <-tick: - batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash) + chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) + assert.NoError(t, err) + batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), dbBatch.Hash) assert.NoError(t, err) - if batchProofStatus == types.ProvingTaskAssigned { + if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskAssigned { return } - batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash) + + chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash) + assert.NoError(t, err) + assert.Equal(t, 1, int(chunkMaxAttempts)) + assert.Equal(t, 0, int(chunkActiveAttempts)) + + batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), dbBatch.Hash) assert.NoError(t, err) assert.Equal(t, 1, int(batchMaxAttempts)) assert.Equal(t, 0, int(batchActiveAttempts)) @@ -699,26 +484,38 @@ func testProofGeneratedFailed(t *testing.T) { assert.NoError(t, err) err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash) assert.NoError(t, err) - batch, err := batchOrm.InsertBatch(context.Background(), batch) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch) assert.NoError(t, err) - err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady) + err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, dbBatch.Hash) + assert.NoError(t, err) + err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbBatch.Hash, types.ChunkProofsStatusReady) assert.NoError(t, err) // create mock provers. 
provers := make([]*mockProver, 2) for i := 0; i < len(provers); i++ { - var proofType message.ProofType + var ( + proofType message.ProofType + exceptCode int + exceptErrMsg string + ) if i%2 == 0 { proofType = message.ProofTypeChunk + exceptCode = types.Success + exceptErrMsg = "" } else { proofType = message.ProofTypeBatch + exceptCode = types.ErrCoordinatorGetTaskFailure + exceptErrMsg = "return prover task err:coordinator internal error" } provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version) - proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul") + proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType) assert.NotNil(t, proverTask) - assert.Equal(t, errCode, types.Success) - assert.Equal(t, errMsg, "") - provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul") + assert.Equal(t, errCode, exceptCode) + assert.Equal(t, errMsg, exceptErrMsg) + if errCode == types.Success { + provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure) + } } // verify proof status @@ -743,7 +540,7 @@ func testProofGeneratedFailed(t *testing.T) { case <-tick: chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) assert.NoError(t, err) - batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash) + batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), dbBatch.Hash) assert.NoError(t, err) if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned { return @@ -754,14 +551,14 @@ func testProofGeneratedFailed(t *testing.T) { assert.Equal(t, 1, int(chunkMaxAttempts)) assert.Equal(t, 0, int(chunkActiveAttempts)) - batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash) + batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), dbBatch.Hash) assert.NoError(t, err) assert.Equal(t, 1, int(batchMaxAttempts)) assert.Equal(t, 0, int(batchActiveAttempts)) chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeChunk, dbChunk.Hash) assert.NoError(t, err) - batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, batch.Hash) + batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, dbBatch.Hash) assert.NoError(t, err) if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid { return @@ -797,18 +594,25 @@ func testTimeoutProof(t *testing.T) { assert.NoError(t, err) batch, err := batchOrm.InsertBatch(context.Background(), batch) assert.NoError(t, err) + err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash) + assert.NoError(t, err) + encodeData, err := json.Marshal(message.ChunkProof{}) + assert.NoError(t, err) + assert.NotEmpty(t, encodeData) + err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1) + assert.NoError(t, err) err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady) assert.NoError(t, err) // create first chunk & batch mock prover, that will not send any proof. 
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk, version.Version) - proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk, "istanbul") + proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk) assert.NotNil(t, proverChunkTask) assert.Equal(t, errChunkCode, types.Success) assert.Equal(t, errChunkMsg, "") batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch, version.Version) - proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch, "istanbul") + proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch) assert.NotNil(t, proverBatchTask) assert.Equal(t, errBatchCode, types.Success) assert.Equal(t, errBatchMsg, "") @@ -837,18 +641,18 @@ func testTimeoutProof(t *testing.T) { // create second mock prover, that will send valid proof. chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk, version.Version) - proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk, "istanbul") + proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk) assert.NotNil(t, proverChunkTask2) assert.Equal(t, chunkTask2ErrCode, types.Success) assert.Equal(t, chunkTask2ErrMsg, "") - chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul") + chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success) batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version) - proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul") + proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch) assert.NotNil(t, proverBatchTask2) assert.Equal(t, batchTask2ErrCode, types.Success) assert.Equal(t, batchTask2ErrMsg, "") - batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul") + batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success) // verify proof status, it should be verified now, because second prover sent valid proof chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) diff --git a/coordinator/test/mock_prover.go b/coordinator/test/mock_prover.go index 67ce32b3a6..ddf03d93ec 100644 --- a/coordinator/test/mock_prover.go +++ b/coordinator/test/mock_prover.go @@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof } // connectToCoordinator sets up a websocket client to connect to the prover manager. 
-func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string { +func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) { challengeString := r.challenge(t) - return r.login(t, challengeString, forkName) + return r.login(t, challengeString, proverTypes) } func (r *mockProver) challenge(t *testing.T) string { @@ -76,43 +76,35 @@ func (r *mockProver) challenge(t *testing.T) string { return loginData.Token } -func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string { - var body string - if forkName != "" { - authMsg := message.AuthMsg{ - Identity: &message.Identity{ - Challenge: challengeString, - ProverName: r.proverName, - ProverVersion: r.proverVersion, - HardForkName: forkName, - }, - } - assert.NoError(t, authMsg.SignWithKey(r.privKey)) - body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}", - authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature) - } else { - authMsg := message.LegacyAuthMsg{ - Identity: &message.LegacyIdentity{ - Challenge: challengeString, - ProverName: r.proverName, - ProverVersion: r.proverVersion, - }, - } - assert.NoError(t, authMsg.SignWithKey(r.privKey)) - body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}", - authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature) +func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []types.ProverType) (string, int, string) { + authMsg := types.LoginParameter{ + Message: types.Message{ + Challenge: challengeString, + ProverName: r.proverName, + ProverVersion: r.proverVersion, + ProverTypes: proverTypes, + VKs: []string{"mock_vk"}, + }, + PublicKey: r.publicKey(), } + assert.NoError(t, authMsg.SignWithKey(r.privKey)) + body, err := json.Marshal(authMsg) + assert.NoError(t, err) var result ctypes.Response client := resty.New() resp, err := client.R(). SetHeader("Content-Type", "application/json"). SetHeader("Authorization", fmt.Sprintf("Bearer %s", challengeString)). - SetBody([]byte(body)). + SetBody(body). SetResult(&result). 
Post("http://" + r.coordinatorURL + "/coordinator/v1/login") assert.NoError(t, err) + if result.ErrCode != 0 { + return "", result.ErrCode, result.ErrMsg + } + type login struct { Time string `json:"time"` Token string `json:"token"` @@ -122,7 +114,7 @@ func (r *mockProver) login(t *testing.T, challengeString string, forkName string assert.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode()) assert.Empty(t, result.ErrMsg) - return loginData.Token + return loginData.Token, 0, "" } func (r *mockProver) healthCheckSuccess(t *testing.T) bool { @@ -149,9 +141,12 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool { return true } -func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) { +func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) { // get task from coordinator - token := r.connectToCoordinator(t, forkName) + token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)}) + if errCode != 0 { + return nil, errCode, errMsg + } assert.NotEmpty(t, token) type response struct { @@ -176,9 +171,12 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo // Testing expected errors returned by coordinator. // //nolint:unparam -func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) { +func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) { // get task from coordinator - token := r.connectToCoordinator(t, forkName) + token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)}) + if errCode != 0 { + return errCode, errMsg + } assert.NotEmpty(t, token) type response struct { @@ -201,50 +199,56 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, return result.ErrCode, result.ErrMsg } -func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) { +func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) { proofMsgStatus := message.StatusOk if proofStatus == generatedFailed { proofMsgStatus = message.StatusProofError } - proof := &message.ProofMsg{ - ProofDetail: &message.ProofDetail{ - ID: proverTaskSchema.TaskID, - Type: message.ProofType(proverTaskSchema.TaskType), - Status: proofMsgStatus, - ChunkProof: &message.ChunkProof{}, - BatchProof: &message.BatchProof{}, - }, + var proof []byte + switch proverTaskSchema.TaskType { + case int(message.ProofTypeChunk): + encodeData, err := json.Marshal(message.ChunkProof{}) + assert.NoError(t, err) + assert.NotEmpty(t, encodeData) + proof = encodeData + case int(message.ProofTypeBatch): + encodeData, err := json.Marshal(message.BatchProof{}) + assert.NoError(t, err) + assert.NotEmpty(t, encodeData) + proof = encodeData } - if proofStatus == generatedFailed { - proof.Status = message.StatusProofError - } else if proofStatus == verifiedFailed { - proof.ProofDetail.ChunkProof.Proof = []byte(verifier.InvalidTestProof) - proof.ProofDetail.BatchProof.Proof = []byte(verifier.InvalidTestProof) + if proofStatus == verifiedFailed { + switch proverTaskSchema.TaskType { + case int(message.ProofTypeChunk): + chunkProof := message.ChunkProof{} + chunkProof.Proof = []byte(verifier.InvalidTestProof) + encodeData, err := 
json.Marshal(&chunkProof) + assert.NoError(t, err) + assert.NotEmpty(t, encodeData) + proof = encodeData + case int(message.ProofTypeBatch): + batchProof := message.BatchProof{} + batchProof.Proof = []byte(verifier.InvalidTestProof) + encodeData, err := json.Marshal(&batchProof) + assert.NoError(t, err) + assert.NotEmpty(t, encodeData) + proof = encodeData + } } - assert.NoError(t, proof.Sign(r.privKey)) submitProof := types.SubmitProofParameter{ - TaskID: proof.ID, - TaskType: int(proof.Type), - Status: int(proof.Status), - } - - switch proof.Type { - case message.ProofTypeChunk: - encodeData, err := json.Marshal(proof.ChunkProof) - assert.NoError(t, err) - assert.NotEmpty(t, encodeData) - submitProof.Proof = string(encodeData) - case message.ProofTypeBatch: - encodeData, err := json.Marshal(proof.BatchProof) - assert.NoError(t, err) - assert.NotEmpty(t, encodeData) - submitProof.Proof = string(encodeData) + UUID: proverTaskSchema.UUID, + TaskID: proverTaskSchema.TaskID, + TaskType: proverTaskSchema.TaskType, + Status: int(proofMsgStatus), + Proof: string(proof), } - token := r.connectToCoordinator(t, forkName) + token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))}) + assert.Equal(t, authErrCode, 0) + assert.Equal(t, errMsg, "") assert.NotEmpty(t, token) submitProofData, err := json.Marshal(submitProof) diff --git a/database/config.json b/database/config.json index eda41de4c4..58335b5972 100644 --- a/database/config.json +++ b/database/config.json @@ -1,5 +1,5 @@ { - "dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable", + "dsn": "postgres://localhost/scroll?sslmode=disable", "driver_name": "postgres", "maxOpenNum": 200, "maxIdleNum": 20 diff --git a/database/go.mod b/database/go.mod index 2ccc0fee1b..8366407f4c 100644 --- a/database/go.mod +++ b/database/go.mod @@ -6,7 +6,7 @@ require ( github.com/jmoiron/sqlx v1.3.5 github.com/lib/pq v1.10.9 github.com/pressly/goose/v3 v3.16.0 - github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea + github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.25.7 ) @@ -33,11 +33,11 @@ require ( github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect diff --git a/database/go.sum b/database/go.sum index 6301374cfb..81d0edeb5e 100644 --- a/database/go.sum +++ b/database/go.sum @@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= @@ -155,20 +155,20 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= google.golang.org/genproto/googleapis/rpc 
v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= diff --git a/database/migrate/migrate_test.go b/database/migrate/migrate_test.go index 5022e56009..1b283545a0 100644 --- a/database/migrate/migrate_test.go +++ b/database/migrate/migrate_test.go @@ -59,20 +59,20 @@ func testResetDB(t *testing.T) { cur, err := Current(pgDB) assert.NoError(t, err) // total number of tables. - assert.Equal(t, int64(20), cur) + assert.Equal(t, int64(22), cur) } func testMigrate(t *testing.T) { assert.NoError(t, Migrate(pgDB)) cur, err := Current(pgDB) assert.NoError(t, err) - assert.Equal(t, int64(20), cur) + assert.Equal(t, int64(22), cur) } func testRollback(t *testing.T) { version, err := Current(pgDB) assert.NoError(t, err) - assert.Equal(t, int64(20), version) + assert.Equal(t, int64(22), version) assert.NoError(t, Rollback(pgDB, nil)) diff --git a/database/migrate/migrations/00021_bundle.sql b/database/migrate/migrations/00021_bundle.sql new file mode 100644 index 0000000000..a5f50f5f68 --- /dev/null +++ b/database/migrate/migrations/00021_bundle.sql @@ -0,0 +1,53 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE bundle ( + index BIGSERIAL PRIMARY KEY, + hash VARCHAR NOT NULL, -- Not part of DA hash, used for SQL query consistency and ease of use, derived using keccak256(concat(start_batch_hash_bytes, end_batch_hash_bytes)). + start_batch_index BIGINT NOT NULL, + end_batch_index BIGINT NOT NULL, + start_batch_hash VARCHAR NOT NULL, + end_batch_hash VARCHAR NOT NULL, + codec_version SMALLINT NOT NULL, + +-- proof + batch_proofs_status SMALLINT NOT NULL DEFAULT 1, + proving_status SMALLINT NOT NULL DEFAULT 1, + proof BYTEA DEFAULT NULL, + proved_at TIMESTAMP(0) DEFAULT NULL, + proof_time_sec INTEGER DEFAULT NULL, + total_attempts SMALLINT NOT NULL DEFAULT 0, + active_attempts SMALLINT NOT NULL DEFAULT 0, + +-- rollup + rollup_status SMALLINT NOT NULL DEFAULT 1, + finalize_tx_hash VARCHAR DEFAULT NULL, + finalized_at TIMESTAMP(0) DEFAULT NULL, + +-- metadata + created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, + deleted_at TIMESTAMP(0) DEFAULT NULL +); + +CREATE INDEX idx_bundle_index_rollup_status ON bundle(index, rollup_status) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_hash ON bundle(hash) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_hash_proving_status ON bundle(hash, proving_status) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_index_desc ON bundle(index DESC) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_batch_proofs_status ON bundle(batch_proofs_status) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_start_batch_index ON bundle(start_batch_index) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_end_batch_index ON bundle(end_batch_index) WHERE deleted_at IS NULL; +create index idx_bundle_total_attempts_active_attempts_batch_proofs_status + on bundle (total_attempts, active_attempts, batch_proofs_status) + where deleted_at IS NULL; + +COMMENT ON COLUMN bundle.batch_proofs_status IS 'undefined, pending, ready'; +COMMENT ON COLUMN bundle.proving_status IS 'undefined, unassigned, assigned, proved (deprecated), verified, failed'; +COMMENT ON COLUMN bundle.rollup_status IS 'undefined, pending, committing (not used for bundles), committed (not used for bundles), finalizing, finalized, commit_failed (not used for bundles), finalize_failed'; + +-- +goose StatementEnd + +-- +goose Down 
+-- +goose StatementBegin +DROP TABLE IF EXISTS bundle; +-- +goose StatementEnd diff --git a/database/migrate/migrations/00022_add_bundle_hash_and_codec_version_to_batch.sql b/database/migrate/migrations/00022_add_bundle_hash_and_codec_version_to_batch.sql new file mode 100644 index 0000000000..08a48fad6c --- /dev/null +++ b/database/migrate/migrations/00022_add_bundle_hash_and_codec_version_to_batch.sql @@ -0,0 +1,23 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE batch +ADD COLUMN bundle_hash VARCHAR DEFAULT '', +ADD COLUMN codec_version SMALLINT DEFAULT 0; + +CREATE INDEX idx_batch_bundle_hash ON batch(bundle_hash); +CREATE INDEX idx_batch_index_codec_version ON batch(index, codec_version); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +DROP INDEX IF EXISTS idx_batch_bundle_hash; +DROP INDEX IF EXISTS idx_batch_index_codec_version; + +ALTER TABLE IF EXISTS batch +DROP COLUMN IF EXISTS bundle_hash, +DROP COLUMN IF EXISTS codec_version; + +-- +goose StatementEnd diff --git a/go.work.sum b/go.work.sum index 596dc59c89..e1251a7495 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,806 +1,738 @@ +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c h1:bkb2NMGo3/Du52wvYj9Whth5KZfMV6d3O0Vbr3nz/UE= bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= +cloud.google.com/go/accessapproval v1.7.4 h1:ZvLvJ952zK8pFHINjpMBY5k7LTAp/6pBf50RDMRgBUI= cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= +cloud.google.com/go/accesscontextmanager v1.8.4 h1:Yo4g2XrBETBCqyWIibN3NHNPQKUfQqti0lI+70rubeE= cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= +cloud.google.com/go/aiplatform v1.54.0 h1:wH7OYl9Vq/5tupok0BPTFY9xaTLb0GxkReHtB5PF7cI= cloud.google.com/go/aiplatform v1.54.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= +cloud.google.com/go/analytics v0.21.6 h1:fnV7B8lqyEYxCU0LKk+vUL7mTlqRAq4uFlIthIdr/iA= cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= +cloud.google.com/go/apigateway v1.6.4 h1:VVIxCtVerchHienSlaGzV6XJGtEM9828Erzyr3miUGs= cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= +cloud.google.com/go/apigeeconnect v1.6.4 h1:jSoGITWKgAj/ssVogNE9SdsTqcXnryPzsulENSRlusI= cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= +cloud.google.com/go/apigeeregistry v0.8.2 
h1:DSaD1iiqvELag+lV4VnnqUUFd8GXELu01tKVdWZrviE= cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= +cloud.google.com/go/appengine v1.8.4 h1:Qub3fqR7iA1daJWdzjp/Q0Jz0fUG0JbMc7Ui4E9IX/E= cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= +cloud.google.com/go/area120 v0.8.4 h1:YnSO8m02pOIo6AEOgiOoUDVbw4pf+bg2KLHi4rky320= cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= +cloud.google.com/go/artifactregistry v1.14.6 h1:/hQaadYytMdA5zBh+RciIrXZQBWK4vN7EUsrQHG+/t8= cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= +cloud.google.com/go/asset v1.15.3 h1:uI8Bdm81s0esVWbWrTHcjFDFKNOa9aB7rI1vud1hO84= cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= +cloud.google.com/go/assuredworkloads v1.11.4 h1:FsLSkmYYeNuzDm8L4YPfLWV+lQaUrJmH5OuD37t1k20= cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= +cloud.google.com/go/automl v1.13.4 h1:i9tOKXX+1gE7+rHpWKjiuPfGBVIYoWvLNIGpWgPtF58= cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= +cloud.google.com/go/baremetalsolution v1.2.3 h1:oQiFYYCe0vwp7J8ZmF6siVKEumWtiPFJMJcGuyDVRUk= cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= +cloud.google.com/go/batch v1.6.3 h1:mPiIH20a5NU02rucbAmLeO4sLPO9hrTK0BLjdHyW8xw= cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= +cloud.google.com/go/beyondcorp v1.0.3 h1:VXf9SnrnSmj2BF2cHkoTHvOUp8gjsz1KJFOMW7czdsY= cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.57.1 h1:FiULdbbzUxWD0Y4ZGPSVCDLvqRSyCIO6zKV7E2nf5uA= cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= -cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/billing v1.17.4 h1:77/4kCqzH6Ou5CCDzNmqmboE+WvbwFBJmw1QZQz19AI= cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= +cloud.google.com/go/binaryauthorization v1.7.3 h1:3R6WYn1JKIaVicBmo18jXubu7xh4mMkmbIgsTXk0cBA= cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= +cloud.google.com/go/certificatemanager v1.7.4 h1:5YMQ3Q+dqGpwUZ9X5sipsOQ1fLPsxod9HNq0+nrqc6I= cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= +cloud.google.com/go/channel v1.17.3 h1:Rd4+fBrjiN6tZ4TR8R/38elkyEkz6oogGDr7jDyjmMY= cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= +cloud.google.com/go/cloudbuild v1.15.0 h1:9IHfEMWdCklJ1cwouoiQrnxmP0q3pH7JUt8Hqx4Qbck= cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= +cloud.google.com/go/clouddms v1.7.3 h1:xe/wJKz55VO1+L891a1EG9lVUgfHr9Ju/I3xh1nwF84= cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= +cloud.google.com/go/cloudtasks v1.12.4 h1:5xXuFfAjg0Z5Wb81j2GAbB3e0bwroCeSF+5jBn/L650= cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= -cloud.google.com/go/compute 
v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/contactcenterinsights v1.12.0 h1:wP41IUA4ucMVooj/TP53jd7vbNjWrDkAPOeulVJGT5U= cloud.google.com/go/contactcenterinsights v1.12.0/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= +cloud.google.com/go/container v1.28.0 h1:/o82CFWXIYnT9p/07SnRgybqL3Pmmu86jYIlzlJVUBY= cloud.google.com/go/container v1.28.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= +cloud.google.com/go/containeranalysis v0.11.3 h1:5rhYLX+3a01drpREqBZVXR9YmWH45RnML++8NsCtuD8= cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= +cloud.google.com/go/datacatalog v1.19.0 h1:rbYNmHwvAOOwnW2FPXYkaK3Mf1MmGqRzK0mMiIEyLdo= cloud.google.com/go/datacatalog v1.19.0/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM= +cloud.google.com/go/dataflow v0.9.4 h1:7VmCNWcPJBS/srN2QnStTB6nu4Eb5TMcpkmtaPVhRt4= cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= +cloud.google.com/go/dataform v0.9.1 h1:jV+EsDamGX6cE127+QAcCR/lergVeeZdEQ6DdrxW3sQ= cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= +cloud.google.com/go/datafusion v1.7.4 h1:Q90alBEYlMi66zL5gMSGQHfbZLB55mOAg03DhwTTfsk= cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= +cloud.google.com/go/datalabeling v0.8.4 h1:zrq4uMmunf2KFDl/7dS6iCDBBAxBnKVDyw6+ajz3yu0= cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= +cloud.google.com/go/dataplex v1.11.2 h1:AfFFR15Ifh4U+Me1IBztrSd5CrasTODzy3x8KtDyHdc= cloud.google.com/go/dataplex v1.11.2/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= +cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3YgGU= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataproc/v2 v2.3.0 h1:tTVP9tTxmc8fixxOd/8s6Q6Pz/+yzn7r7XdZHretQH0= cloud.google.com/go/dataproc/v2 v2.3.0/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= +cloud.google.com/go/dataqna v0.8.4 h1:NJnu1kAPamZDs/if3bJ3+Wb6tjADHKL83NUWsaIp2zg= cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.15.0 h1:0P9WcsQeTWjuD1H14JIY7XQscIPQ4Laje8ti96IC5vg= cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= +cloud.google.com/go/datastream v1.10.3 h1:Z2sKPIB7bT2kMW5Uhxy44ZgdJzxzE5uKjavoW+EuHEE= cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= +cloud.google.com/go/deploy v1.15.0 h1:ZdmYzRMTGkVyP1nXEUat9FpbJGJemDcNcx82RSSOElc= cloud.google.com/go/deploy v1.15.0/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= +cloud.google.com/go/dialogflow v1.44.3 h1:cK/f88KX+YVR4tLH4clMQlvrLWD2qmKJQziusjGPjmc= cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= +cloud.google.com/go/dlp v1.11.1 h1:OFlXedmPP/5//X1hBEeq3D9kUVm9fb6ywYANlpv/EsQ= cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= +cloud.google.com/go/documentai v1.23.5 
h1:KAlzT+q8qvRxAmhsJUvLtfFHH0PNvz3M79H6CgVBKL8= cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= +cloud.google.com/go/domains v0.9.4 h1:ua4GvsDztZ5F3xqjeLKVRDeOvJshf5QFgWGg1CKti3A= cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= +cloud.google.com/go/edgecontainer v1.1.4 h1:Szy3Q/N6bqgQGyxqjI+6xJZbmvPvnFHp3UZr95DKcQ0= cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= +cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.6.5 h1:S2if6wkjR4JCEAfDtIiYtD+sTz/oXjh2NUG4cgT1y/Q= cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= +cloud.google.com/go/eventarc v1.13.3 h1:+pFmO4eu4dOVipSaFBLkmqrRYG94Xl/TQZFOeohkuqU= cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= +cloud.google.com/go/filestore v1.8.0 h1:/+wUEGwk3x3Kxomi2cP5dsR8+SIXxo7M0THDjreFSYo= cloud.google.com/go/filestore v1.8.0/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= +cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= +cloud.google.com/go/functions v1.15.4 h1:ZjdiV3MyumRM6++1Ixu6N0VV9LAGlCX4AhW6Yjr1t+U= cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= +cloud.google.com/go/gkebackup v1.3.4 h1:KhnOrr9A1tXYIYeXKqCKbCI8TL2ZNGiD3dm+d7BDUBg= cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= +cloud.google.com/go/gkeconnect v0.8.4 h1:1JLpZl31YhQDQeJ98tK6QiwTpgHFYRJwpntggpQQWis= cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= +cloud.google.com/go/gkehub v0.14.4 h1:J5tYUtb3r0cl2mM7+YHvV32eL+uZQ7lONyUZnPikCEo= cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= +cloud.google.com/go/gkemulticloud v1.0.3 h1:NmJsNX9uQ2CT78957xnjXZb26TDIMvv+d5W2vVUt0Pg= cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= +cloud.google.com/go/grafeas v0.3.0 h1:oyTL/KjiUeBs9eYLw/40cpSZglUC+0F7X4iu/8t7NWs= cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= +cloud.google.com/go/gsuiteaddons v1.6.4 h1:uuw2Xd37yHftViSI8J2hUcCS8S7SH3ZWH09sUDLW30Q= cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/iap v1.9.3 h1:M4vDbQ4TLXdaljXVZSwW7XtxpwXUUarY2lIs66m0aCM= cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= +cloud.google.com/go/ids v1.4.4 h1:VuFqv2ctf/A7AyKlNxVvlHTzjrEvumWaZflUzBPz/M4= cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= +cloud.google.com/go/iot v1.7.4 h1:m1WljtkZnvLTIRYW1YTOv5A6H1yKgLHR6nU7O8yf27w= cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= +cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM= cloud.google.com/go/kms v1.15.5/go.mod 
h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= +cloud.google.com/go/language v1.12.2 h1:zg9uq2yS9PGIOdc0Kz/l+zMtOlxKWonZjjo5w5YPG2A= cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= +cloud.google.com/go/lifesciences v0.9.4 h1:rZEI/UxcxVKEzyoRS/kdJ1VoolNItRWjNN0Uk9tfexg= cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= +cloud.google.com/go/logging v1.8.1 h1:26skQWPeYhvIasWKm48+Eq7oUqdcdbwsCVwz5Ys0FvU= cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI= -cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= +cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= +cloud.google.com/go/managedidentities v1.6.4 h1:SF/u1IJduMqQQdJA4MDyivlIQ4SrV5qAawkr/ZEREkY= cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= +cloud.google.com/go/maps v1.6.1 h1:2+eMp/1MvMPp5qrSOd3vtnLKa/pylt+krVRqET3jWsM= cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18= +cloud.google.com/go/mediatranslation v0.8.4 h1:VRCQfZB4s6jN0CSy7+cO3m4ewNwgVnaePanVCQh/9Z4= cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= +cloud.google.com/go/memcache v1.10.4 h1:cdex/ayDd294XBj2cGeMe6Y+H1JvhN8y78B9UW7pxuQ= cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= +cloud.google.com/go/metastore v1.13.3 h1:94l/Yxg9oBZjin2bzI79oK05feYefieDq0o5fjLSkC8= cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= +cloud.google.com/go/monitoring v1.16.3 h1:mf2SN9qSoBtIgiMA4R/y4VADPWZA7VCNJA079qLaZQ8= cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= +cloud.google.com/go/networkconnectivity v1.14.3 h1:e9lUkCe2BexsqsUc2bjV8+gFBpQa54J+/F3qKVtW+wA= cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= +cloud.google.com/go/networkmanagement v1.9.3 h1:HsQk4FNKJUX04k3OI6gUsoveiHMGvDRqlaFM2xGyvqU= cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= +cloud.google.com/go/networksecurity v0.9.4 h1:947tNIPnj1bMGTIEBo3fc4QrrFKS5hh0bFVsHmFm4Vo= cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= +cloud.google.com/go/notebooks v1.11.2 h1:eTOTfNL1yM6L/PCtquJwjWg7ZZGR0URFaFgbs8kllbM= cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= +cloud.google.com/go/optimization v1.6.2 h1:iFsoexcp13cGT3k/Hv8PA5aK+FP7FnbhwDO9llnruas= cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= +cloud.google.com/go/orchestration v1.8.4 h1:kgwZ2f6qMMYIVBtUGGoU8yjYWwMTHDanLwM/CQCFaoQ= cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= +cloud.google.com/go/orgpolicy v1.11.4 h1:RWuXQDr9GDYhjmrredQJC7aY7cbyqP9ZuLbq5GJGves= cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= +cloud.google.com/go/osconfig v1.12.4 h1:OrRCIYEAbrbXdhm13/JINn9pQchvTTIzgmOCA7uJw8I= cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= +cloud.google.com/go/oslogin v1.12.2 h1:NP/KgsD9+0r9hmHC5wKye0vJXVwdciv219DtYKYjgqE= 
cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY= +cloud.google.com/go/phishingprotection v0.8.4 h1:sPLUQkHq6b4AL0czSJZ0jd6vL55GSTHz2B3Md+TCZI0= cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= +cloud.google.com/go/policytroubleshooter v1.10.2 h1:sq+ScLP83d7GJy9+wpwYJVnY+q6xNTXwOdRIuYjvHT4= cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= +cloud.google.com/go/privatecatalog v0.9.4 h1:Vo10IpWKbNvc/z/QZPVXgCiwfjpWoZ/wbgful4Uh/4E= cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1 h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY= cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.4 h1:KOlLHLv3h3HwcZAkx91ubM3Oztz3JtT3ZacAJhWDorQ= cloud.google.com/go/recaptchaenterprise/v2 v2.8.4/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= +cloud.google.com/go/recommendationengine v0.8.4 h1:JRiwe4hvu3auuh2hujiTc2qNgPPfVp+Q8KOpsXlEzKQ= cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= +cloud.google.com/go/recommender v1.11.3 h1:VndmgyS/J3+izR8V8BHa7HV/uun8//ivQ3k5eVKKyyM= cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= +cloud.google.com/go/redis v1.14.1 h1:J9cEHxG9YLmA9o4jTSvWt/RuVEn6MTrPlYSCRHujxDQ= cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= +cloud.google.com/go/resourcemanager v1.9.4 h1:JwZ7Ggle54XQ/FVYSBrMLOQIKoIT/uer8mmNvNLK51k= cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= +cloud.google.com/go/resourcesettings v1.6.4 h1:yTIL2CsZswmMfFyx2Ic77oLVzfBFoWBYgpkgiSPnC4Y= cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= +cloud.google.com/go/retail v1.14.4 h1:geqdX1FNqqL2p0ADXjPpw8lq986iv5GrVcieTYafuJQ= cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= +cloud.google.com/go/run v1.3.3 h1:qdfZteAm+vgzN1iXzILo3nJFQbzziudkJrvd9wCf3FQ= cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= +cloud.google.com/go/scheduler v1.10.5 h1:eMEettHlFhG5pXsoHouIM5nRT+k+zU4+GUvRtnxhuVI= cloud.google.com/go/scheduler v1.10.5/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= +cloud.google.com/go/secretmanager v1.11.4 h1:krnX9qpG2kR2fJ+u+uNyNo+ACVhplIAS4Pu7u+4gd+k= cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= +cloud.google.com/go/security v1.15.4 h1:sdnh4Islb1ljaNhpIXlIPgb3eYj70QWgPVDKOUYvzJc= cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= +cloud.google.com/go/securitycenter v1.24.2 h1:qCEyXoJoxNKKA1bDywBjjqCB7ODXazzHnVWnG5Uqd1M= cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= +cloud.google.com/go/servicedirectory v1.11.3 h1:5niCMfkw+jifmFtbBrtRedbXkJm3fubSR/KHbxSJZVM= 
cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= +cloud.google.com/go/shell v1.7.4 h1:nurhlJcSVFZneoRZgkBEHumTYf/kFJptCK2eBUq/88M= cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= +cloud.google.com/go/spanner v1.53.0 h1:/NzWQJ1MEhdRcffiutRKbW/AIGVKhcTeivWTDjEyCCo= cloud.google.com/go/spanner v1.53.0/go.mod h1:liG4iCeLqm5L3fFLU5whFITqP0e0orsAW1uUSrd4rws= +cloud.google.com/go/speech v1.21.0 h1:qkxNao58oF8ghAHE1Eghen7XepawYEN5zuZXYWaUTA4= cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storagetransfer v1.10.3 h1:YM1dnj5gLjfL6aDldO2s4GeU8JoAvH1xyIwXre63KmI= cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= +cloud.google.com/go/talent v1.6.5 h1:LnRJhhYkODDBoTwf6BeYkiJHFw9k+1mAFNyArwZUZAs= cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= +cloud.google.com/go/texttospeech v1.7.4 h1:ahrzTgr7uAbvebuhkBAAVU6kRwVD0HWsmDsvMhtad5Q= cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= +cloud.google.com/go/tpu v1.6.4 h1:XIEH5c0WeYGaVy9H+UueiTaf3NI6XNdB4/v6TFQJxtE= cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= +cloud.google.com/go/trace v1.10.4 h1:2qOAuAzNezwW3QN+t41BtkDJOG42HywL73q8x/f6fnM= cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= +cloud.google.com/go/translate v1.9.3 h1:t5WXTqlrk8VVJu/i3WrYQACjzYJiff5szARHiyqqPzI= cloud.google.com/go/translate v1.9.3/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= +cloud.google.com/go/video v1.20.3 h1:Xrpbm2S9UFQ1pZEeJt9Vqm5t2T/z9y/M3rNXhFoo8Is= cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= +cloud.google.com/go/videointelligence v1.11.4 h1:YS4j7lY0zxYyneTFXjBJUj2r4CFe/UoIi/PJG0Zt/Rg= cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= +cloud.google.com/go/vision/v2 v2.7.5 h1:T/ujUghvEaTb+YnFY/jiYwVAkMbIC8EieK0CJo6B4vg= cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= +cloud.google.com/go/vmmigration v1.7.4 h1:qPNdab4aGgtaRX+51jCOtJxlJp6P26qua4o1xxUDjpc= cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= +cloud.google.com/go/vmwareengine v1.0.3 h1:WY526PqM6QNmFHSqe2sRfK6gRpzWjmL98UFkql2+JDM= cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= +cloud.google.com/go/vpcaccess v1.7.4 h1:zbs3V+9ux45KYq8lxxn/wgXole6SlBHHKKyZhNJoS+8= cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= +cloud.google.com/go/webrisk v1.9.4 h1:iceR3k0BCRZgf2D/NiKviVMFfuNC9LmeNLtxUFRB/wI= cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= +cloud.google.com/go/websecurityscanner v1.6.4 h1:5Gp7h5j7jywxLUp6NTpjNPkgZb3ngl0tUSw6ICWvtJQ= cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= +cloud.google.com/go/workflows v1.12.3 
h1:qocsqETmLAl34mSa01hKZjcqAvt699gaoFbooGGMvaM= cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= -collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= +github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0 h1:XMEdVDFxgulDDl0lQmAZS6j8gRQ/0pJ+ZpXH2FHVtDc= github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= 
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agnivade/levenshtein v1.0.1 h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/akavel/rsrc v0.10.2 h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw= github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= -github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/v12 v12.0.0 h1:xtZE63VWl7qLdB0JObIXvvhGjoVNrQ9ciIHG2OK5cmc= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= +github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a 
h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= -github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0= -github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= -github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= -github.com/aws/aws-sdk-go-v2/config v1.18.16 h1:4r7gsCu8Ekwl5iJGE/GmspA2UifqySCCkyyyPFeWs3w= -github.com/aws/aws-sdk-go-v2/config v1.18.16/go.mod h1:XjM6lVbq7UgELp9NjXBrb1DQY/ownlWsvDhEQksemJc= -github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= -github.com/aws/aws-sdk-go-v2/credentials v1.13.16 h1:GgToSxaENX/1zXIGNFfiVk4hxryYJ5Vt4Mh8XLAL7Lc= -github.com/aws/aws-sdk-go-v2/credentials v1.13.16/go.mod h1:KP7aFJhfwPFgx9aoVYL2nYHjya5WBD98CWaadpgmnpY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 h1:5qyqXASrX2zy5cTnoHHa4N2c3Lc94GH7gjnBP3GwKdU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24/go.mod h1:neYVaeKr5eT7BzwULuG2YbLhzWZ22lpjKdCybR7AXrQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56 h1:kFDCPqqVvb9vYcW82L7xYfrBGpuxXQ/8A/zYVayRQK4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56/go.mod h1:FoSBuessadgy8Cqp9gQF8U5rzi1XVQhiEJ6su2/kBEE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 h1:y+8n9AGDjikyXoMBTRaHHHSaFEB8267ykmvyPodJfys= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 h1:r+Kv+SEJquhAZXaJ7G4u44cIwXV3f8K+N482NNAzJZA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 h1:hf+Vhp5WtTdcSdE+yEcUz8L73sAzN0R+0jQv+Z51/mI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31/go.mod h1:5zUjguZfG5qjhG9/wqmuyHRyUftl2B5Cp6NNxNC6kRA= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.22 h1:lTqBRUuy8oLhBsnnVZf14uRbIHPHCrGqg4Plc8gU/1U= github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.22/go.mod h1:YsOa3tFriwWNvBPYHXM5ARiU2yqBNWPWeUiq+4i7Na0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.25 h1:B/hO3jfWRm7hP00UeieNlI5O2xP5WJ27tyJG5lzc7AM= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.25/go.mod h1:54K1zgxK/lai3a4HosE4IKBwZsP/5YAJ6dzJfwsjJ0U= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 h1:c5qGfdbCHav6viBwiyDns3OXqhqAbGjfIB4uVu2ayhk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24/go.mod h1:HMA4FZG6fyib+NDo5bpIxX1EhYjrAOveZJY2YR0xrNE= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24 h1:i4RH8DLv/BHY0fCrXYQDr+DGnWzaxB3Ee/esxUaSavk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24/go.mod h1:N8X45/o2cngvjCYi2ZnvI0P4mU4ZRJfEYC3maCSsPyw= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 h1:cKr6St+CtC3/dl/rEBJvlk7A/IN5D5F02GNkGzfbtVU= github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6 h1:zzTm99krKsFcF4N7pu2z17yCcAZpQYZ7jnJZPIgEMXE= github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6/go.mod h1:PudwVKUTApfm0nYaPutOXaKdPKTlZYClGBQpVIRdcbs= -github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 h1:bdKIX6SVF3nc3xJFw6Nf0igzS6Ff/louGq8Z6VP/3Hs= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.5/go.mod h1:vuWiaDB30M/QTC+lI3Wj6S/zb7tpUK2MSYgy3Guh2L0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 h1:xLPZMyuZ4GuqRCIec/zWuIhRFPXh2UOJdLXBSi64ZWQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5/go.mod h1:QjxpHmCwAg0ESGtPQnLIVp7SedTOBMYy+Slr3IfMKeI= -github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 h1:rIFn5J3yDoeuKCE9sESXqM5POTAhOP1du3bv/qTL+tE= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.6/go.mod h1:48WJ9l3dwP0GSHWGc5sFGGlCkuA82Mc2xnw+T6Q8aDw= -github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= +github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= +github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= 
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a h1:8d1CEOF1xldesKds5tRG3tExBsMOgWYownMHNCsev54= github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= -github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ= +github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= +github.com/cloudflare/redoctober v0.0.0-20211013234631-6a74ccc611f6 h1:QKzett0dn5FhjcIHNKSClEilabfhWCnsdijq3ftm9Ms= github.com/cloudflare/redoctober v0.0.0-20211013234631-6a74ccc611f6/go.mod h1:Ikt4Wfpln1YOrak+auA8BNxgiilj0Y2y7nO+aN2eMzk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/compose-spec/compose-go v1.20.0 h1:h4ZKOst1EF/DwZp7dWkb+wbTVE4nEyT9Lc89to84Ol4= github.com/compose-spec/compose-go v1.20.0/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM= -github.com/compose-spec/compose-go/v2 v2.0.0-rc.2 h1:eJ01FpliL/02KvsaPyH1bSLbM1S70yWQUojHVRbyvy4= -github.com/compose-spec/compose-go/v2 v2.0.0-rc.2/go.mod h1:IVsvFyGVhw4FASzUtlWNVaAOhYmakXAFY9IlZ7LAuD8= +github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA= github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA= +github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY= github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs/v2 v2.0.0 h1:FN4wsx7KQrYoLXN7uLP0vBV4oVWHOIKDRQ1G2Z0oL5M= github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k= +github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= 
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/fuse-overlayfs-snapshotter v1.0.2 h1:Xy9Tkx0tk/SsMfLDFc69wzqSrxQHYEFELHBO/Z8XO3M= github.com/containerd/fuse-overlayfs-snapshotter v1.0.2/go.mod h1:nRZceC8a7dRm3Ao6cJAwuJWPFiBPaibHiFntRUnzhwU= +github.com/containerd/go-cni v1.1.9 h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9rU= github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= +github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA= github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U= +github.com/containerd/imgcrypt v1.1.7 h1:WSf9o9EQ0KGHiUx2ESFZ+PKf4nxK9BcvV/nJDX8RkB4= github.com/containerd/imgcrypt v1.1.7/go.mod h1:FD8gqIcX5aTotCtOmjeCsi3A1dHmTZpnMISGKSczt4k= +github.com/containerd/nri v0.4.0 h1:PjgIBm0RtUiFyEO6JqPBQZRQicbsIz41Fz/5VSC0zgw= github.com/containerd/nri v0.4.0/go.mod h1:Zw9q2lP16sdg0zYybemZ9yTDy8g7fPCIB3KXOGlggXI= github.com/containerd/stargz-snapshotter v0.14.3/go.mod h1:j2Ya4JeA5gMZJr8BchSkPjlcCEh++auAxp4nidPI6N0= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v1.1.0 h1:n7OZ7jZumLIqNJqXrEc/paBM840mORnmGdJDmAmJZHM= github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= +github.com/containers/ocicrypt v1.1.6 h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI= github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc= +github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= +github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= +github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw= -github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI= -github.com/crate-crypto/go-ipa 
v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0= -github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= -github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA= github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= -github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73 h1:OGNva6WhsKst5OZf7eZOklDztV3hwtTHovdrLHV+MsA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA= github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o= -github.com/docker/compose/v2 v2.24.3 h1:BVc1oDV7aQgksH64pDKTvcI95G36uJ+Mz9DGGBBoZeQ= -github.com/docker/compose/v2 v2.24.3/go.mod h1:D8Nv9+juzD7xiMyyHJ7G2J/MOYiGBmb9SvdIW5+2zKo= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= +github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= -github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae h1:UTOyRlLeWJrZx+ynml6q6qzZ1uDkJe/0Z5CMZRbEIJg= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= +github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA= +github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c= +github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s= github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= -github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getsentry/sentry-go v0.11.0 h1:qro8uttJGvNAMr5CLcFI9CHR0aDzXl0Vs3Pmw/oTPg8= github.com/getsentry/sentry-go v0.11.0/go.mod h1:KBQIxiZAetw62Cj8Ri964vAEWVdgfaUCn30Q3bCvANo= 
-github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 
h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= -github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= -github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= -github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= 
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gotestyourself/gotestyourself v1.4.0 h1:CDSlSIuRL/Fsc72Ln5lMybtrCvSRDddsHsDRG/nP7Rg= github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= -github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hanwen/go-fuse/v2 v2.2.0 h1:jo5QZYmBLNcl9ovypWaQ5yXMSSV+Ch68xoC3rtZvvBM= github.com/hanwen/go-fuse/v2 v2.2.0/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc= +github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992 h1:fYOrSfO5C9PmFGtmRWSYGqq52SOoE2dXMtAn2Xzh1LQ= github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= -github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo= -github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k= github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= -github.com/influxdata/influxql 
v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= -github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM= github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= -github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= -github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= -github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c= github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk= -github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8= -github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= +github.com/josephspurrier/goversioninfo v1.4.0 h1:Puhl12NSHUSALHSuzYwPYQkqa2E1+7SrtAPJorKK0C8= github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= +github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod 
h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY= github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 h1:veS9QfglfvqAw2e+eeNT/SbGySq8ajECXJ9e4fPoLhY= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= +github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/knz/go-libedit v1.10.1 h1:0pHpWtx9vcvC0xGZqEQlQdfSQs7WRlAjuPvk3fOZDCo= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/kylelemons/go-gypsy v1.0.0 h1:7/wQ7A3UL1bnqRMnZ6T8cwCOArfZCxFmb1iTxaOOo1s= github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A= github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= +github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4= github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/iter v1.0.1 h1:q8faalr2dY6o8bV45uwrxq12bRa1ezKrB6oM9FUgN4A= github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= +github.com/lestrrat-go/jwx v1.2.25 h1:tAx93jN2SdPvFn08fHNAhqFJazn5mBBOB8Zli0g0otA= github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY= +github.com/lestrrat-go/option v1.0.0 h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4= github.com/lestrrat-go/option v1.0.0/go.mod 
h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= -github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/hashstructure/v2 v2.0.2 
h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mmcloughlin/profile v0.1.1 h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w= +github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk= github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/open-policy-agent/opa v0.42.2 h1:qocVAKyjrqMjCqsU02S/gHyLr4AQQ9xMtuV1kKnnyhM= github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s= +github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0= github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o= github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c= -github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 
-github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/profile v1.5.0 h1:042Buzk+NhDI+DeSAA62RwJL8VAuZUMQZUjCsRz1Mug= github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= -github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= -github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk= -github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= -github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= -github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce h1:SN43TBs7VaJt9q737eWWqGz0OCg4v+PtUn3RbJcG1o0= -github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM= -github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM= -github.com/scroll-tech/da-codec v0.0.0-20240708144942-a554fe417a39 h1:1gg+HPuirILVVVugZczq7ZQtPdCC/a38pwaFBqFtt1o= -github.com/scroll-tech/da-codec 
v0.0.0-20240708144942-a554fe417a39/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= -github.com/scroll-tech/da-codec v0.0.0-20240711065717-10bc0bed2be3 h1:QDiMz4RVjNigz4sHh1ieMe4nl/ca+MrXxyaVf0IKePk= -github.com/scroll-tech/da-codec v0.0.0-20240711065717-10bc0bed2be3/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= -github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0= -github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0= -github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6/go.mod h1:eW+eyNdMoO0MyuczCc9xWSnW8dPJ0kOy5xsxgOKYEaA= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230306131930-03b4de32b78b/go.mod h1:f9ygxrxL7WRCTzuloV+t/UlcxMq3AL+gcNU60liiNNU= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230802095950-4b2bbf6225e7/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA= -github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= -github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY= -github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v3 v3.22.8 h1:a4s3hXogo5mE2PfdfJIonDbstO/P+9JszdfhAHSzD9Y= -github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ= github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k= github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1KMdE= -github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk= -github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZycQ= -github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs= -github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM= -github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE= -github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8= -github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU= -github.com/testcontainers/testcontainers-go/modules/compose v0.28.0 h1:QOCeTYZIYixg796Ik60MOaeMgpAKPbQd5pJOdTrftyg= -github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y= -github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4= -github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8= -github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/go-sysconf 
v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc= github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg= +github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0= github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= -github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc= github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= +github.com/veraison/go-cose v1.0.0-rc.1 h1:4qA7dbFJGvt7gcqv5MCIyCQvN+NpHFPkW7do3EeDLb8= github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xhit/go-str2duration v1.2.0 h1:BcV5u025cITWxEQKGWr1URRzrcXtu7uk8+luz3Yuhwc= github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= +github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg= github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= 
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g= github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= +go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= +go.etcd.io/etcd/pkg/v3 v3.5.5 h1:Ablg7T7OkR+AeeeU32kdVhw/AGDsitkKPl7aW73ssjU= go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE= +go.etcd.io/etcd/raft/v3 v3.5.5 h1:Ibz6XyZ60OYyRopu73lLM/P+qco3YtlZMOhnXNS051I= go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI= +go.etcd.io/etcd/server/v3 v3.5.5 h1:jNjYm/9s+f9A9r6+SC4RvNaz6AqixpOvhrFdT0PvIj0= go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc= -go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 
h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= 
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2 h1:IRJeR9r1pYWsHKTRe/IInb7lYvbBVIqOgsX/u0mbOWY= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= 
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U= -gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A= -gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +k8s.io/component-base v0.26.7 h1:uqsOyZh0Zqoaup8tmHa491D/CvgFdGUs+X2H/inNUKM= k8s.io/component-base v0.26.7/go.mod h1:CZe1HTmX/DQdeBrb9XYOXzs96jXth8ZbFvhLMsoJLUg= +k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q= k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kms v0.26.7 h1:vruEJNh2IyFnPHbCH8CpUjekHy1HFJtHd/lE2K0lU78= k8s.io/kms v0.26.7/go.mod h1:AYuV9ZebRhr6cb1eT9L6kZVxvgIUxmE1Fe6kPhqYvuc= +kernel.org/pub/linux/libs/security/libcap/cap v1.2.67 h1:sPQ9qlSNR26fToTKbxe/HDWJlXvBLqGmt84LGCQkOy0= kernel.org/pub/linux/libs/security/libcap/cap v1.2.67/go.mod 
h1:GkntoBuwffz19qtdFVB+k2NtWNN+yCKnC/Ykv/hMiTU= +kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 h1:NxbXJ7pDVq0FKBsqjieT92QDXI2XaqH2HAi4QcCOHt8= kernel.org/pub/linux/libs/security/libcap/psx v1.2.67/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +nullprogram.com/x/optparse v1.0.0 h1:xGFgVi5ZaWOnYdac2foDT3vg0ZZC9ErXFV57mr4OHrI= +rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 h1:fAPTNEpzQMOLMGwOHNbUkR2xXTQwMJOZYNx+/mLlOh0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37/go.mod h1:vfnxT4FXNT8eGvO+xi/DsyC/qHmdujqwrUa1WSspCsk= +tags.cncf.io/container-device-interface/specs-go v0.6.0 h1:V+tJJN6dqu8Vym6p+Ru+K5mJ49WL6Aoc5SJFSY0RLsQ= tags.cncf.io/container-device-interface/specs-go v0.6.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80= diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 065e23d21d..ded147e00d 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -30,14 +30,18 @@ dependencies = [ [[package]] name = "aggregator" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ "ark-std 0.3.0", + "bitstream-io", "c-kzg", + "ctor 0.1.26", + "encoder", "env_logger 0.10.2", - "eth-types 0.1.0", + "eth-types 0.11.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", + "gadgets 0.11.0", "halo2-base", "halo2-ecc", "halo2_proofs", @@ -47,19 +51,21 @@ dependencies = [ "num-bigint", "once_cell", "rand", - "revm-precompile 2.0.0", - "revm-primitives 3.1.1", + "revm-precompile", + "revm-primitives", "serde", "serde_json", "snark-verifier", "snark-verifier-sdk", - "zkevm-circuits 0.1.0", + "strum 0.25.0", + "strum_macros 0.25.3", + "zkevm-circuits 0.11.0", ] [[package]] name = "aggregator" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ "ark-std 0.3.0", "bitstream-io", @@ -67,9 +73,9 @@ dependencies = [ "ctor 0.1.26", "encoder", "env_logger 0.10.2", - "eth-types 0.11.0", + "eth-types 0.12.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", - "gadgets 0.11.0", + "gadgets 0.12.0", "halo2-base", "halo2-ecc", "halo2_proofs", @@ -79,15 +85,15 @@ dependencies = [ "num-bigint", "once_cell", "rand", - "revm-precompile 7.0.0", - "revm-primitives 4.0.0", + "revm-precompile", + "revm-primitives", "serde", "serde_json", "snark-verifier", "snark-verifier-sdk", "strum 0.25.0", "strum_macros 0.25.3", - "zkevm-circuits 0.11.0", + "zkevm-circuits 0.12.0", ] [[package]] @@ -128,9 +134,9 @@ dependencies = [ "cfg-if 1.0.0", "const-hex", "derive_more", - "hex-literal 0.4.1", + "hex-literal", "itoa", - "k256 0.13.3", + "k256", "keccak-asm", "proptest", "rand", @@ -450,12 +456,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -596,8 
+596,8 @@ name = "bls12_381" version = "0.8.0" source = "git+https://github.com/scroll-tech/bls12_381?branch=feat/impl_scalar_field#2c515f73a2462fef8681c8e884edf1710f52b22a" dependencies = [ - "ff 0.13.0", - "group 0.13.0", + "ff", + "group", "pairing", "pasta_curves", "rand_core", @@ -634,24 +634,25 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bus-mapping" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ - "eth-types 0.1.0", + "eth-types 0.11.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "ethers-providers 2.0.7 (registry+https://github.com/rust-lang/crates.io-index)", "ethers-signers", - "gadgets 0.1.0", + "external-tracer 0.11.0", + "gadgets 0.11.0", "halo2_proofs", "hex", "itertools 0.11.0", "log", - "mock 0.1.0", - "mpt-zktrie 0.1.0", + "mock 0.11.0", + "mpt-zktrie 0.11.0", "num", "poseidon-circuit", "rand", - "revm-precompile 2.0.0", + "revm-precompile", "serde", "serde_json", "strum 0.25.0", @@ -660,25 +661,23 @@ dependencies = [ [[package]] name = "bus-mapping" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ - "eth-types 0.11.0", + "eth-types 0.12.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "ethers-providers 2.0.7 (registry+https://github.com/rust-lang/crates.io-index)", "ethers-signers", - "external-tracer 0.11.0", - "gadgets 0.11.0", + "gadgets 0.12.0", "halo2_proofs", "hex", "itertools 0.11.0", "log", - "mock 0.11.0", - "mpt-zktrie 0.11.0", + "mock 0.12.0", + "mpt-zktrie 0.12.0", "num", "poseidon-circuit", - "rand", - "revm-precompile 7.0.0", + "revm-precompile", "serde", "serde_json", "strum 0.25.0", @@ -817,7 +816,7 @@ dependencies = [ "coins-core", "digest 0.10.7", "hmac", - "k256 0.13.3", + "k256", "serde", "sha2", "thiserror", @@ -992,18 +991,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core", - "subtle", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1096,15 +1083,6 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", -] - [[package]] name = "der" version = "0.7.9" @@ -1172,29 +1150,17 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" -[[package]] -name = "ecdsa" -version = "0.14.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] - [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.9", + "der", "digest 0.10.7", - "elliptic-curve 0.13.8", - "rfc6979 0.4.0", - "signature 2.2.0", + "elliptic-curve", + "rfc6979", + "signature", "spki", ] @@ -1204,40 +1170,21 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array", - "group 0.12.1", - "rand_core", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.13.0", + "ff", "generic-array", - "group 0.13.0", + "group", "pkcs8", "rand_core", - "sec1 0.7.3", + "sec1", "subtle", "zeroize", ] @@ -1268,7 +1215,7 @@ dependencies = [ "base64 0.13.1", "bytes", "hex", - "k256 0.13.3", + "k256", "log", "rand", "rlp", @@ -1364,22 +1311,25 @@ dependencies = [ [[package]] name = "eth-types" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ "base64 0.13.1", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "ethers-signers", - "halo2-base", - "halo2_proofs", + "halo2curves", "hex", "itertools 0.11.0", + "log", "num", "num-bigint", - "poseidon-circuit", + "poseidon-base", "regex", + "revm-precompile", + "revm-primitives", "serde", "serde_json", + "serde_stacker", "serde_with", "sha3 0.10.8", "strum 0.25.0", @@ -1390,8 +1340,8 @@ dependencies = [ [[package]] name = "eth-types" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ "base64 0.13.1", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", @@ -1404,11 +1354,10 @@ dependencies = [ "num-bigint", "poseidon-base", "regex", - "revm-precompile 7.0.0", - "revm-primitives 4.0.0", + "revm-precompile", + "revm-primitives", "serde", "serde_json", - "serde_stacker", "serde_with", "sha3 0.10.8", "strum 0.25.0", @@ -1474,11 +1423,11 @@ dependencies = [ "arrayvec", "bytes", "chrono", - "elliptic-curve 0.13.8", + "elliptic-curve", "ethabi", "generic-array", "hex", - "k256 0.13.3", + "k256", "num_enum 0.6.1", "open-fastrlp", "rand", @@ -1500,11 +1449,11 @@ dependencies = [ 
"arrayvec", "bytes", "chrono", - "elliptic-curve 0.13.8", + "elliptic-curve", "ethabi", "generic-array", "hex", - "k256 0.13.3", + "k256", "num_enum 0.6.1", "open-fastrlp", "rand", @@ -1599,7 +1548,7 @@ dependencies = [ "async-trait", "coins-bip32", "coins-bip39", - "elliptic-curve 0.13.8", + "elliptic-curve", "eth-keystore", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "hex", @@ -1611,11 +1560,11 @@ dependencies = [ [[package]] name = "external-tracer" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ - "eth-types 0.1.0", - "geth-utils 0.1.0", + "eth-types 0.11.0", + "geth-utils 0.11.0", "log", "serde", "serde_json", @@ -1624,11 +1573,11 @@ dependencies = [ [[package]] name = "external-tracer" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ - "eth-types 0.11.0", - "geth-utils 0.11.0", + "eth-types 0.12.0", + "geth-utils 0.12.0", "log", "serde", "serde_json", @@ -1652,16 +1601,6 @@ dependencies = [ "bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core", - "subtle", -] - [[package]] name = "ff" version = "0.13.0" @@ -1851,21 +1790,22 @@ dependencies = [ [[package]] name = "gadgets" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ - "eth-types 0.1.0", + "eth-types 0.11.0", "halo2_proofs", + "poseidon-base", "sha3 0.10.8", "strum 0.25.0", ] [[package]] name = "gadgets" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ - "eth-types 0.11.0", + "eth-types 0.12.0", "halo2_proofs", "poseidon-base", "sha3 0.10.8", @@ -1885,8 +1825,8 @@ dependencies = [ [[package]] name = "geth-utils" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ "env_logger 0.10.2", "gobuild", @@ -1895,8 +1835,8 @@ dependencies = [ [[package]] name = "geth-utils" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ "env_logger 0.10.2", "gobuild", @@ -1968,24 +1908,13 @@ dependencies = [ "cc", ] -[[package]] -name = "group" -version = "0.12.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", "rand_core", "subtle", ] @@ -2033,7 +1962,7 @@ name = "halo2-base" version = "0.2.2" source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#817cace374a9f4b2eca682b1cc36f143255ea25f" dependencies = [ - "ff 0.13.0", + "ff", "halo2_proofs", "itertools 0.10.5", "num-bigint", @@ -2048,8 +1977,8 @@ name = "halo2-ecc" version = "0.2.2" source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#817cace374a9f4b2eca682b1cc36f143255ea25f" dependencies = [ - "ff 0.13.0", - "group 0.13.0", + "ff", + "group", "halo2-base", "itertools 0.10.5", "num-bigint", @@ -2108,8 +2037,8 @@ source = "git+https://github.com/scroll-tech/halo2.git?branch=v1.1#e5ddf67e5ae16 dependencies = [ "arrayvec", "bitvec", - "ff 0.13.0", - "group 0.13.0", + "ff", + "group", "halo2_proofs", "halo2curves", "lazy_static", @@ -2127,8 +2056,8 @@ dependencies = [ "blake2b_simd", "cfg-if 0.1.10", "crossbeam", - "ff 0.13.0", - "group 0.13.0", + "ff", + "group", "halo2curves", "log", "maybe-rayon", @@ -2150,8 +2079,8 @@ source = "git+https://github.com/scroll-tech/halo2curves?branch=v0.1.0#112f5b9bf dependencies = [ "blake2b_simd", "bls12_381", - "ff 0.13.0", - "group 0.13.0", + "ff", + "group", "lazy_static", "maybe-rayon", "num-bigint", @@ -2218,12 +2147,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - [[package]] name = "hex-literal" version = "0.4.1" @@ -2589,19 +2512,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if 1.0.0", - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2", - "sha3 0.10.8", -] - [[package]] name = "k256" version = "0.13.3" @@ -2609,11 +2519,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if 1.0.0", - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", + "ecdsa", + "elliptic-curve", "once_cell", "sha2", - "signature 2.2.0", + "signature", ] [[package]] @@ -2635,20 +2545,6 @@ dependencies = [ "sha3-asm", ] -[[package]] -name = "keccak256" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" -dependencies = [ - "env_logger 0.10.2", - "eth-types 0.1.0", - "halo2_proofs", - "itertools 0.11.0", - "log", - "num-bigint", - "num-traits", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -2779,13 +2675,13 @@ dependencies = [ [[package]] name = "mock" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = 
"git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ - "eth-types 0.1.0", + "eth-types 0.11.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "ethers-signers", - "external-tracer 0.1.0", + "external-tracer 0.11.0", "itertools 0.11.0", "log", "rand", @@ -2794,13 +2690,13 @@ dependencies = [ [[package]] name = "mock" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ - "eth-types 0.11.0", + "eth-types 0.12.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "ethers-signers", - "external-tracer 0.11.0", + "external-tracer 0.12.0", "itertools 0.11.0", "log", "rand", @@ -2809,31 +2705,30 @@ dependencies = [ [[package]] name = "mpt-zktrie" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ - "eth-types 0.1.0", - "halo2-mpt-circuits", - "halo2_proofs", + "eth-types 0.11.0", + "halo2curves", "hex", "log", "num-bigint", - "poseidon-circuit", - "zktrie 0.2.0", + "poseidon-base", + "zktrie", ] [[package]] name = "mpt-zktrie" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ - "eth-types 0.11.0", + "eth-types 0.12.0", "halo2curves", "hex", "log", "num-bigint", "poseidon-base", - "zktrie 0.3.0", + "zktrie", ] [[package]] @@ -3088,7 +2983,7 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.13.0", + "group", ] [[package]] @@ -3172,8 +3067,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" dependencies = [ "blake2b_simd", - "ff 0.13.0", - "group 0.13.0", + "ff", + "group", "lazy_static", "rand", "static_assertions", @@ -3270,7 +3165,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.9", + "der", "spki", ] @@ -3304,7 +3199,7 @@ name = "poseidon-circuit" version = "0.1.0" source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#7b96835c6201afdbfaf3d13d641efbaaf5db2d20" dependencies = [ - "ff 0.13.0", + "ff", "halo2_proofs", "log", "poseidon-base", @@ -3399,8 +3294,8 @@ dependencies = [ "http 1.1.0", "log", "once_cell", - "prover 0.1.0 (git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10)", "prover 0.11.0", + "prover 0.12.0", "rand", "reqwest 0.12.4", "reqwest-middleware", @@ -3416,17 +3311,17 @@ dependencies = [ [[package]] name = "prover" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = 
"git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ - "aggregator 0.1.0", + "aggregator 0.11.0", "anyhow", "base64 0.13.1", "blake2", - "bus-mapping 0.1.0", + "bus-mapping 0.11.0", "chrono", "dotenvy", - "eth-types 0.1.0", + "eth-types 0.11.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "git-version", "halo2_proofs", @@ -3434,7 +3329,7 @@ dependencies = [ "itertools 0.11.0", "log", "log4rs", - "mpt-zktrie 0.1.0", + "mpt-zktrie 0.11.0", "num-bigint", "rand", "rand_xorshift", @@ -3445,22 +3340,22 @@ dependencies = [ "sha2", "snark-verifier", "snark-verifier-sdk", - "zkevm-circuits 0.1.0", + "zkevm-circuits 0.11.0", ] [[package]] name = "prover" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ - "aggregator 0.11.0", + "aggregator 0.12.0", "anyhow", "base64 0.13.1", "blake2", - "bus-mapping 0.11.0", + "bus-mapping 0.12.0", "chrono", "dotenvy", - "eth-types 0.11.0", + "eth-types 0.12.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "git-version", "halo2_proofs", @@ -3468,7 +3363,7 @@ dependencies = [ "itertools 0.11.0", "log", "log4rs", - "mpt-zktrie 0.11.0", + "mpt-zktrie 0.12.0", "num-bigint", "rand", "rand_xorshift", @@ -3479,7 +3374,7 @@ dependencies = [ "sha2", "snark-verifier", "snark-verifier-sdk", - "zkevm-circuits 0.11.0", + "zkevm-circuits 0.12.0", ] [[package]] @@ -3768,81 +3663,26 @@ dependencies = [ "sha3 0.10.8", ] -[[package]] -name = "revm-precompile" -version = "2.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll-fix#aebf2e591e622e6bcce2c5d4bf3336935a68cf11" -dependencies = [ - "k256 0.11.6", - "num", - "once_cell", - "revm-primitives 1.0.0", - "ripemd", - "secp256k1 0.26.0", - "sha2", - "sha3 0.10.8", - "substrate-bn", -] - [[package]] name = "revm-precompile" version = "7.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30" +source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a" dependencies = [ "aurora-engine-modexp", "c-kzg", - "k256 0.13.3", + "k256", "once_cell", - "revm-primitives 4.0.0", + "revm-primitives", "ripemd", "secp256k1 0.29.0", "sha2", "substrate-bn", ] -[[package]] -name = "revm-primitives" -version = "1.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll-fix#aebf2e591e622e6bcce2c5d4bf3336935a68cf11" -dependencies = [ - "auto_impl", - "bytes", - "derive_more", - "enumn", - "fixed-hash", - "hashbrown 0.13.2", - "hex", - "hex-literal 0.3.4", - "rlp", - "ruint", - "sha3 0.10.8", -] - -[[package]] -name = "revm-primitives" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbbc9640790cebcb731289afb7a7d96d16ad94afeb64b5d0b66443bd151e79d6" -dependencies = [ - "alloy-primitives", - "auto_impl", - "bitflags 2.5.0", - "bitvec", - "c-kzg", - "cfg-if 1.0.0", - "derive_more", - "dyn-clone", - "enumn", - "hashbrown 0.14.5", - "hex", - "once_cell", - "serde", -] - [[package]] name = "revm-primitives" version = "4.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30" +source = 
"git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a" dependencies = [ "alloy-primitives", "auto_impl", @@ -3879,17 +3719,6 @@ dependencies = [ "substrate-bn", ] -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -4191,27 +4020,14 @@ dependencies = [ "untrusted 0.9.0", ] -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array", - "subtle", - "zeroize", -] - [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.2.0", - "der 0.7.9", + "base16ct", + "der", "generic-array", "pkcs8", "subtle", @@ -4227,15 +4043,6 @@ dependencies = [ "secp256k1-sys 0.6.1", ] -[[package]] -name = "secp256k1" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" -dependencies = [ - "secp256k1-sys 0.8.1", -] - [[package]] name = "secp256k1" version = "0.29.0" @@ -4255,15 +4062,6 @@ dependencies = [ "cc", ] -[[package]] -name = "secp256k1-sys" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" -dependencies = [ - "cc", -] - [[package]] name = "secp256k1-sys" version = "0.10.0" @@ -4470,16 +4268,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core", -] - [[package]] name = "signature" version = "2.2.0" @@ -4551,7 +4339,7 @@ source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#58c46 dependencies = [ "bincode", "ethereum-types", - "ff 0.13.0", + "ff", "halo2-base", "hex", "itertools 0.12.1", @@ -4595,7 +4383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.9", + "der", ] [[package]] @@ -5552,29 +5340,29 @@ dependencies = [ [[package]] name = "zkevm-circuits" -version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=v0.10#cfa71a8abe45c21582a6b7ebb85b5b1c4cff01eb" +version = "0.11.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" dependencies = [ "array-init", - "bus-mapping 0.1.0", + "bus-mapping 0.11.0", "either", "env_logger 0.10.2", - "eth-types 0.1.0", + "eth-types 0.11.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "ethers-signers", - "ff 0.13.0", - "gadgets 0.1.0", + "ff", + "gadgets 0.11.0", "halo2-base", "halo2-ecc", + "halo2-mpt-circuits", "halo2_gadgets", "halo2_proofs", "hex", "itertools 0.11.0", - "keccak256", "log", "misc-precompiled-circuit", - "mock 0.1.0", - "mpt-zktrie 
0.1.0", + "mock 0.11.0", + "mpt-zktrie 0.11.0", "num", "num-bigint", "poseidon-circuit", @@ -5594,18 +5382,18 @@ dependencies = [ [[package]] name = "zkevm-circuits" -version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.5#6ea8fb3fad4d8a8bfe873e18e2f881ad1c807ded" +version = "0.12.0" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0#6a1f65a1f99429f3725ef4d6788f5643bb61aa6f" dependencies = [ "array-init", - "bus-mapping 0.11.0", + "bus-mapping 0.12.0", "either", "env_logger 0.10.2", - "eth-types 0.11.0", + "eth-types 0.12.0", "ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)", "ethers-signers", - "ff 0.13.0", - "gadgets 0.11.0", + "ff", + "gadgets 0.12.0", "halo2-base", "halo2-ecc", "halo2-mpt-circuits", @@ -5615,8 +5403,8 @@ dependencies = [ "itertools 0.11.0", "log", "misc-precompiled-circuit", - "mock 0.11.0", - "mpt-zktrie 0.11.0", + "mock 0.12.0", + "mpt-zktrie 0.12.0", "num", "num-bigint", "poseidon-circuit", @@ -5634,14 +5422,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "zktrie" -version = "0.2.0" -source = "git+https://github.com/scroll-tech/zktrie.git?tag=v0.7.1#a12f2f262ad3e82301e39ecdf9bfe235befc7074" -dependencies = [ - "gobuild", -] - [[package]] name = "zktrie" version = "0.3.0" diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 192f617340..19faa80f84 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -29,8 +29,8 @@ ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = " ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" } snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] } -prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", branch = "v0.10", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] } -prover_next = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.5", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] } +prover_curie = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.5", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] } +prover_darwin = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.0", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] } base64 = "0.13.1" reqwest = { version = "0.12.4", features = ["gzip"] } reqwest-middleware = "0.3" diff --git a/prover/Makefile b/prover/Makefile index d0b06e7589..6bcd3faa7f 100644 --- a/prover/Makefile +++ b/prover/Makefile @@ -37,14 +37,10 @@ endif prover: GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release - rm -rf ./lib && mkdir ./lib - find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib tests_binary: cargo clean && cargo test --release --no-run ls target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test - rm -rf ./lib && mkdir ./lib - find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib lint: cargo check --all-features diff --git a/prover/src/config.rs b/prover/src/config.rs index 5f301337f9..4e3c1f2ccc 100644 --- a/prover/src/config.rs +++ b/prover/src/config.rs @@ -2,7 +2,7 @@ use anyhow::{bail, Result}; use serde::{Deserialize, Serialize}; 
use std::fs::File; -use crate::types::ProofType; +use crate::types::ProverType; #[derive(Debug, Serialize, Deserialize)] pub struct CircuitConfig { @@ -24,14 +24,13 @@ pub struct L2GethConfig { pub endpoint: String, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Deserialize)] pub struct Config { pub prover_name: String, pub keystore_path: String, pub keystore_password: String, pub db_path: String, - #[serde(default)] - pub proof_type: ProofType, + pub prover_type: ProverType, pub low_version_circuit: CircuitConfig, pub high_version_circuit: CircuitConfig, pub coordinator: CoordinatorConfig, diff --git a/prover/src/coordinator_client.rs b/prover/src/coordinator_client.rs index 52344756ce..56fd58954e 100644 --- a/prover/src/coordinator_client.rs +++ b/prover/src/coordinator_client.rs @@ -21,6 +21,7 @@ pub struct CoordinatorClient<'a> { key_signer: Rc, rt: Runtime, listener: Box, + vks: Vec, } impl<'a> CoordinatorClient<'a> { @@ -28,6 +29,7 @@ impl<'a> CoordinatorClient<'a> { config: &'a Config, key_signer: Rc, listener: Box, + vks: Vec, ) -> Result { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() @@ -46,6 +48,7 @@ impl<'a> CoordinatorClient<'a> { key_signer, rt, listener, + vks, }; client.login()?; Ok(client) @@ -68,12 +71,15 @@ impl<'a> CoordinatorClient<'a> { challenge: token.clone(), prover_name: self.config.prover_name.clone(), prover_version: crate::version::get_version(), + prover_types: vec![self.config.prover_type], + vks: self.vks.clone(), }; - let buffer = login_message.rlp(); + let buffer = rlp::encode(&login_message); let signature = self.key_signer.sign_buffer(&buffer)?; let login_request = LoginRequest { message: login_message, + public_key: self.key_signer.get_public_key(), signature, }; let login_response = self.rt.block_on(api.login(&login_request, &token))?; diff --git a/prover/src/coordinator_client/types.rs b/prover/src/coordinator_client/types.rs index 2f641e2b73..c646a9afd4 100644 --- a/prover/src/coordinator_client/types.rs +++ b/prover/src/coordinator_client/types.rs @@ -1,6 +1,6 @@ use super::errors::ErrorCode; -use crate::types::{ProofFailureType, ProofStatus}; -use rlp::RlpStream; +use crate::types::{ProofFailureType, ProofStatus, ProverType, TaskType}; +use rlp::{Encodable, RlpStream}; use serde::{Deserialize, Serialize}; #[derive(Deserialize)] @@ -15,23 +15,36 @@ pub struct LoginMessage { pub challenge: String, pub prover_name: String, pub prover_version: String, + pub prover_types: Vec, + pub vks: Vec, } -impl LoginMessage { - pub fn rlp(&self) -> Vec { - let mut rlp = RlpStream::new(); - let num_fields = 3; - rlp.begin_list(num_fields); - rlp.append(&self.prover_name); - rlp.append(&self.prover_version); - rlp.append(&self.challenge); - rlp.out().freeze().into() +impl Encodable for LoginMessage { + fn rlp_append(&self, s: &mut RlpStream) { + let num_fields = 5; + s.begin_list(num_fields); + s.append(&self.challenge); + s.append(&self.prover_version); + s.append(&self.prover_name); + // The ProverType in go side is an type alias of uint8 + // A uint8 slice is treated as a string when doing the rlp encoding + let prover_types = self + .prover_types + .iter() + .map(|prover_type: &ProverType| prover_type.to_u8()) + .collect::>(); + s.append(&prover_types); + s.begin_list(self.vks.len()); + for vk in &self.vks { + s.append(vk); + } } } #[derive(Serialize, Deserialize)] pub struct LoginRequest { pub message: LoginMessage, + pub public_key: String, pub signature: String, } @@ -45,16 +58,15 @@ pub type ChallengeResponseData = 
LoginResponseData; #[derive(Default, Serialize, Deserialize)] pub struct GetTaskRequest { - pub task_type: crate::types::ProofType, + pub task_types: Vec, pub prover_height: Option, - pub vks: Vec, } #[derive(Serialize, Deserialize)] pub struct GetTaskResponseData { pub uuid: String, pub task_id: String, - pub task_type: crate::types::ProofType, + pub task_type: TaskType, pub task_data: String, pub hard_fork_name: String, } @@ -63,12 +75,11 @@ pub struct GetTaskResponseData { pub struct SubmitProofRequest { pub uuid: String, pub task_id: String, - pub task_type: crate::types::ProofType, + pub task_type: TaskType, pub status: ProofStatus, pub proof: String, pub failure_type: Option, pub failure_msg: Option, - pub hard_fork_name: String, } #[derive(Serialize, Deserialize)] diff --git a/prover/src/main.rs b/prover/src/main.rs index d7d2611154..22605a7a5c 100644 --- a/prover/src/main.rs +++ b/prover/src/main.rs @@ -65,7 +65,7 @@ fn main() -> Result<(), Box> { log::info!( "prover start successfully. name: {}, type: {:?}, publickey: {}, version: {}", config.prover_name, - config.proof_type, + config.prover_type, prover.get_public_key(), version::get_version(), ); diff --git a/prover/src/prover.rs b/prover/src/prover.rs index 538e1d2a21..7de83906e0 100644 --- a/prover/src/prover.rs +++ b/prover/src/prover.rs @@ -8,7 +8,8 @@ use crate::{ coordinator_client::{listener::Listener, types::*, CoordinatorClient}, geth_client::GethClient, key_signer::KeySigner, - types::{ProofFailureType, ProofStatus, ProofType}, + types::{ProofFailureType, ProofStatus, ProverType}, + utils::get_task_types, zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider}, }; @@ -24,16 +25,11 @@ pub struct Prover<'a> { impl<'a> Prover<'a> { pub fn new(config: &'a Config, coordinator_listener: Box) -> Result { - let proof_type = config.proof_type; + let prover_type = config.prover_type; let keystore_path = &config.keystore_path; let keystore_password = &config.keystore_password; - let key_signer = Rc::new(KeySigner::new(keystore_path, keystore_password)?); - let coordinator_client = - CoordinatorClient::new(config, Rc::clone(&key_signer), coordinator_listener) - .context("failed to create coordinator_client")?; - - let geth_client = if config.proof_type == ProofType::Chunk { + let geth_client = if config.prover_type == ProverType::Chunk { Some(Rc::new(RefCell::new( GethClient::new( &config.prover_name, @@ -45,9 +41,16 @@ impl<'a> Prover<'a> { None }; - let provider = CircuitsHandlerProvider::new(proof_type, config, geth_client.clone()) + let provider = CircuitsHandlerProvider::new(prover_type, config, geth_client.clone()) .context("failed to create circuits handler provider")?; + let vks = provider.init_vks(prover_type, config, geth_client.clone()); + + let key_signer = Rc::new(KeySigner::new(keystore_path, keystore_password)?); + let coordinator_client = + CoordinatorClient::new(config, Rc::clone(&key_signer), coordinator_listener, vks) + .context("failed to create coordinator_client")?; + let prover = Prover { config, key_signer: Rc::clone(&key_signer), @@ -59,10 +62,6 @@ impl<'a> Prover<'a> { Ok(prover) } - pub fn get_proof_type(&self) -> ProofType { - self.config.proof_type - } - pub fn get_public_key(&self) -> String { self.key_signer.get_public_key() } @@ -70,12 +69,11 @@ impl<'a> Prover<'a> { pub fn fetch_task(&self) -> Result { log::info!("[prover] start to fetch_task"); let mut req = GetTaskRequest { - task_type: self.get_proof_type(), + task_types: get_task_types(self.config.prover_type), prover_height: None, - 
vks: self.circuits_handler_provider.borrow().get_vks(), }; - if self.get_proof_type() == ProofType::Chunk { + if self.config.prover_type == ProverType::Chunk { let latest_block_number = self.get_latest_block_number_value()?; if let Some(v) = latest_block_number { if v.as_u64() == 0 { @@ -130,7 +128,6 @@ impl<'a> Prover<'a> { task_type: proof_detail.proof_type, status: ProofStatus::Ok, proof: proof_detail.proof_data, - hard_fork_name: task.hard_fork_name.clone(), ..Default::default() }; @@ -150,8 +147,7 @@ impl<'a> Prover<'a> { task_type: task.task_type, status: ProofStatus::Error, failure_type: Some(failure_type), - failure_msg: Some(error.to_string()), - hard_fork_name: task.hard_fork_name.clone(), + failure_msg: Some(format!("{:#}", error)), ..Default::default() }; self.do_submit(&request) diff --git a/prover/src/types.rs b/prover/src/types.rs index 4c835c92a0..47f2724fc7 100644 --- a/prover/src/types.rs +++ b/prover/src/types.rs @@ -6,57 +6,107 @@ use crate::coordinator_client::types::GetTaskResponseData; pub type CommonHash = H256; #[derive(Debug, Clone, Copy, PartialEq)] -pub enum ProofType { +pub enum TaskType { Undefined, Chunk, Batch, + Bundle, } -impl ProofType { +impl TaskType { fn from_u8(v: u8) -> Self { match v { - 1 => ProofType::Chunk, - 2 => ProofType::Batch, - _ => ProofType::Undefined, + 1 => TaskType::Chunk, + 2 => TaskType::Batch, + 3 => TaskType::Bundle, + _ => TaskType::Undefined, } } } -impl Serialize for ProofType { +impl Serialize for TaskType { fn serialize(&self, serializer: S) -> Result where S: Serializer, { match *self { - ProofType::Undefined => serializer.serialize_i8(0), - ProofType::Chunk => serializer.serialize_i8(1), - ProofType::Batch => serializer.serialize_i8(2), + TaskType::Undefined => serializer.serialize_u8(0), + TaskType::Chunk => serializer.serialize_u8(1), + TaskType::Batch => serializer.serialize_u8(2), + TaskType::Bundle => serializer.serialize_u8(3), } } } -impl<'de> Deserialize<'de> for ProofType { +impl<'de> Deserialize<'de> for TaskType { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { let v: u8 = u8::deserialize(deserializer)?; - Ok(ProofType::from_u8(v)) + Ok(TaskType::from_u8(v)) } } -impl Default for ProofType { +impl Default for TaskType { fn default() -> Self { Self::Undefined } } +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum ProverType { + Chunk, + Batch, +} + +impl ProverType { + fn from_u8(v: u8) -> Self { + match v { + 1 => ProverType::Chunk, + 2 => ProverType::Batch, + _ => { + panic!("invalid prover_type") + } + } + } + + pub fn to_u8(self) -> u8 { + match self { + ProverType::Chunk => 1, + ProverType::Batch => 2, + } + } +} + +impl Serialize for ProverType { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + ProverType::Chunk => serializer.serialize_u8(1), + ProverType::Batch => serializer.serialize_u8(2), + } + } +} + +impl<'de> Deserialize<'de> for ProverType { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let v: u8 = u8::deserialize(deserializer)?; + Ok(ProverType::from_u8(v)) + } +} + #[derive(Serialize, Deserialize, Default)] pub struct Task { pub uuid: String, pub id: String, #[serde(rename = "type", default)] - pub task_type: ProofType, + pub task_type: TaskType, pub task_data: String, #[serde(default)] pub hard_fork_name: String, @@ -100,7 +150,7 @@ impl From for TaskWrapper { pub struct ProofDetail { pub id: String, #[serde(rename = "type", default)] - pub proof_type: ProofType, + pub proof_type: TaskType, pub 
proof_data: String, pub error: String, } diff --git a/prover/src/utils.rs b/prover/src/utils.rs index 87b7137e33..18be4ac7a1 100644 --- a/prover/src/utils.rs +++ b/prover/src/utils.rs @@ -1,6 +1,8 @@ use env_logger::Env; use std::{fs::OpenOptions, sync::Once}; +use crate::types::{ProverType, TaskType}; + static LOG_INIT: Once = Once::new(); /// Initialize log @@ -21,3 +23,10 @@ pub fn log_init(log_file: Option) { builder.init(); }); } + +pub fn get_task_types(prover_type: ProverType) -> Vec { + match prover_type { + ProverType::Chunk => vec![TaskType::Chunk], + ProverType::Batch => vec![TaskType::Batch, TaskType::Bundle], + } +} diff --git a/prover/src/zk_circuits_handler.rs b/prover/src/zk_circuits_handler.rs index 9d88d58290..db254e2abb 100644 --- a/prover/src/zk_circuits_handler.rs +++ b/prover/src/zk_circuits_handler.rs @@ -1,14 +1,15 @@ -mod bernoulli; mod curie; +mod darwin; use super::geth_client::GethClient; use crate::{ config::{AssetsDirEnvConfig, Config}, - types::{ProofType, Task}, + types::{ProverType, Task, TaskType}, + utils::get_task_types, }; use anyhow::{bail, Result}; -use bernoulli::BaseCircuitsHandler; -use curie::NextCircuitsHandler; +use curie::CurieHandler; +use darwin::DarwinHandler; use std::{cell::RefCell, collections::HashMap, rc::Rc}; type HardForkName = String; @@ -20,38 +21,37 @@ pub mod utils { } pub trait CircuitsHandler { - fn get_vk(&self, task_type: ProofType) -> Option>; + fn get_vk(&self, task_type: TaskType) -> Option>; - fn get_proof_data(&self, task_type: ProofType, task: &Task) -> Result; + fn get_proof_data(&self, task_type: TaskType, task: &Task) -> Result; } type CircuitsHandlerBuilder = fn( - proof_type: ProofType, + prover_type: ProverType, config: &Config, geth_client: Option>>, ) -> Result>; pub struct CircuitsHandlerProvider<'a> { - proof_type: ProofType, + prover_type: ProverType, config: &'a Config, geth_client: Option>>, circuits_handler_builder_map: HashMap, current_hard_fork_name: Option, current_circuit: Option>>, - vks: Vec, } impl<'a> CircuitsHandlerProvider<'a> { pub fn new( - proof_type: ProofType, + prover_type: ProverType, config: &'a Config, geth_client: Option>>, ) -> Result { let mut m: HashMap = HashMap::new(); fn handler_builder( - proof_type: ProofType, + prover_type: ProverType, config: &Config, geth_client: Option>>, ) -> Result> { @@ -60,8 +60,8 @@ impl<'a> CircuitsHandlerProvider<'a> { &config.low_version_circuit.hard_fork_name ); AssetsDirEnvConfig::enable_first(); - BaseCircuitsHandler::new( - proof_type, + CurieHandler::new( + prover_type, &config.low_version_circuit.params_path, &config.low_version_circuit.assets_path, geth_client, @@ -74,7 +74,7 @@ impl<'a> CircuitsHandlerProvider<'a> { ); fn next_handler_builder( - proof_type: ProofType, + prover_type: ProverType, config: &Config, geth_client: Option>>, ) -> Result> { @@ -83,8 +83,8 @@ impl<'a> CircuitsHandlerProvider<'a> { &config.high_version_circuit.hard_fork_name ); AssetsDirEnvConfig::enable_second(); - NextCircuitsHandler::new( - proof_type, + DarwinHandler::new( + prover_type, &config.high_version_circuit.params_path, &config.high_version_circuit.assets_path, geth_client, @@ -97,16 +97,13 @@ impl<'a> CircuitsHandlerProvider<'a> { next_handler_builder, ); - let vks = CircuitsHandlerProvider::init_vks(proof_type, config, &m, geth_client.clone()); - let provider = CircuitsHandlerProvider { - proof_type, + prover_type, config, geth_client, circuits_handler_builder_map: m, current_hard_fork_name: None, current_circuit: None, - vks, }; Ok(provider) @@ -132,7 
+129,7 @@ impl<'a> CircuitsHandlerProvider<'a> { ); if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) { log::info!("building circuits handler for {hard_fork_name}"); - let handler = builder(self.proof_type, self.config, self.geth_client.clone()) + let handler = builder(self.prover_type, self.config, self.geth_client.clone()) .expect("failed to build circuits handler"); self.current_hard_fork_name = Some(hard_fork_name.clone()); let rc_handler = Rc::new(handler); @@ -146,27 +143,33 @@ impl<'a> CircuitsHandlerProvider<'a> { } } - fn init_vks( - proof_type: ProofType, + pub fn init_vks( + &self, + prover_type: ProverType, config: &'a Config, - circuits_handler_builder_map: &HashMap, geth_client: Option>>, ) -> Vec { - circuits_handler_builder_map + self.circuits_handler_builder_map .iter() - .map(|(hard_fork_name, build)| { - let handler = build(proof_type, config, geth_client.clone()) + .flat_map(|(hard_fork_name, build)| { + let handler = build(prover_type, config, geth_client.clone()) .expect("failed to build circuits handler"); - let vk = handler - .get_vk(proof_type) - .map_or("".to_string(), utils::encode_vk); - log::info!("vk for {hard_fork_name} is {vk}"); - vk + + get_task_types(prover_type) + .into_iter() + .map(|task_type| { + let vk = handler + .get_vk(task_type) + .map_or("".to_string(), utils::encode_vk); + log::info!( + "vk for {hard_fork_name}, is {vk}, task_type: {:?}", + task_type + ); + vk + }) + .filter(|vk| !vk.is_empty()) + .collect::>() }) .collect::>() } - - pub fn get_vks(&self) -> Vec { - self.vks.clone() - } } diff --git a/prover/src/zk_circuits_handler/curie.rs b/prover/src/zk_circuits_handler/curie.rs index 032ea7ca73..c3b3a1966f 100644 --- a/prover/src/zk_circuits_handler/curie.rs +++ b/prover/src/zk_circuits_handler/curie.rs @@ -1,17 +1,22 @@ use super::CircuitsHandler; -use crate::{geth_client::GethClient, types::ProofType}; +use crate::{ + geth_client::GethClient, + types::{ProverType, TaskType}, +}; use anyhow::{bail, Context, Ok, Result}; +use once_cell::sync::Lazy; use serde::Deserialize; use crate::types::{CommonHash, Task}; -use std::{cell::RefCell, cmp::Ordering, rc::Rc}; +use std::{cell::RefCell, cmp::Ordering, env, rc::Rc}; -use prover_next::{ +use prover_curie::{ aggregator::Prover as BatchProver, check_chunk_hashes, zkevm::Prover as ChunkProver, BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof, ChunkProvingTask, }; -use super::bernoulli::OUTPUT_DIR; +// Only used for debugging. 
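The OUTPUT_DIR static that follows (and its twin in darwin.rs further down) replaces the old shared bernoulli::OUTPUT_DIR; each handler now lazily reads the optional PROVER_OUTPUT_DIR environment variable on first use. Because the extracted diff text has lost its angle-bracketed type parameters, here is a small self-contained sketch of the assumed full form, Lazy<Option<String>>, together with the as_deref accessor used by get_output_dir:

use once_cell::sync::Lazy;
use std::env;

// Lazily cache the optional debug output directory; resolved once, on first access.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

// &'static is fine here because OUTPUT_DIR itself is a static.
fn get_output_dir() -> Option<&'static str> {
    OUTPUT_DIR.as_deref()
}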
+static OUTPUT_DIR: Lazy> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok()); #[derive(Deserialize)] pub struct BatchTaskDetail { @@ -29,33 +34,32 @@ fn get_block_number(block_trace: &BlockTrace) -> Option { } #[derive(Default)] -pub struct NextCircuitsHandler { +pub struct CurieHandler { chunk_prover: Option>, batch_prover: Option>, geth_client: Option>>, } -impl NextCircuitsHandler { +impl CurieHandler { pub fn new( - proof_type: ProofType, + prover_type: ProverType, params_dir: &str, assets_dir: &str, geth_client: Option>>, ) -> Result { - match proof_type { - ProofType::Chunk => Ok(Self { + match prover_type { + ProverType::Chunk => Ok(Self { chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))), batch_prover: None, geth_client, }), - ProofType::Batch => Ok(Self { + ProverType::Batch => Ok(Self { batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))), chunk_prover: None, geth_client, }), - _ => bail!("proof type invalid"), } } @@ -186,25 +190,26 @@ impl NextCircuitsHandler { } } -impl CircuitsHandler for NextCircuitsHandler { - fn get_vk(&self, task_type: ProofType) -> Option> { +impl CircuitsHandler for CurieHandler { + fn get_vk(&self, task_type: TaskType) -> Option> { match task_type { - ProofType::Chunk => self + TaskType::Chunk => self .chunk_prover .as_ref() .and_then(|prover| prover.borrow().get_vk()), - ProofType::Batch => self + TaskType::Batch => self .batch_prover .as_ref() .and_then(|prover| prover.borrow().get_vk()), + TaskType::Bundle => None, _ => unreachable!(), } } - fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result { + fn get_proof_data(&self, task_type: TaskType, task: &crate::types::Task) -> Result { match task_type { - ProofType::Chunk => self.gen_chunk_proof(task), - ProofType::Batch => self.gen_batch_proof(task), + TaskType::Chunk => self.gen_chunk_proof(task), + TaskType::Batch => self.gen_batch_proof(task), _ => unreachable!(), } } @@ -216,7 +221,7 @@ impl CircuitsHandler for NextCircuitsHandler { mod tests { use super::*; use crate::zk_circuits_handler::utils::encode_vk; - use prover_next::utils::chunk_trace_to_witness_block; + use prover_curie::utils::chunk_trace_to_witness_block; use std::{path::PathBuf, sync::LazyLock}; #[ctor::ctor] @@ -251,12 +256,11 @@ mod tests { #[test] fn test_circuits() -> Result<()> { - let chunk_handler = - NextCircuitsHandler::new(ProofType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?; + let chunk_handler = CurieHandler::new(ProverType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?; - let chunk_vk = chunk_handler.get_vk(ProofType::Chunk).unwrap(); + let chunk_vk = chunk_handler.get_vk(TaskType::Chunk).unwrap(); - check_vk(ProofType::Chunk, chunk_vk, "chunk vk must be available"); + check_vk(TaskType::Chunk, chunk_vk, "chunk vk must be available"); let chunk_dir_paths = get_chunk_dir_paths()?; log::info!("chunk_dir_paths, {:?}", chunk_dir_paths); let mut chunk_infos = vec![]; @@ -276,10 +280,9 @@ mod tests { chunk_proofs.push(chunk_proof); } - let batch_handler = - NextCircuitsHandler::new(ProofType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?; - let batch_vk = batch_handler.get_vk(ProofType::Batch).unwrap(); - check_vk(ProofType::Batch, batch_vk, "batch vk must be available"); + let batch_handler = CurieHandler::new(ProverType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?; + let batch_vk = batch_handler.get_vk(TaskType::Batch).unwrap(); + check_vk(TaskType::Batch, batch_vk, "batch vk must be available"); let chunk_hashes_proofs = 
chunk_infos.into_iter().zip(chunk_proofs).collect(); log::info!("start to prove batch"); let batch_proof = batch_handler.gen_batch_proof_raw(chunk_hashes_proofs)?; @@ -289,18 +292,19 @@ mod tests { Ok(()) } - fn check_vk(proof_type: ProofType, vk: Vec, info: &str) { - log::info!("check_vk, {:?}", proof_type); - let vk_from_file = read_vk(proof_type).unwrap(); + fn check_vk(task_type: TaskType, vk: Vec, info: &str) { + log::info!("check_vk, {:?}", task_type); + let vk_from_file = read_vk(task_type).unwrap(); assert_eq!(vk_from_file, encode_vk(vk), "{info}") } - fn read_vk(proof_type: ProofType) -> Result { - log::info!("read_vk, {:?}", proof_type); - let vk_file = match proof_type { - ProofType::Chunk => CHUNK_VK_PATH.clone(), - ProofType::Batch => BATCH_VK_PATH.clone(), - ProofType::Undefined => unreachable!(), + fn read_vk(task_type: TaskType) -> Result { + log::info!("read_vk, {:?}", task_type); + let vk_file = match task_type { + TaskType::Chunk => CHUNK_VK_PATH.clone(), + TaskType::Batch => BATCH_VK_PATH.clone(), + TaskType::Bundle => unreachable!(), + TaskType::Undefined => unreachable!(), }; let data = std::fs::read(vk_file)?; diff --git a/prover/src/zk_circuits_handler/bernoulli.rs b/prover/src/zk_circuits_handler/darwin.rs similarity index 68% rename from prover/src/zk_circuits_handler/bernoulli.rs rename to prover/src/zk_circuits_handler/darwin.rs index bde85c1dc2..bbbd0251f1 100644 --- a/prover/src/zk_circuits_handler/bernoulli.rs +++ b/prover/src/zk_circuits_handler/darwin.rs @@ -1,27 +1,34 @@ use super::CircuitsHandler; -use crate::{geth_client::GethClient, types::ProofType}; -use anyhow::{bail, Ok, Result}; +use crate::{ + geth_client::GethClient, + types::{ProverType, TaskType}, +}; +use anyhow::{bail, Context, Ok, Result}; use once_cell::sync::Lazy; use serde::Deserialize; use crate::types::{CommonHash, Task}; -use prover::{ - aggregator::Prover as BatchProver, zkevm::Prover as ChunkProver, BatchProof, BlockTrace, - ChunkHash, ChunkProof, -}; use std::{cell::RefCell, cmp::Ordering, env, rc::Rc}; +use prover_darwin::{ + aggregator::Prover as BatchProver, check_chunk_hashes, zkevm::Prover as ChunkProver, + BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo, + ChunkProof, ChunkProvingTask, +}; + // Only used for debugging. 
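The Darwin handler below dispatches on the new TaskType (Chunk/Batch/Bundle), while a prover registers for the task types derived from its single ProverType via get_task_types, which also drives init_vks above. A small hypothetical test module pinning down that fan-out and the bare-integer serde encoding from prover/src/types.rs; it assumes it compiles inside the prover crate with serde_json available:

#[cfg(test)]
mod prover_type_mapping_tests {
    use crate::types::{ProverType, TaskType};
    use crate::utils::get_task_types;

    #[test]
    fn batch_prover_serves_batch_and_bundle_tasks() {
        assert_eq!(get_task_types(ProverType::Chunk), vec![TaskType::Chunk]);
        assert_eq!(
            get_task_types(ProverType::Batch),
            vec![TaskType::Batch, TaskType::Bundle]
        );
    }

    #[test]
    fn task_type_round_trips_as_a_bare_integer() {
        // The Serialize impl writes serialize_u8(3) for Bundle, so the JSON is just "3".
        assert_eq!(serde_json::to_string(&TaskType::Bundle).unwrap(), "3");
        let parsed: TaskType = serde_json::from_str("2").unwrap();
        assert_eq!(parsed, TaskType::Batch);
    }
}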
-pub(crate) static OUTPUT_DIR: Lazy> = - Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok()); +static OUTPUT_DIR: Lazy> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok()); -#[derive(Deserialize)] +#[derive(Debug, Clone, Deserialize)] pub struct BatchTaskDetail { - pub chunk_infos: Vec, - pub chunk_proofs: Vec, + pub chunk_infos: Vec, + #[serde(flatten)] + pub batch_proving_task: BatchProvingTask, } -#[derive(Deserialize)] +type BundleTaskDetail = BundleProvingTask; + +#[derive(Debug, Clone, Deserialize)] pub struct ChunkTaskDetail { pub block_hashes: Vec, } @@ -30,44 +37,44 @@ fn get_block_number(block_trace: &BlockTrace) -> Option { block_trace.header.number.map(|n| n.as_u64()) } -pub struct BaseCircuitsHandler { +#[derive(Default)] +pub struct DarwinHandler { chunk_prover: Option>, batch_prover: Option>, geth_client: Option>>, } -impl BaseCircuitsHandler { +impl DarwinHandler { pub fn new( - proof_type: ProofType, + prover_type: ProverType, params_dir: &str, assets_dir: &str, geth_client: Option>>, ) -> Result { - match proof_type { - ProofType::Chunk => Ok(Self { + match prover_type { + ProverType::Chunk => Ok(Self { chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))), batch_prover: None, geth_client, }), - ProofType::Batch => Ok(Self { + ProverType::Batch => Ok(Self { batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))), chunk_prover: None, geth_client, }), - _ => bail!("proof type invalid"), } } fn gen_chunk_proof_raw(&self, chunk_trace: Vec) -> Result { if let Some(prover) = self.chunk_prover.as_ref() { - let chunk_proof = prover.borrow_mut().gen_chunk_proof( - chunk_trace, - None, - None, - self.get_output_dir(), - )?; + let chunk = ChunkProvingTask::from(chunk_trace); + + let chunk_proof = + prover + .borrow_mut() + .gen_chunk_proof(chunk, None, None, self.get_output_dir())?; return Ok(chunk_proof); } @@ -80,22 +87,26 @@ impl BaseCircuitsHandler { Ok(serde_json::to_string(&chunk_proof)?) } - fn gen_batch_proof_raw( - &self, - chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>, - ) -> Result { + fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result { if let Some(prover) = self.batch_prover.as_ref() { + let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail + .chunk_infos + .clone() + .into_iter() + .zip(batch_task_detail.batch_proving_task.chunk_proofs.clone()) + .collect(); + let chunk_proofs: Vec = chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect(); - let is_valid = prover.borrow_mut().check_chunk_proofs(&chunk_proofs); + let is_valid = prover.borrow_mut().check_protocol_of_chunks(&chunk_proofs); if !is_valid { bail!("non-match chunk protocol") } - - let batch_proof = prover.borrow_mut().gen_agg_evm_proof( - chunk_hashes_proofs, + check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?; + let batch_proof = prover.borrow_mut().gen_batch_proof( + batch_task_detail.batch_proving_task, None, self.get_output_dir(), )?; @@ -107,12 +118,32 @@ impl BaseCircuitsHandler { fn gen_batch_proof(&self, task: &crate::types::Task) -> Result { log::info!("[circuit] gen_batch_proof for task {}", task.id); - let chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)> = - self.gen_chunk_hashes_proofs(task)?; - let batch_proof = self.gen_batch_proof_raw(chunk_hashes_proofs)?; + + let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?; + let batch_proof = self.gen_batch_proof_raw(batch_task_detail)?; Ok(serde_json::to_string(&batch_proof)?) 
} + fn gen_bundle_proof_raw(&self, bundle_task_detail: BundleTaskDetail) -> Result { + if let Some(prover) = self.batch_prover.as_ref() { + let bundle_proof = prover.borrow_mut().gen_bundle_proof( + bundle_task_detail, + None, + self.get_output_dir(), + )?; + + return Ok(bundle_proof); + } + unreachable!("please check errors in proof_type logic") + } + + fn gen_bundle_proof(&self, task: &crate::types::Task) -> Result { + log::info!("[circuit] gen_bundle_proof for task {}", task.id); + let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&task.task_data)?; + let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail)?; + Ok(serde_json::to_string(&bundle_proof)?) + } + fn get_output_dir(&self) -> Option<&str> { OUTPUT_DIR.as_deref() } @@ -122,17 +153,6 @@ impl BaseCircuitsHandler { self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes) } - fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result> { - let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?; - - Ok(batch_task_detail - .chunk_infos - .clone() - .into_iter() - .zip(batch_task_detail.chunk_proofs.clone()) - .collect()) - } - fn get_sorted_traces_by_hashes(&self, block_hashes: &[CommonHash]) -> Result> { if block_hashes.is_empty() { log::error!("[prover] failed to get sorted traces: block_hashes are empty"); @@ -187,25 +207,30 @@ impl BaseCircuitsHandler { } } -impl CircuitsHandler for BaseCircuitsHandler { - fn get_vk(&self, task_type: ProofType) -> Option> { +impl CircuitsHandler for DarwinHandler { + fn get_vk(&self, task_type: TaskType) -> Option> { match task_type { - ProofType::Chunk => self + TaskType::Chunk => self .chunk_prover .as_ref() .and_then(|prover| prover.borrow().get_vk()), - ProofType::Batch => self + TaskType::Batch => self .batch_prover .as_ref() - .and_then(|prover| prover.borrow().get_vk()), + .and_then(|prover| prover.borrow().get_batch_vk()), + TaskType::Bundle => self + .batch_prover + .as_ref() + .and_then(|prover| prover.borrow().get_bundle_vk()), _ => unreachable!(), } } - fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result { + fn get_proof_data(&self, task_type: TaskType, task: &crate::types::Task) -> Result { match task_type { - ProofType::Chunk => self.gen_chunk_proof(task), - ProofType::Batch => self.gen_batch_proof(task), + TaskType::Chunk => self.gen_chunk_proof(task), + TaskType::Batch => self.gen_batch_proof(task), + TaskType::Bundle => self.gen_bundle_proof(task), _ => unreachable!(), } } @@ -217,7 +242,7 @@ impl CircuitsHandler for BaseCircuitsHandler { mod tests { use super::*; use crate::zk_circuits_handler::utils::encode_vk; - use prover::utils::chunk_trace_to_witness_block; + use prover_darwin::utils::chunk_trace_to_witness_block; use std::{path::PathBuf, sync::LazyLock}; #[ctor::ctor] @@ -228,7 +253,7 @@ mod tests { static DEFAULT_WORK_DIR: &str = "/assets"; static WORK_DIR: LazyLock = LazyLock::new(|| { - std::env::var("BERNOULLI_TEST_DIR") + std::env::var("CURIE_TEST_DIR") .unwrap_or(String::from(DEFAULT_WORK_DIR)) .trim_end_matches('/') .to_string() @@ -253,11 +278,11 @@ mod tests { #[test] fn test_circuits() -> Result<()> { let chunk_handler = - BaseCircuitsHandler::new(ProofType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?; + DarwinHandler::new(ProverType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?; - let chunk_vk = chunk_handler.get_vk(ProofType::Chunk).unwrap(); + let chunk_vk = chunk_handler.get_vk(TaskType::Chunk).unwrap(); - check_vk(ProofType::Chunk, chunk_vk, "chunk vk must be available"); + 
check_vk(TaskType::Chunk, chunk_vk, "chunk vk must be available"); let chunk_dir_paths = get_chunk_dir_paths()?; log::info!("chunk_dir_paths, {:?}", chunk_dir_paths); let mut chunk_infos = vec![]; @@ -278,30 +303,44 @@ mod tests { } let batch_handler = - BaseCircuitsHandler::new(ProofType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?; - let batch_vk = batch_handler.get_vk(ProofType::Batch).unwrap(); - check_vk(ProofType::Batch, batch_vk, "batch vk must be available"); - let chunk_hashes_proofs = chunk_infos.into_iter().zip(chunk_proofs).collect(); + DarwinHandler::new(ProverType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?; + let batch_vk = batch_handler.get_vk(TaskType::Batch).unwrap(); + check_vk(TaskType::Batch, batch_vk, "batch vk must be available"); + let batch_task_detail = make_batch_task_detail(chunk_infos, chunk_proofs); log::info!("start to prove batch"); - let batch_proof = batch_handler.gen_batch_proof_raw(chunk_hashes_proofs)?; + let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail)?; let proof_data = serde_json::to_string(&batch_proof)?; dump_proof("batch_proof".to_string(), proof_data)?; Ok(()) } - fn check_vk(proof_type: ProofType, vk: Vec, info: &str) { + fn make_batch_task_detail(_: Vec, _: Vec) -> BatchTaskDetail { + todo!(); + // BatchTaskDetail { + // chunk_infos, + // batch_proving_task: BatchProvingTask { + // parent_batch_hash: todo!(), + // parent_state_root: todo!(), + // batch_header: todo!(), + // chunk_proofs, + // }, + // } + } + + fn check_vk(proof_type: TaskType, vk: Vec, info: &str) { log::info!("check_vk, {:?}", proof_type); let vk_from_file = read_vk(proof_type).unwrap(); assert_eq!(vk_from_file, encode_vk(vk), "{info}") } - fn read_vk(proof_type: ProofType) -> Result { + fn read_vk(proof_type: TaskType) -> Result { log::info!("read_vk, {:?}", proof_type); let vk_file = match proof_type { - ProofType::Chunk => CHUNK_VK_PATH.clone(), - ProofType::Batch => BATCH_VK_PATH.clone(), - ProofType::Undefined => unreachable!(), + TaskType::Chunk => CHUNK_VK_PATH.clone(), + TaskType::Batch => BATCH_VK_PATH.clone(), + TaskType::Bundle => todo!(), + TaskType::Undefined => unreachable!(), }; let data = std::fs::read(vk_file)?; @@ -375,9 +414,9 @@ mod tests { Ok(files.into_iter().map(|f| batch_path.join(f)).collect()) } - fn traces_to_chunk_info(chunk_trace: Vec) -> Result { + fn traces_to_chunk_info(chunk_trace: Vec) -> Result { let witness_block = chunk_trace_to_witness_block(chunk_trace)?; - Ok(ChunkHash::from_witness_block(&witness_block, false)) + Ok(ChunkInfo::from_witness_block(&witness_block, false)) } fn dump_proof(id: String, proof_data: String) -> Result<()> { diff --git a/rollup/abi/bridge_abi.go b/rollup/abi/bridge_abi.go index 20e5831d53..35f97824ff 100644 --- a/rollup/abi/bridge_abi.go +++ b/rollup/abi/bridge_abi.go @@ -24,7 +24,7 @@ func init() { // ScrollChainMetaData contains all meta data concerning the ScrollChain contract. 
var ScrollChainMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldMaxNumTxInChunk\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newMaxNumTxInChunk\",\"type\":\"uint256\"}],\"name\":\"UpdateMaxNumTxInChunk\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"status\",\"type\":\"bool\"}],\"name\":\"UpdateProver\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"status\",\"type\":\"bool\"}],\"name\":\"UpdateSequencer\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"parentBatchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"chunks\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"skippedL1MessageBitmap\",\"type\":\"bytes\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"committedBatches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"finalizeBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blobDataProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatch4844\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\"
:\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blobDataProof\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof4844\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"finalizedStateRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"_stateRoot\",\"type\":\"bytes32\"}],\"name\":\"importGenesisBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastFinalizedBatchIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"withdrawRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": 
\"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": 
\"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", } // L2GasPriceOracleMetaData contains all meta data concerning the L2GasPriceOracle contract. 
diff --git a/rollup/cmd/rollup_relayer/app/app.go b/rollup/cmd/rollup_relayer/app/app.go index 0a208a88d3..74a9e17e0c 100644 --- a/rollup/cmd/rollup_relayer/app/app.go +++ b/rollup/cmd/rollup_relayer/app/app.go @@ -85,14 +85,8 @@ func action(ctx *cli.Context) error { } chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, genesis.Config, db, registry) - if err != nil { - log.Crit("failed to create chunkProposer", "config file", cfgFile, "error", err) - } - batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, genesis.Config, db, registry) - if err != nil { - log.Crit("failed to create batchProposer", "config file", cfgFile, "error", err) - } + bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, genesis.Config, db, registry) l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry) @@ -110,10 +104,14 @@ func action(ctx *cli.Context) error { go utils.Loop(subCtx, time.Duration(cfg.L2Config.BatchProposerConfig.ProposeIntervalMilliseconds)*time.Millisecond, batchProposer.TryProposeBatch) + go utils.Loop(subCtx, 10*time.Second, bundleProposer.TryProposeBundle) + go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches) go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches) + go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessPendingBundles) + // Finish start all rollup relayer functions. log.Info("Start rollup-relayer successfully", "version", version.Version) diff --git a/rollup/conf/config.json b/rollup/conf/config.json index 9d0c79aff8..88a64337c2 100644 --- a/rollup/conf/config.json +++ b/rollup/conf/config.json @@ -55,6 +55,7 @@ }, "enable_test_env_bypass_features": true, "finalize_batch_without_proof_timeout_sec": 7200, + "finalize_bundle_without_proof_timeout_sec": 7200, "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313", "commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414", "finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515", @@ -78,6 +79,10 @@ "batch_timeout_sec": 300, "gas_cost_increase_multiplier": 1.2, "max_uncompressed_batch_bytes_size": 634880 + }, + "bundle_proposer_config": { + "max_batch_num_per_bundle": 20, + "bundle_timeout_sec": 36000 } }, "db_config": { diff --git a/rollup/go.mod b/rollup/go.mod index 8013473805..0f056bf6b4 100644 --- a/rollup/go.mod +++ b/rollup/go.mod @@ -10,8 +10,8 @@ require ( github.com/go-resty/resty/v2 v2.7.0 github.com/holiman/uint256 v1.2.4 github.com/prometheus/client_golang v1.16.0 - github.com/scroll-tech/da-codec v0.0.0-20240712125636-d7e76c3f54b5 - github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4 + github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb + github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/smartystreets/goconvey v1.8.0 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.25.7 @@ -21,7 +21,7 @@ require ( require ( github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.12.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/bytedance/sonic v1.10.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -32,7 
+32,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set v1.8.0 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect - github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect + github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect @@ -55,7 +55,7 @@ require ( github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/huin/goupnp v1.3.0 // indirect - github.com/iden3/go-iden3-crypto v0.0.15 // indirect + github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect @@ -90,21 +90,21 @@ require ( github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/smartystreets/assertions v1.13.1 // indirect github.com/status-im/keycard-go v0.2.0 // indirect - github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect + github.com/supranational/blst v0.3.12 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/ugorji/go/codec v1.2.11 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect golang.org/x/arch v0.5.0 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/rollup/go.sum b/rollup/go.sum index 9ef563f899..b485743b10 100644 --- a/rollup/go.sum +++ b/rollup/go.sum @@ -11,8 +11,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= -github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog 
v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -54,8 +54,8 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c= +github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= +github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -135,8 +135,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= -github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= +github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -236,10 +236,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.0.0-20240712125636-d7e76c3f54b5 h1:mdgFgYSKbB7JbUPEvqKdXxXlzc3uRwD+dlNA4GsFSoo= -github.com/scroll-tech/da-codec v0.0.0-20240712125636-d7e76c3f54b5/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4 h1:gheWXra3HdZsz6q+w4LrXy8ybHOO6/t6Kb/V64bR5wE= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= 
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= @@ -267,14 +267,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0= -github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= +github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= @@ -286,8 +286,8 @@ github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6S github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= @@ -295,8 +295,8 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -306,16 +306,16 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -336,17 +336,15 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 
h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/rollup/internal/config/l2.go b/rollup/internal/config/l2.go index f77187a445..4db24fb73e 100644 --- a/rollup/internal/config/l2.go +++ b/rollup/internal/config/l2.go @@ -22,6 +22,8 @@ type L2Config struct { ChunkProposerConfig *ChunkProposerConfig `json:"chunk_proposer_config"` // The batch_proposer config BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"` + // The bundle_proposer config + BundleProposerConfig *BundleProposerConfig `json:"bundle_proposer_config"` } // ChunkProposerConfig loads chunk_proposer configuration items. @@ -46,3 +48,9 @@ type BatchProposerConfig struct { GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"` MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"` } + +// BundleProposerConfig loads bundle_proposer configuration items. +type BundleProposerConfig struct { + MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"` + BundleTimeoutSec uint64 `json:"bundle_timeout_sec"` +} diff --git a/rollup/internal/config/relayer.go b/rollup/internal/config/relayer.go index d33d8a83ca..aa1fbeea8b 100644 --- a/rollup/internal/config/relayer.go +++ b/rollup/internal/config/relayer.go @@ -66,6 +66,8 @@ type RelayerConfig struct { EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"` // The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true. FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"` + // The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true. + FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"` } // GasOracleConfig The config for updating gas price oracle. 
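For orientation on the two config hunks above: the new bundle_proposer_config block in rollup/conf/config.json is decoded into the BundleProposerConfig struct added to rollup/internal/config/l2.go, and the new finalize_bundle_without_proof_timeout_sec key feeds RelayerConfig.FinalizeBundleWithoutProofTimeoutSec. A minimal, self-contained sketch of the JSON-to-struct mapping (illustrative only, using the standard library decoder rather than the project's config loader):

// Sketch: decode the new bundle_proposer_config block into the struct added above.
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors BundleProposerConfig from rollup/internal/config/l2.go.
type BundleProposerConfig struct {
	MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"`
	BundleTimeoutSec     uint64 `json:"bundle_timeout_sec"`
}

func main() {
	// Values taken from the rollup/conf/config.json hunk above.
	raw := []byte(`{"max_batch_num_per_bundle": 20, "bundle_timeout_sec": 36000}`)
	var cfg BundleProposerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("at most %d batches per bundle, bundle timeout %d seconds\n", cfg.MaxBatchNumPerBundle, cfg.BundleTimeoutSec)
}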
diff --git a/rollup/internal/controller/relayer/l2_relayer.go b/rollup/internal/controller/relayer/l2_relayer.go index 02368f73c4..fb0e84933b 100644 --- a/rollup/internal/controller/relayer/l2_relayer.go +++ b/rollup/internal/controller/relayer/l2_relayer.go @@ -6,6 +6,7 @@ import ( "fmt" "math/big" "sort" + "strings" "time" "github.com/go-resty/resty/v2" @@ -14,6 +15,7 @@ import ( "github.com/scroll-tech/da-codec/encoding/codecv0" "github.com/scroll-tech/da-codec/encoding/codecv1" "github.com/scroll-tech/da-codec/encoding/codecv2" + "github.com/scroll-tech/da-codec/encoding/codecv3" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" gethTypes "github.com/scroll-tech/go-ethereum/core/types" @@ -44,6 +46,7 @@ type Layer2Relayer struct { l2Client *ethclient.Client db *gorm.DB + bundleOrm *orm.Bundle batchOrm *orm.Batch chunkOrm *orm.Chunk l2BlockOrm *orm.L2Block @@ -123,6 +126,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm. ctx: ctx, db: db, + bundleOrm: orm.NewBundle(db), batchOrm: orm.NewBatch(db), l2BlockOrm: orm.NewL2Block(db), chunkOrm: orm.NewChunk(db), @@ -386,12 +390,18 @@ func (r *Layer2Relayer) ProcessPendingBatches() { log.Error("failed to construct commitBatch payload codecv1", "index", dbBatch.Index, "err", err) return } - } else { // codecv2 + } else if !r.chainCfg.IsDarwin(dbChunks[0].StartBlockTime) { // codecv2 calldata, blob, err = r.constructCommitBatchPayloadCodecV2(dbBatch, dbParentBatch, dbChunks, chunks) if err != nil { log.Error("failed to construct commitBatch payload codecv2", "index", dbBatch.Index, "err", err) return } + } else { // codecv3 + calldata, blob, err = r.constructCommitBatchPayloadCodecV3(dbBatch, dbParentBatch, dbChunks, chunks) + if err != nil { + log.Error("failed to construct commitBatch payload codecv3", "index", dbBatch.Index, "err", err) + return + } } // fallbackGasLimit is non-zero only in sending non-blob transactions. 
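Commentary on the hunk above (not taken verbatim from the patch): ProcessPendingBatches now chooses the commit payload codec from the chain's hardfork schedule, with Darwin-era batches taking the new constructCommitBatchPayloadCodecV3 / commitBatchWithBlobProof path. A simplified sketch of that dispatch follows; the codecv0 and codecv1 conditions are inferred from the matching finalizeBatch hunk later in this file, so treat the exact ordering and method signatures as an approximation rather than a verbatim excerpt:

// Simplified, assumed sketch of the codec selection order when committing a batch.
package sketch

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/params"
)

func commitCodecName(chainCfg *params.ChainConfig, startBlockNumber, startBlockTime uint64) string {
	blockNum := new(big.Int).SetUint64(startBlockNumber)
	switch {
	case !chainCfg.IsBernoulli(blockNum):
		return "codecv0" // pre-Bernoulli
	case !chainCfg.IsCurie(blockNum):
		return "codecv1" // Bernoulli, pre-Curie
	case !chainCfg.IsDarwin(startBlockTime):
		return "codecv2" // Curie, pre-Darwin
	default:
		return "codecv3" // Darwin: commitBatchWithBlobProof, finalized later via finalizeBundle
	}
}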
@@ -410,14 +420,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() { "hash", dbBatch.Hash, "RollupContractAddress", r.cfg.RollupContractAddress, "err", err, - ) - log.Debug( - "Failed to send commitBatch tx to layer1", - "index", dbBatch.Index, - "hash", dbBatch.Hash, - "RollupContractAddress", r.cfg.RollupContractAddress, "calldata", common.Bytes2Hex(calldata), - "err", err, ) return } @@ -487,7 +490,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() { "batch proving failed", "Index", batch.Index, "Hash", batch.Hash, - "ProverAssignedAt", batch.ProverAssignedAt, "ProvedAt", batch.ProvedAt, "ProofTimeSec", batch.ProofTimeSec, ) @@ -497,8 +499,53 @@ func (r *Layer2Relayer) ProcessCommittedBatches() { } } +// ProcessPendingBundles submits proof to layer 1 rollup contract +func (r *Layer2Relayer) ProcessPendingBundles() { + r.metrics.rollupL2RelayerProcessPendingBundlesTotal.Inc() + + bundle, err := r.bundleOrm.GetFirstPendingBundle(r.ctx) + if bundle == nil && err == nil { + return + } + if err != nil { + log.Error("Failed to fetch first pending L2 bundle", "err", err) + return + } + + status := types.ProvingStatus(bundle.ProvingStatus) + switch status { + case types.ProvingTaskUnassigned, types.ProvingTaskAssigned: + if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(bundle.CreatedAt) > time.Duration(r.cfg.FinalizeBundleWithoutProofTimeoutSec)*time.Second { + if err := r.finalizeBundle(bundle, false); err != nil { + log.Error("Failed to finalize timeout bundle without proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err) + } + } + + case types.ProvingTaskVerified: + log.Info("Start to roll up zk proof", "hash", bundle.Hash) + r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedTotal.Inc() + if err := r.finalizeBundle(bundle, true); err != nil { + log.Error("Failed to finalize bundle with proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err) + } + + case types.ProvingTaskFailed: + // We were unable to prove this bundle. There are two possibilities: + // (a) Prover bug. In this case, we should fix and redeploy the prover. + // In the meantime, we continue to commit batches to L1 as well as + // proposing and proving chunks, batches and bundles. + // (b) Unprovable bundle, e.g. proof overflow. In this case we need to + // stop the ledger, fix the limit, revert all the violating blocks, + // chunks, batches, bundles and all subsequent ones, and resume, + // i.e. this case requires manual resolution. + log.Error("bundle proving failed", "index", bundle.Index, "hash", bundle.Hash, "proved at", bundle.ProvedAt, "proof time sec", bundle.ProofTimeSec) + + default: + log.Error("encounter unreachable case in ProcessPendingBundles", "proving status", status) + } +} + func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error { - // Check batch status before send `finalizeBatch` tx. + // Check batch status before sending `finalizeBatch` tx. 
if r.cfg.ChainMonitor.Enabled { var batchStatus bool batchStatus, err := r.getBatchStatusByIndex(dbBatch) @@ -509,8 +556,8 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error } if !batchStatus { r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc() - log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", dbBatch.Index) - return err + log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", dbBatch.Index) + return errors.New("the batch status is false") } } @@ -544,7 +591,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error if !r.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0 calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof) if err != nil { - return fmt.Errorf("failed to construct commitBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err) + return fmt.Errorf("failed to construct finalizeBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err) } } else if !r.chainCfg.IsCurie(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv1 chunks := make([]*encoding.Chunk, len(dbChunks)) @@ -558,9 +605,9 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof) if err != nil { - return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err) + return fmt.Errorf("failed to construct finalizeBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err) } - } else { // codecv2 + } else if !r.chainCfg.IsDarwin(dbChunks[0].StartBlockTime) { // codecv2 chunks := make([]*encoding.Chunk, len(dbChunks)) for i, c := range dbChunks { blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber) @@ -572,8 +619,11 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error calldata, err = r.constructFinalizeBatchPayloadCodecV2(dbBatch, dbParentBatch, dbChunks, chunks, aggProof) if err != nil { - return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err) + return fmt.Errorf("failed to construct finalizeBatch payload codecv2, index: %v, err: %w", dbBatch.Index, err) } + } else { // codecv3 + log.Debug("encoding is codecv3, using finalizeBundle instead", "index", dbBatch.Index) + return nil } txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0) @@ -585,15 +635,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error "hash", dbBatch.Hash, "RollupContractAddress", r.cfg.RollupContractAddress, "err", err, - ) - log.Debug( - "finalizeBatch in layer1 failed", - "with proof", withProof, - "index", dbBatch.Index, - "hash", dbBatch.Hash, - "RollupContractAddress", r.cfg.RollupContractAddress, "calldata", common.Bytes2Hex(calldata), - "err", err, ) return err } @@ -609,11 +651,11 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error // Updating the proving status when finalizing without proof, thus the coordinator could omit this task. 
// it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus if !withProof { - txErr := r.db.Transaction(func(tx *gorm.DB) error { - if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil { + txErr := r.db.Transaction(func(dbTX *gorm.DB) error { + if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { return updateErr } - if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil { + if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { return updateErr } return nil @@ -627,6 +669,98 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error return nil } +func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error { + // Check batch status before sending `finalizeBundle` tx. + if r.cfg.ChainMonitor.Enabled { + for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ { + tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex) + if getErr != nil { + log.Error("failed to get batch by index", "batch index", batchIndex, "error", getErr) + return getErr + } + batchStatus, getErr := r.getBatchStatusByIndex(tmpBatch) + if getErr != nil { + r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc() + log.Error("failed to get batch status, please check chain_monitor api server", "batch_index", tmpBatch.Index, "err", getErr) + return getErr + } + if !batchStatus { + r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc() + log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", tmpBatch.Index) + return errors.New("the batch status is false") + } + } + } + + dbBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.EndBatchIndex) + if err != nil { + log.Error("failed to get batch by index", "batch index", bundle.EndBatchIndex, "error", err) + return err + } + + var aggProof *message.BundleProof + if withProof { + aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash) + if err != nil { + return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err) + } + + if err = aggProof.SanityCheck(); err != nil { + return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", bundle.Index, err) + } + } + + calldata, err := r.constructFinalizeBundlePayloadCodecV3(dbBatch, aggProof) + if err != nil { + return fmt.Errorf("failed to construct finalizeBundle payload codecv3, index: %v, err: %w", dbBatch.Index, err) + } + + txHash, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0) + if err != nil { + log.Error("finalizeBundle in layer1 failed", "with proof", withProof, "index", bundle.Index, + "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, + "RollupContractAddress", r.cfg.RollupContractAddress, "err", err, "calldata", common.Bytes2Hex(calldata)) + return err + } + + log.Info("finalizeBundle in layer1", "with proof", withProof, "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "tx hash", txHash.String()) + + // Updating rollup status in database. 
+ if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing); err != nil { + log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err) + return err + } + + // Updating the proving status when finalizing without proof, thus the coordinator could omit this task. + // it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus + if !withProof { + txErr := r.db.Transaction(func(dbTX *gorm.DB) error { + if updateErr := r.bundleOrm.UpdateProvingStatus(r.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { + return updateErr + } + if updateErr := r.batchOrm.UpdateProvingStatusByBundleHash(r.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { + return updateErr + } + for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ { + tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex) + if getErr != nil { + return getErr + } + if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, tmpBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { + return updateErr + } + } + return nil + }) + if txErr != nil { + log.Error("Updating chunk and batch proving status when finalizing without proof failure", "bundleHash", bundle.Hash, "err", txErr) + } + } + + r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal.Inc() + return nil +} + // batchStatusResponse the response schema type batchStatusResponse struct { ErrCode int `json:"errcode"` @@ -691,6 +825,36 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) { log.Warn("UpdateCommitTxHashAndRollupStatus failed", "confirmation", cfm, "err", err) } case types.SenderTypeFinalizeBatch: + if strings.HasPrefix(cfm.ContextID, "finalizeBundle-") { + bundleHash := strings.TrimPrefix(cfm.ContextID, "finalizeBundle-") + var status types.RollupStatus + if cfm.IsSuccessful { + status = types.RollupFinalized + r.metrics.rollupL2BundlesFinalizedConfirmedTotal.Inc() + } else { + status = types.RollupFinalizeFailed + r.metrics.rollupL2BundlesFinalizedConfirmedFailedTotal.Inc() + log.Warn("FinalizeBundleTxType transaction confirmed but failed in layer1", "confirmation", cfm) + } + + err := r.db.Transaction(func(dbTX *gorm.DB) error { + if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil { + log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "confirmation", cfm, "err", err) + return err + } + + if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil { + log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err) + return err + } + return nil + }) + if err != nil { + log.Warn("failed to update rollup status of bundle and batches", "err", err) + } + return + } + var status types.RollupStatus if cfm.IsSuccessful { status = types.RollupFinalized @@ -836,6 +1000,45 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV2(dbBatch *orm.Batch, d return calldata, daBatch.Blob(), nil } +func (r *Layer2Relayer) constructCommitBatchPayloadCodecV3(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) { + batch := &encoding.Batch{ + Index: dbBatch.Index, + TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore, + 
ParentBatchHash: common.HexToHash(dbParentBatch.Hash), + Chunks: chunks, + } + + daBatch, createErr := codecv3.NewDABatch(batch) + if createErr != nil { + return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr) + } + + encodedChunks := make([][]byte, len(dbChunks)) + for i, c := range dbChunks { + daChunk, createErr := codecv3.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore) + if createErr != nil { + return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr) + } + encodedChunks[i] = daChunk.Encode() + } + + blobDataProof, err := daBatch.BlobDataProofForPointEvaluation() + if err != nil { + return nil, nil, fmt.Errorf("failed to get blob data proof for point evaluation: %w", err) + } + + skippedL1MessageBitmap, _, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore) + if err != nil { + return nil, nil, fmt.Errorf("failed to construct skipped L1 message bitmap: %w", err) + } + + calldata, packErr := r.l1RollupABI.Pack("commitBatchWithBlobProof", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, skippedL1MessageBitmap, blobDataProof) + if packErr != nil { + return nil, nil, fmt.Errorf("failed to pack commitBatchWithBlobProof: %w", packErr) + } + return calldata, daBatch.Blob(), nil +} + func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) { if aggProof != nil { // finalizeBatch with proof. calldata, packErr := r.l1RollupABI.Pack( @@ -964,6 +1167,34 @@ func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV2(dbBatch *orm.Batch, return calldata, nil } +func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV3(dbBatch *orm.Batch, aggProof *message.BundleProof) ([]byte, error) { + if aggProof != nil { // finalizeBundle with proof. + calldata, packErr := r.l1RollupABI.Pack( + "finalizeBundleWithProof", + dbBatch.BatchHeader, + common.HexToHash(dbBatch.StateRoot), + common.HexToHash(dbBatch.WithdrawRoot), + aggProof.Proof, + ) + if packErr != nil { + return nil, fmt.Errorf("failed to pack finalizeBundleWithProof: %w", packErr) + } + return calldata, nil + } + + // finalizeBundle without proof. + calldata, packErr := r.l1RollupABI.Pack( + "finalizeBundle", + dbBatch.BatchHeader, + common.HexToHash(dbBatch.StateRoot), + common.HexToHash(dbBatch.WithdrawRoot), + ) + if packErr != nil { + return nil, fmt.Errorf("failed to pack finalizeBundle: %w", packErr) + } + return calldata, nil +} + // StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests. 
// for unit test func (r *Layer2Relayer) StopSenders() { diff --git a/rollup/internal/controller/relayer/l2_relayer_metrics.go b/rollup/internal/controller/relayer/l2_relayer_metrics.go index 0d03b69ad9..82b0f248f0 100644 --- a/rollup/internal/controller/relayer/l2_relayer_metrics.go +++ b/rollup/internal/controller/relayer/l2_relayer_metrics.go @@ -23,6 +23,11 @@ type l2RelayerMetrics struct { rollupL2UpdateGasOracleConfirmedFailedTotal prometheus.Counter rollupL2ChainMonitorLatestFailedCall prometheus.Counter rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter + rollupL2RelayerProcessPendingBundlesTotal prometheus.Counter + rollupL2RelayerProcessPendingBundlesFinalizedTotal prometheus.Counter + rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal prometheus.Counter + rollupL2BundlesFinalizedConfirmedTotal prometheus.Counter + rollupL2BundlesFinalizedConfirmedFailedTotal prometheus.Counter } var ( @@ -93,6 +98,26 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics { Name: "rollup_layer2_chain_monitor_latest_failed_batch_status", Help: "The total number of failed batch status get from chain_monitor", }), + rollupL2RelayerProcessPendingBundlesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_relayer_process_pending_bundles_total", + Help: "Total number of times the layer2 relayer has processed pending bundles.", + }), + rollupL2RelayerProcessPendingBundlesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_relayer_process_pending_bundles_finalized_total", + Help: "Total number of times the layer2 relayer has finalized proven bundle processes.", + }), + rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_relayer_process_pending_bundles_finalized_success_total", + Help: "Total number of times the layer2 relayer has successful finalized proven bundle processes.", + }), + rollupL2BundlesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_bundles_finalized_confirmed_total", + Help: "Total number of finalized bundles confirmed on layer2.", + }), + rollupL2BundlesFinalizedConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_bundles_finalized_confirmed_failed_total", + Help: "Total number of failed confirmations for finalized bundles on layer2.", + }), } }) return l2RelayerMetric diff --git a/rollup/internal/controller/relayer/l2_relayer_test.go b/rollup/internal/controller/relayer/l2_relayer_test.go index 344e259ae9..c32a75d2e5 100644 --- a/rollup/internal/controller/relayer/l2_relayer_test.go +++ b/rollup/internal/controller/relayer/l2_relayer_test.go @@ -51,15 +51,21 @@ func testCreateNewRelayer(t *testing.T) { } func testL2RelayerProcessPendingBatches(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} for _, codecVersion := range codecVersions { db := setupL2RelayerDB(t) defer database.CloseDB(db) l2Cfg := cfg.L2Config - chainConfig := &params.ChainConfig{} + var chainConfig *params.ChainConfig if codecVersion == encoding.CodecV0 { - chainConfig.BernoulliBlock = big.NewInt(0) + chainConfig = &params.ChainConfig{} + } else if codecVersion == encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else if 
codecVersion == encoding.CodecV2 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} + } else { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} } relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) @@ -107,9 +113,13 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { defer database.CloseDB(db) l2Cfg := cfg.L2Config - chainConfig := &params.ChainConfig{} + var chainConfig *params.ChainConfig if codecVersion == encoding.CodecV0 { - chainConfig.BernoulliBlock = big.NewInt(0) + chainConfig = &params.ChainConfig{} + } else if codecVersion == encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} } relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) @@ -149,7 +159,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { assert.Equal(t, types.RollupCommitted, statuses[0]) proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, } err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100) assert.NoError(t, err) @@ -163,6 +175,66 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { } } +func testL2RelayerProcessPendingBundles(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV3} + for _, codecVersion := range codecVersions { + db := setupL2RelayerDB(t) + defer database.CloseDB(db) + + l2Cfg := cfg.L2Config + var chainConfig *params.ChainConfig + if codecVersion == encoding.CodecV3 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + } + relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) + assert.NoError(t, err) + + batch := &encoding.Batch{ + Index: 1, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk1, chunk2}, + } + + batchOrm := orm.NewBatch(db) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{}) + assert.NoError(t, err) + + bundleOrm := orm.NewBundle(db) + bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion) + assert.NoError(t, err) + + err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending) + assert.NoError(t, err) + + err = bundleOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified) + assert.NoError(t, err) + + relayer.ProcessPendingBundles() + + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0) + assert.NoError(t, err) + assert.Equal(t, 1, len(bundles)) + // no valid proof, rollup 
status remains the same + assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus)) + + proof := &message.BundleProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + } + err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600) + assert.NoError(t, err) + + relayer.ProcessPendingBundles() + bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0) + assert.NoError(t, err) + assert.Equal(t, 1, len(bundles)) + assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus)) + relayer.StopSenders() + } +} + func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2} for _, codecVersion := range codecVersions { @@ -172,9 +244,13 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { l2Cfg := cfg.L2Config l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0 - chainConfig := &params.ChainConfig{} + var chainConfig *params.ChainConfig if codecVersion == encoding.CodecV0 { - chainConfig.BernoulliBlock = big.NewInt(0) + chainConfig = &params.ChainConfig{} + } else if codecVersion == encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} } relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) @@ -205,29 +281,108 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil) assert.NoError(t, err) - // Check the database for the updated status using TryTimes. 
- ok := utils.TryTimes(5, func() bool { + assert.Eventually(t, func() bool { relayer.ProcessCommittedBatches() - time.Sleep(time.Second) batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0) if batchErr != nil { return false } + + batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing && + types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified + chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash) if chunkErr != nil { return false } - batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing && - types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified - chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified && types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified return batchStatus && chunkStatus - }) - assert.True(t, ok) + }, 5*time.Second, 100*time.Millisecond, "Batch or Chunk status did not update as expected") + relayer.StopSenders() + } +} + +func testL2RelayerFinalizeTimeoutBundles(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV3} + for _, codecVersion := range codecVersions { + db := setupL2RelayerDB(t) + defer database.CloseDB(db) + + l2Cfg := cfg.L2Config + l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true + l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0 + var chainConfig *params.ChainConfig + if codecVersion == encoding.CodecV3 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + } + relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) + assert.NoError(t, err) + + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) + chunkOrm := orm.NewChunk(db) + chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{}) + assert.NoError(t, err) + chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{}) + assert.NoError(t, err) + + batch := &encoding.Batch{ + Index: 1, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk1, chunk2}, + } + + batchOrm := orm.NewBatch(db) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{}) + assert.NoError(t, err) + + err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted) + assert.NoError(t, err) + + err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil) + assert.NoError(t, err) + + bundleOrm := orm.NewBundle(db) + bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion) + assert.NoError(t, err) + + err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil) + assert.NoError(t, err) + + assert.Eventually(t, func() bool { + relayer.ProcessPendingBundles() + + bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0) + if bundleErr != nil { + return false + } + + bundleStatus := len(bundleInDB) == 1 &&
types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing && + types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified + + batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0) + if batchErr != nil { + return false + } + + batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified + + chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash) + if chunkErr != nil { + return false + } + + chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified && + types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified + + return bundleStatus && batchStatus && chunkStatus + }, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected") relayer.StopSenders() } } @@ -288,7 +443,7 @@ func testL2RelayerCommitConfirm(t *testing.T) { assert.True(t, ok) } -func testL2RelayerFinalizeConfirm(t *testing.T) { +func testL2RelayerFinalizeBatchConfirm(t *testing.T) { db := setupL2RelayerDB(t) defer database.CloseDB(db) @@ -344,6 +499,75 @@ func testL2RelayerFinalizeConfirm(t *testing.T) { assert.True(t, ok) } +func testL2RelayerFinalizeBundleConfirm(t *testing.T) { + db := setupL2RelayerDB(t) + defer database.CloseDB(db) + + // Create and set up the Layer2 Relayer. + l2Cfg := cfg.L2Config + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil) + assert.NoError(t, err) + defer l2Relayer.StopSenders() + + // Simulate message confirmations. + isSuccessful := []bool{true, false} + batchOrm := orm.NewBatch(db) + bundleOrm := orm.NewBundle(db) + batchHashes := make([]string, len(isSuccessful)) + bundleHashes := make([]string, len(isSuccessful)) + for i := range batchHashes { + batch := &encoding.Batch{ + Index: uint64(i + 1), + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk1, chunk2}, + } + + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{}) + assert.NoError(t, err) + batchHashes[i] = dbBatch.Hash + + bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV3) + assert.NoError(t, err) + bundleHashes[i] = bundle.Hash + + err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash) + assert.NoError(t, err) + } + + for i, bundleHash := range bundleHashes { + l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{ + ContextID: "finalizeBundle-" + bundleHash, + IsSuccessful: isSuccessful[i], + TxHash: common.HexToHash("0x123456789abcdef"), + SenderType: types.SenderTypeFinalizeBatch, + }) + } + + assert.Eventually(t, func() bool { + expectedStatuses := []types.RollupStatus{ + types.RollupFinalized, + types.RollupFinalizeFailed, + } + + for i, bundleHash := range bundleHashes { + bundleInDB, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundleHash}, nil, 0) + if err != nil || len(bundleInDB) != 1 || types.RollupStatus(bundleInDB[0].RollupStatus) != expectedStatuses[i] { + return false + } + + batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHashes[i]}, nil, 0) + if err != nil || len(batchInDB) != 1 ||
types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] { + return false + } + } + + return true + }, 5*time.Second, 100*time.Millisecond, "Bundle or Batch status did not update as expected") +} + func testL2RelayerGasOracleConfirm(t *testing.T) { db := setupL2RelayerDB(t) defer database.CloseDB(db) diff --git a/rollup/internal/controller/relayer/relayer_test.go b/rollup/internal/controller/relayer/relayer_test.go index fe093e52f8..2908133472 100644 --- a/rollup/internal/controller/relayer/relayer_test.go +++ b/rollup/internal/controller/relayer/relayer_test.go @@ -124,11 +124,15 @@ func TestFunctions(t *testing.T) { t.Run("TestCreateNewRelayer", testCreateNewRelayer) t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches) t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches) + t.Run("TestL2RelayerProcessPendingBundles", testL2RelayerProcessPendingBundles) t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches) + t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles) t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm) - t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm) + t.Run("TestL2RelayerFinalizeBatchConfirm", testL2RelayerFinalizeBatchConfirm) + t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm) t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm) t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle) + // test getBatchStatusByIndex t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex) } diff --git a/rollup/internal/controller/watcher/batch_proposer.go b/rollup/internal/controller/watcher/batch_proposer.go index d36f7988ff..953a642648 100644 --- a/rollup/internal/controller/watcher/batch_proposer.go +++ b/rollup/internal/controller/watcher/batch_proposer.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "math/big" "time" "github.com/prometheus/client_golang/prometheus" @@ -36,7 +35,6 @@ type BatchProposer struct { batchTimeoutSec uint64 gasCostIncreaseMultiplier float64 maxUncompressedBatchBytesSize uint64 - forkMap map[uint64]bool chainCfg *params.ChainConfig @@ -60,14 +58,13 @@ type BatchProposer struct { // NewBatchProposer creates a new BatchProposer instance. 
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer { - forkHeights, forkMap, _ := forks.CollectSortedForkHeights(chainCfg) - log.Debug("new batch proposer", + log.Info("new batch proposer", "maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch, "maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch, "batchTimeoutSec", cfg.BatchTimeoutSec, "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier, - "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize, - "forkHeights", forkHeights) + "maxBlobSize", maxBlobSize, + "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize) p := &BatchProposer{ ctx: ctx, @@ -80,7 +77,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai batchTimeoutSec: cfg.BatchTimeoutSec, gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier, maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize, - forkMap: forkMap, chainCfg: chainCfg, batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ @@ -225,20 +221,7 @@ func (p *BatchProposer) proposeBatch() error { return err } - startBlockNum := new(big.Int).SetUint64(firstUnbatchedChunk.StartBlockNumber) - - var codecVersion encoding.CodecVersion - var maxChunksThisBatch uint64 - if !p.chainCfg.IsBernoulli(startBlockNum) { - codecVersion = encoding.CodecV0 - maxChunksThisBatch = 15 - } else if !p.chainCfg.IsCurie(startBlockNum) { - codecVersion = encoding.CodecV1 - maxChunksThisBatch = 15 - } else { - codecVersion = encoding.CodecV2 - maxChunksThisBatch = 45 - } + maxChunksThisBatch := forks.GetMaxChunksPerBatch(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime) // select at most maxChunkNumPerBatch chunks dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, firstUnbatchedChunkIndex, int(maxChunksThisBatch)) @@ -250,13 +233,14 @@ func (p *BatchProposer) proposeBatch() error { return nil } - for i, chunk := range dbChunks { - // if a chunk is starting at a fork boundary, only consider earlier chunks - if i != 0 && p.forkMap[chunk.StartBlockNumber] { + // Ensure all chunks in the same batch use the same hardfork name + // If a different hardfork name is found, truncate the chunks slice at that point + hardforkName := forks.GetHardforkName(p.chainCfg, dbChunks[0].StartBlockNumber, dbChunks[0].StartBlockTime) + for i := 1; i < len(dbChunks); i++ { + currentHardfork := forks.GetHardforkName(p.chainCfg, dbChunks[i].StartBlockNumber, dbChunks[i].StartBlockTime) + if currentHardfork != hardforkName { dbChunks = dbChunks[:i] - if uint64(len(dbChunks)) < maxChunksThisBatch { - maxChunksThisBatch = uint64(len(dbChunks)) - } + maxChunksThisBatch = uint64(len(dbChunks)) // update maxChunksThisBatch to trigger batching, because these chunks are the last chunks before the hardfork break } } @@ -271,6 +255,8 @@ func (p *BatchProposer) proposeBatch() error { return err } + codecVersion := forks.GetCodecVersion(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime) + var batch encoding.Batch batch.Index = dbParentBatch.Index + 1 batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash) diff --git a/rollup/internal/controller/watcher/batch_proposer_test.go b/rollup/internal/controller/watcher/batch_proposer_test.go index 845109b98e..b86e13274d 100644 --- a/rollup/internal/controller/watcher/batch_proposer_test.go +++ 
b/rollup/internal/controller/watcher/batch_proposer_test.go @@ -26,7 +26,6 @@ func testBatchProposerCodecv0Limits(t *testing.T) { maxL1CommitGas uint64 maxL1CommitCalldataSize uint64 batchTimeoutSec uint64 - forkBlock *big.Int expectedBatchesLen int expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 }{ @@ -75,15 +74,6 @@ func testBatchProposerCodecv0Limits(t *testing.T) { expectedBatchesLen: 1, expectedChunksInFirstBatch: 1, }, - { - name: "ForkBlockReached", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - forkBlock: big.NewInt(3), - }, } for _, tt := range tests { @@ -126,9 +116,7 @@ func testBatchProposerCodecv0Limits(t *testing.T) { MaxRowConsumptionPerChunk: 1000000, ChunkTimeoutSec: 300, GasCostIncreaseMultiplier: 1.2, - }, &params.ChainConfig{ - HomesteadBlock: tt.forkBlock, - }, db, nil) + }, &params.ChainConfig{}, db, nil) cp.TryProposeChunk() // chunk1 contains block1 cp.TryProposeChunk() // chunk2 contains block2 @@ -144,10 +132,7 @@ func testBatchProposerCodecv0Limits(t *testing.T) { MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, BatchTimeoutSec: tt.batchTimeoutSec, GasCostIncreaseMultiplier: 1.2, - }, &params.ChainConfig{ - HomesteadBlock: tt.forkBlock, - CurieBlock: big.NewInt(0), - }, db, nil) + }, &params.ChainConfig{}, db, nil) bp.TryProposeBatch() batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) @@ -178,7 +163,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) { maxL1CommitGas uint64 maxL1CommitCalldataSize uint64 batchTimeoutSec uint64 - forkBlock *big.Int expectedBatchesLen int expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 }{ @@ -227,15 +211,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) { expectedBatchesLen: 1, expectedChunksInFirstBatch: 1, }, - { - name: "ForkBlockReached", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - forkBlock: big.NewInt(3), - }, } for _, tt := range tests { @@ -280,7 +255,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) { GasCostIncreaseMultiplier: 1.2, }, &params.ChainConfig{ BernoulliBlock: big.NewInt(0), - HomesteadBlock: tt.forkBlock, }, db, nil) cp.TryProposeChunk() // chunk1 contains block1 cp.TryProposeChunk() // chunk2 contains block2 @@ -299,7 +273,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) { GasCostIncreaseMultiplier: 1.2, }, &params.ChainConfig{ BernoulliBlock: big.NewInt(0), - HomesteadBlock: tt.forkBlock, }, db, nil) bp.TryProposeBatch() @@ -331,7 +304,6 @@ func testBatchProposerCodecv2Limits(t *testing.T) { maxL1CommitGas uint64 maxL1CommitCalldataSize uint64 batchTimeoutSec uint64 - forkBlock *big.Int expectedBatchesLen int expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 }{ @@ -366,7 +338,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) { }, { name: "MaxL1CommitGasPerBatchIsFirstChunk", - maxL1CommitGas: 190330, + maxL1CommitGas: 189179, maxL1CommitCalldataSize: 1000000, batchTimeoutSec: 1000000000000, expectedBatchesLen: 1, @@ -380,14 +352,150 @@ func testBatchProposerCodecv2Limits(t *testing.T) { expectedBatchesLen: 1, expectedChunksInFirstBatch: 1, }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + // Add genesis batch.
+ block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) + + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: 1, + MaxTxNumPerChunk: 10000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, + MaxRowConsumptionPerChunk: 1000000, + ChunkTimeoutSec: 300, + GasCostIncreaseMultiplier: 1.2, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, ¶ms.ChainConfig{ + BernoulliBlock: big.NewInt(0), + CurieBlock: big.NewInt(0), + }, db, nil) + cp.TryProposeChunk() // chunk1 contains block1 + cp.TryProposeChunk() // chunk2 contains block2 + + chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) + assert.NoError(t, err) + assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas) + assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) + assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas) + assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) + + bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: tt.maxL1CommitGas, + MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, + BatchTimeoutSec: tt.batchTimeoutSec, + GasCostIncreaseMultiplier: 1.2, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, ¶ms.ChainConfig{ + BernoulliBlock: big.NewInt(0), + CurieBlock: big.NewInt(0), + }, db, nil) + bp.TryProposeBatch() + + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, batches, tt.expectedBatchesLen+1) + batches = batches[1:] + if tt.expectedBatchesLen > 0 { + assert.Equal(t, uint64(1), batches[0].StartChunkIndex) + assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex) + assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) + + dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch) + assert.NoError(t, err) + assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch)) + for _, chunk := range dbChunks { + assert.Equal(t, batches[0].Hash, chunk.BatchHash) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) + } + } + }) + } +} + +func testBatchProposerCodecv3Limits(t *testing.T) { + tests := []struct { + name string + maxL1CommitGas uint64 + maxL1CommitCalldataSize uint64 + batchTimeoutSec uint64 + expectedBatchesLen int + expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 + }{ + { + name: "NoLimitReached", + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 1000000, + batchTimeoutSec: 1000000000000, + expectedBatchesLen: 0, + }, { - name: 
"ForkBlockReached", + name: "Timeout", maxL1CommitGas: 50000000000, maxL1CommitCalldataSize: 1000000, + batchTimeoutSec: 0, + expectedBatchesLen: 1, + expectedChunksInFirstBatch: 2, + }, + { + name: "MaxL1CommitGasPerBatchIs0", + maxL1CommitGas: 0, + maxL1CommitCalldataSize: 1000000, + batchTimeoutSec: 1000000000000, + expectedBatchesLen: 0, + }, + { + name: "MaxL1CommitCalldataSizePerBatchIs0", + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 0, + batchTimeoutSec: 1000000000000, + expectedBatchesLen: 0, + }, + { + name: "MaxL1CommitGasPerBatchIsFirstChunk", + maxL1CommitGas: 249179, + maxL1CommitCalldataSize: 1000000, + batchTimeoutSec: 1000000000000, + expectedBatchesLen: 1, + expectedChunksInFirstBatch: 1, + }, + { + name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk", + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 60, batchTimeoutSec: 1000000000000, expectedBatchesLen: 1, expectedChunksInFirstBatch: 1, - forkBlock: big.NewInt(3), }, } @@ -435,16 +543,16 @@ func testBatchProposerCodecv2Limits(t *testing.T) { }, ¶ms.ChainConfig{ BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), - HomesteadBlock: tt.forkBlock, + DarwinTime: new(uint64), }, db, nil) cp.TryProposeChunk() // chunk1 contains block1 cp.TryProposeChunk() // chunk2 contains block2 chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) assert.NoError(t, err) - assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas) + assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas) assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas) + assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas) assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ @@ -456,7 +564,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) { }, ¶ms.ChainConfig{ BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), - HomesteadBlock: tt.forkBlock, + DarwinTime: new(uint64), }, db, nil) bp.TryProposeBatch() @@ -721,9 +829,90 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) { assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize) } +func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + // Add genesis batch. 
+ block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) + + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: 1, + MaxTxNumPerChunk: 10000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, + MaxRowConsumptionPerChunk: 1000000, + ChunkTimeoutSec: 300, + GasCostIncreaseMultiplier: 1.2, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil) + cp.TryProposeChunk() // chunk1 contains block1 + cp.TryProposeChunk() // chunk2 contains block2 + + chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) + assert.NoError(t, err) + assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas) + assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) + assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas) + assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) + + bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: 50000000000, + MaxL1CommitCalldataSizePerBatch: 1000000, + BatchTimeoutSec: 0, + GasCostIncreaseMultiplier: 1.2, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil) + bp.TryProposeBatch() + + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, batches, 2) + batches = batches[1:] + assert.Equal(t, uint64(1), batches[0].StartChunkIndex) + assert.Equal(t, uint64(2), batches[0].EndChunkIndex) + assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) + + dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) + assert.NoError(t, err) + assert.Len(t, dbChunks, 2) + for _, chunk := range dbChunks { + assert.Equal(t, batches[0].Hash, chunk.BatchHash) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) + } + + assert.Equal(t, uint64(209350), batches[0].TotalL1CommitGas) + assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize) +} + func testBatchProposerBlobSizeLimit(t *testing.T) { - compressionTests := []bool{false, true} // false for uncompressed, true for compressed - for _, compressed := range compressionTests { + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} + for _, codecVersion := range codecVersions { db := setupDB(t) // Add genesis batch.
@@ -750,10 +939,14 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { assert.NoError(t, err) var chainConfig *params.ChainConfig - if compressed { + if codecVersion == encoding.CodecV0 { // will never hit blob size limit + chainConfig = &params.ChainConfig{} + } else if codecVersion == encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else if codecVersion == encoding.CodecV2 { chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} } else { - chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} } cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ @@ -769,7 +962,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { blockHeight := int64(0) block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json") - for total := int64(0); total < 20; total++ { + for total := int64(0); total < 90; total++ { for i := int64(0); i < 30; i++ { blockHeight++ l2BlockOrm := orm.NewL2Block(db) @@ -783,12 +976,12 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ MaxL1CommitGasPerBatch: math.MaxUint64, MaxL1CommitCalldataSizePerBatch: math.MaxUint64, - BatchTimeoutSec: math.MaxUint64, + BatchTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) - for i := 0; i < 30; i++ { + for i := 0; i < 2; i++ { bp.TryProposeBatch() } @@ -798,12 +991,18 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { var expectedNumBatches int var numChunksMultiplier uint64 - if compressed { - expectedNumBatches = 1 - numChunksMultiplier = 20 - } else { - expectedNumBatches = 20 + if codecVersion == encoding.CodecV0 { + expectedNumBatches = 2 + numChunksMultiplier = 15 + } else if codecVersion == encoding.CodecV1 { + expectedNumBatches = 2 numChunksMultiplier = 1 + } else if codecVersion == encoding.CodecV2 { + expectedNumBatches = 2 + numChunksMultiplier = 45 + } else { + expectedNumBatches = 2 + numChunksMultiplier = 45 } assert.Len(t, batches, expectedNumBatches) @@ -815,8 +1014,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { } func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { - compressionTests := []bool{false, true} // false for uncompressed, true for compressed - for _, compressed := range compressionTests { + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} + for _, codecVersion := range codecVersions { db := setupDB(t) // Add genesis batch.
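// Editorial aside, not part of the patch: a reading of the expected values in
// the blob-size-limit hunk above, inferred from the removed constants and the
// new assertions rather than stated anywhere in the patch. CodecV0 has no blob
// and stops at its 15-chunk cap, a single one of these 30-block chunks appears
// to fill an uncompressed CodecV1 blob, and the compressed CodecV2/V3 blobs do
// not fill up before the 45-chunk cap, so every codec ends up with the same 2
// batches after the two TryProposeBatch calls. The genesis-batch setup for the
// next test continues below.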
@@ -842,11 +1041,20 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) assert.NoError(t, err) + var expectedChunkNum uint64 var chainConfig *params.ChainConfig - if compressed { + if codecVersion == encoding.CodecV0 { + chainConfig = &params.ChainConfig{} + expectedChunkNum = 15 + } else if codecVersion == encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + expectedChunkNum = 15 + } else if codecVersion == encoding.CodecV2 { chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} + expectedChunkNum = 45 } else { - chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + expectedChunkNum = 45 } cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ @@ -871,7 +1079,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ MaxL1CommitGasPerBatch: math.MaxUint64, MaxL1CommitCalldataSizePerBatch: math.MaxUint64, - BatchTimeoutSec: math.MaxUint64, + BatchTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) @@ -882,14 +1090,90 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { assert.Len(t, batches, 2) dbBatch := batches[1] - var expectedChunkNum uint64 - if compressed { - expectedChunkNum = 45 - } else { - expectedChunkNum = 15 - } assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex) database.CloseDB(db) } } + +func testBatchProposerRespectHardforks(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + chainConfig := &params.ChainConfig{ + BernoulliBlock: big.NewInt(1), + CurieBlock: big.NewInt(2), + DarwinTime: func() *uint64 { t := uint64(4); return &t }(), + } + + // Add genesis batch.
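// Editorial aside, not part of the patch: the assertions at the end of
// testBatchProposerRespectHardforks follow from the fork settings just above.
// With BernoulliBlock=1, CurieBlock=2 and DarwinTime=4 (block i carries
// timestamp i), the 60 inserted blocks should chunk as: chunk 1 = block 1
// (Bernoulli), chunk 2 = block 2 (the Curie transition block, kept in a sole
// chunk), chunk 3 = block 3 (Curie), chunk 4 = blocks 4-60 (Darwin). Batches
// then group chunks per hardfork: {1}, {2,3}, {4}. Including the genesis
// batch, that yields end chunk indices {0, 1, 3, 4} and end block numbers
// {0, 1, 3, 60}, which is what the test asserts. The genesis-batch setup
// continues below.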
+ block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: math.MaxUint64, + MaxTxNumPerChunk: math.MaxUint64, + MaxL1CommitGasPerChunk: math.MaxUint64, + MaxL1CommitCalldataSizePerChunk: math.MaxUint64, + MaxRowConsumptionPerChunk: math.MaxUint64, + ChunkTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json") + for i := int64(1); i <= 60; i++ { + block.Header.Number = big.NewInt(i) + block.Header.Time = uint64(i) + err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block}) + assert.NoError(t, err) + } + + for i := 0; i < 5; i++ { + cp.TryProposeChunk() + } + + bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: math.MaxUint64, + MaxL1CommitCalldataSizePerBatch: math.MaxUint64, + BatchTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + for i := 0; i < 5; i++ { + bp.TryProposeBatch() + } + + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, batches, 4) + + expectedEndChunkIndices := []uint64{0, 1, 3, 4} + expectedEndBlockNumbers := []uint64{0, 1, 3, 60} + for i, batch := range batches { + assert.Equal(t, expectedEndChunkIndices[i], batch.EndChunkIndex) + chunk, err := chunkOrm.GetChunkByIndex(context.Background(), batch.EndChunkIndex) + assert.NoError(t, err) + assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber) + } +} diff --git a/rollup/internal/controller/watcher/bundle_proposer.go b/rollup/internal/controller/watcher/bundle_proposer.go new file mode 100644 index 0000000000..d975ea3320 --- /dev/null +++ b/rollup/internal/controller/watcher/bundle_proposer.go @@ -0,0 +1,182 @@ +package watcher + +import ( + "context" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" + "gorm.io/gorm" + + "scroll-tech/common/forks" + + "scroll-tech/rollup/internal/config" + "scroll-tech/rollup/internal/orm" +) + +// BundleProposer proposes bundles based on available unbundled batches. 
+type BundleProposer struct { + ctx context.Context + db *gorm.DB + + chunkOrm *orm.Chunk + batchOrm *orm.Batch + bundleOrm *orm.Bundle + + maxBatchNumPerBundle uint64 + bundleTimeoutSec uint64 + + chainCfg *params.ChainConfig + + bundleProposerCircleTotal prometheus.Counter + proposeBundleFailureTotal prometheus.Counter + proposeBundleUpdateInfoTotal prometheus.Counter + proposeBundleUpdateInfoFailureTotal prometheus.Counter + bundleBatchesNum prometheus.Gauge + bundleFirstBlockTimeoutReached prometheus.Counter + bundleBatchesProposeNotEnoughTotal prometheus.Counter +} + +// NewBundleProposer creates a new BundleProposer instance. +func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProposer { + log.Info("new bundle proposer", "bundleBatchesNum", cfg.MaxBatchNumPerBundle, "bundleTimeoutSec", cfg.BundleTimeoutSec) + + p := &BundleProposer{ + ctx: ctx, + db: db, + chunkOrm: orm.NewChunk(db), + batchOrm: orm.NewBatch(db), + bundleOrm: orm.NewBundle(db), + maxBatchNumPerBundle: cfg.MaxBatchNumPerBundle, + bundleTimeoutSec: cfg.BundleTimeoutSec, + chainCfg: chainCfg, + + bundleProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_circle_total", + Help: "Total number of propose bundle attempts.", + }), + proposeBundleFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_failure_total", + Help: "Total number of propose bundle failures.", + }), + proposeBundleUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_update_info_total", + Help: "Total number of propose bundle update info attempts.", + }), + proposeBundleUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_update_info_failure_total", + Help: "Total number of propose bundle update info failures.", + }), + bundleBatchesNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "rollup_propose_bundle_batches_number", + Help: "The number of batches in the current bundle.", + }), + bundleFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_first_block_timeout_reached_total", + Help: "Total times the first block in a bundle reached the timeout.", + }), + bundleBatchesProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_batches_propose_not_enough_total", + Help: "Total number of times there were not enough batches to propose a bundle.", + }), + } + + return p +} + +// TryProposeBundle tries to propose a new bundle. 
+func (p *BundleProposer) TryProposeBundle() { + p.bundleProposerCircleTotal.Inc() + if err := p.proposeBundle(); err != nil { + p.proposeBundleFailureTotal.Inc() + log.Error("propose new bundle failed", "err", err) + return + } +} + +func (p *BundleProposer) updateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error { + if len(batches) == 0 { + return nil + } + + p.proposeBundleUpdateInfoTotal.Inc() + err := p.db.Transaction(func(dbTX *gorm.DB) error { + bundle, err := p.bundleOrm.InsertBundle(p.ctx, batches, codecVersion, dbTX) + if err != nil { + log.Warn("BundleProposer.InsertBundle failed", "err", err) + return err + } + if err := p.batchOrm.UpdateBundleHashInRange(p.ctx, bundle.StartBatchIndex, bundle.EndBatchIndex, bundle.Hash, dbTX); err != nil { + log.Error("failed to update bundle_hash for batches", "bundle hash", bundle.Hash, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err) + return err + } + return nil + }) + if err != nil { + p.proposeBundleUpdateInfoFailureTotal.Inc() + log.Error("update chunk info in orm failed", "err", err) + return err + } + return nil +} + +func (p *BundleProposer) proposeBundle() error { + firstUnbundledBatchIndex, err := p.bundleOrm.GetFirstUnbundledBatchIndex(p.ctx) + if err != nil { + return err + } + + // select at most maxBlocksThisChunk blocks + maxBatchesThisBundle := p.maxBatchNumPerBundle + batches, err := p.batchOrm.GetBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, encoding.CodecV3, int(maxBatchesThisBundle)) + if err != nil { + return err + } + + if len(batches) == 0 { + return nil + } + + // Ensure all blocks in the same chunk use the same hardfork name + // If a different hardfork name is found, truncate the blocks slice at that point + firstChunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[0].StartChunkIndex) + if err != nil { + return err + } + hardforkName := forks.GetHardforkName(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime) + codecVersion := forks.GetCodecVersion(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime) + for i := 1; i < len(batches); i++ { + chunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[i].StartChunkIndex) + if err != nil { + return err + } + currentHardfork := forks.GetHardforkName(p.chainCfg, chunk.StartBlockNumber, chunk.StartBlockTime) + if currentHardfork != hardforkName { + batches = batches[:i] + maxBatchesThisBundle = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork + break + } + } + + if uint64(len(batches)) == maxBatchesThisBundle { + log.Info("reached maximum number of batches per bundle", "batch count", len(batches), "start batch index", batches[0].Index, "end batch index", batches[len(batches)-1].Index) + p.bundleFirstBlockTimeoutReached.Inc() + p.bundleBatchesNum.Set(float64(len(batches))) + return p.updateDBBundleInfo(batches, codecVersion) + } + + currentTimeSec := uint64(time.Now().Unix()) + if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec { + log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "current time", currentTimeSec) + p.bundleFirstBlockTimeoutReached.Inc() + p.bundleBatchesNum.Set(float64(len(batches))) + return p.updateDBBundleInfo(batches, codecVersion) + } + + log.Debug("pending batches are not enough and do not contain a timeout batch") + 
p.bundleBatchesProposeNotEnoughTotal.Inc() + return nil +} diff --git a/rollup/internal/controller/watcher/bundle_proposer_test.go b/rollup/internal/controller/watcher/bundle_proposer_test.go new file mode 100644 index 0000000000..5b2e4458f5 --- /dev/null +++ b/rollup/internal/controller/watcher/bundle_proposer_test.go @@ -0,0 +1,226 @@ +package watcher + +import ( + "context" + "math" + "math/big" + "testing" + + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" + gethTypes "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/params" + "github.com/stretchr/testify/assert" + + "scroll-tech/common/database" + "scroll-tech/common/types" + + "scroll-tech/rollup/internal/config" + "scroll-tech/rollup/internal/orm" + "scroll-tech/rollup/internal/utils" +) + +func testBundleProposerLimits(t *testing.T) { + tests := []struct { + name string + maxBatchNumPerBundle uint64 + bundleTimeoutSec uint64 + expectedBundlesLen int + expectedBatchesInFirstBundle uint64 // only be checked when expectedBundlesLen > 0 + }{ + { + name: "NoLimitReached", + maxBatchNumPerBundle: math.MaxUint64, + bundleTimeoutSec: math.MaxUint32, + expectedBundlesLen: 0, + }, + { + name: "Timeout", + maxBatchNumPerBundle: math.MaxUint64, + bundleTimeoutSec: 0, + expectedBundlesLen: 1, + expectedBatchesInFirstBundle: 2, + }, + { + name: "maxBatchNumPerBundleIs0", + maxBatchNumPerBundle: 0, + bundleTimeoutSec: math.MaxUint32, + expectedBundlesLen: 0, + }, + { + name: "maxBatchNumPerBundleIs1", + maxBatchNumPerBundle: 1, + bundleTimeoutSec: math.MaxUint32, + expectedBundlesLen: 1, + expectedBatchesInFirstBundle: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + // Add genesis batch. 
+ block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) + + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) + + chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: 1, + MaxTxNumPerChunk: math.MaxUint64, + MaxL1CommitGasPerChunk: math.MaxUint64, + MaxL1CommitCalldataSizePerChunk: math.MaxUint64, + MaxRowConsumptionPerChunk: math.MaxUint64, + ChunkTimeoutSec: math.MaxUint32, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: math.MaxUint64, + MaxL1CommitCalldataSizePerBatch: math.MaxUint64, + BatchTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + cp.TryProposeChunk() // chunk1 contains block1 + bap.TryProposeBatch() // batch1 contains chunk1 + cp.TryProposeChunk() // chunk2 contains block2 + bap.TryProposeBatch() // batch2 contains chunk2 + + bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: tt.maxBatchNumPerBundle, + BundleTimeoutSec: tt.bundleTimeoutSec, + }, chainConfig, db, nil) + + bup.TryProposeBundle() + + bundleOrm := orm.NewBundle(db) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, bundles, tt.expectedBundlesLen) + if tt.expectedBundlesLen > 0 { + assert.Equal(t, uint64(1), bundles[0].StartBatchIndex) + assert.Equal(t, tt.expectedBatchesInFirstBundle, bundles[0].EndBatchIndex) + assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(bundles[0].ProvingStatus)) + } + }) + } +} + +func testBundleProposerRespectHardforks(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + chainConfig := &params.ChainConfig{ + BernoulliBlock: big.NewInt(1), + CurieBlock: big.NewInt(2), + DarwinTime: func() *uint64 { t := uint64(4); return &t }(), + } + + // Add genesis batch.
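// Editorial aside, not part of the patch: proposeBundle only picks up batches
// at CodecV3 or later (GetBatchesGEIndexGECodecVersion filters on the codec),
// so under the same fork layout as the batch-proposer test only the Darwin
// batch should ever be bundled. That is why this test expects a single bundle
// whose start and end batch index are both 3. The genesis-batch setup for
// testBundleProposerRespectHardforks continues below.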
+ block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: math.MaxUint64, + MaxTxNumPerChunk: math.MaxUint64, + MaxL1CommitGasPerChunk: math.MaxUint64, + MaxL1CommitCalldataSizePerChunk: math.MaxUint64, + MaxRowConsumptionPerChunk: math.MaxUint64, + ChunkTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json") + for i := int64(1); i <= 60; i++ { + block.Header.Number = big.NewInt(i) + block.Header.Time = uint64(i) + err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block}) + assert.NoError(t, err) + } + + for i := 0; i < 5; i++ { + cp.TryProposeChunk() + } + + bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: math.MaxUint64, + MaxL1CommitCalldataSizePerBatch: math.MaxUint64, + BatchTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + for i := 0; i < 5; i++ { + bap.TryProposeBatch() + } + + bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: math.MaxUint64, + BundleTimeoutSec: 0, + }, chainConfig, db, nil) + + for i := 0; i < 5; i++ { + bup.TryProposeBundle() + } + + bundleOrm := orm.NewBundle(db) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, bundles, 1) + + expectedStartBatchIndices := []uint64{3} + expectedEndChunkIndices := []uint64{3} + for i, bundle := range bundles { + assert.Equal(t, expectedStartBatchIndices[i], bundle.StartBatchIndex) + assert.Equal(t, expectedEndChunkIndices[i], bundle.EndBatchIndex) + } +} diff --git a/rollup/internal/controller/watcher/chunk_proposer.go b/rollup/internal/controller/watcher/chunk_proposer.go index 3b91249dfd..62d8c73dfe 100644 --- a/rollup/internal/controller/watcher/chunk_proposer.go +++ b/rollup/internal/controller/watcher/chunk_proposer.go @@ -36,7 +36,6 @@ type ChunkProposer struct { chunkTimeoutSec uint64 gasCostIncreaseMultiplier float64 maxUncompressedBatchBytesSize uint64 - forkHeights []uint64 chainCfg *params.ChainConfig @@ -62,16 +61,16 @@ type ChunkProposer struct { // NewChunkProposer creates a new ChunkProposer instance. 
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer { - forkHeights, _, _ := forks.CollectSortedForkHeights(chainCfg) - log.Debug("new chunk proposer", + log.Info("new chunk proposer", + "maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk, "maxTxNumPerChunk", cfg.MaxTxNumPerChunk, "maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk, "maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk, "maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk, "chunkTimeoutSec", cfg.ChunkTimeoutSec, "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier, - "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize, - "forkHeights", forkHeights) + "maxBlobSize", maxBlobSize, + "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize) p := &ChunkProposer{ ctx: ctx, @@ -86,7 +85,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai chunkTimeoutSec: cfg.ChunkTimeoutSec, gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier, maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize, - forkHeights: forkHeights, chainCfg: chainCfg, chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ @@ -240,10 +238,6 @@ func (p *ChunkProposer) proposeChunk() error { } maxBlocksThisChunk := p.maxBlockNumPerChunk - blocksUntilFork := forks.BlocksUntilFork(unchunkedBlockHeight, p.forkHeights) - if blocksUntilFork != 0 && blocksUntilFork < maxBlocksThisChunk { - maxBlocksThisChunk = blocksUntilFork - } // select at most maxBlocksThisChunk blocks blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk)) @@ -255,15 +249,20 @@ func (p *ChunkProposer) proposeChunk() error { return nil } - var codecVersion encoding.CodecVersion - if !p.chainCfg.IsBernoulli(blocks[0].Header.Number) { - codecVersion = encoding.CodecV0 - } else if !p.chainCfg.IsCurie(blocks[0].Header.Number) { - codecVersion = encoding.CodecV1 - } else { - codecVersion = encoding.CodecV2 + // Ensure all blocks in the same chunk use the same hardfork name + // If a different hardfork name is found, truncate the blocks slice at that point + hardforkName := forks.GetHardforkName(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time) + for i := 1; i < len(blocks); i++ { + currentHardfork := forks.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time) + if currentHardfork != hardforkName { + blocks = blocks[:i] + maxBlocksThisChunk = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork + break + } } + codecVersion := forks.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time) + // Including Curie block in a sole chunk. 
if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 { chunk := encoding.Chunk{Blocks: blocks[:1]} @@ -334,10 +333,9 @@ func (p *ChunkProposer) proposeChunk() error { currentTimeSec := uint64(time.Now().Unix()) if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk { log.Info("reached maximum number of blocks in chunk or first block timeout", - "start block number", chunk.Blocks[0].Header.Number, "block count", len(chunk.Blocks), - "block number", chunk.Blocks[0].Header.Number, - "block timestamp", metrics.FirstBlockTimestamp, + "start block number", chunk.Blocks[0].Header.Number, + "start block timestamp", metrics.FirstBlockTimestamp, "current time", currentTimeSec) p.chunkFirstBlockTimeoutReached.Inc() diff --git a/rollup/internal/controller/watcher/chunk_proposer_test.go b/rollup/internal/controller/watcher/chunk_proposer_test.go index c60d646168..1a71827e6b 100644 --- a/rollup/internal/controller/watcher/chunk_proposer_test.go +++ b/rollup/internal/controller/watcher/chunk_proposer_test.go @@ -25,7 +25,6 @@ func testChunkProposerCodecv0Limits(t *testing.T) { maxL1CommitCalldataSize uint64 maxRowConsumption uint64 chunkTimeoutSec uint64 - forkBlock *big.Int expectedChunksLen int expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0 }{ @@ -145,18 +144,6 @@ func testChunkProposerCodecv0Limits(t *testing.T) { expectedChunksLen: 1, expectedBlocksInFirstChunk: 1, }, - { - name: "ForkBlockReached", - maxBlockNum: 100, - maxTxNum: 10000, - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - maxRowConsumption: 1000000, - chunkTimeoutSec: 1000000000000, - expectedChunksLen: 1, - expectedBlocksInFirstChunk: 1, - forkBlock: big.NewInt(2), - }, } for _, tt := range tests { @@ -176,9 +163,7 @@ func testChunkProposerCodecv0Limits(t *testing.T) { MaxRowConsumptionPerChunk: tt.maxRowConsumption, ChunkTimeoutSec: tt.chunkTimeoutSec, GasCostIncreaseMultiplier: 1.2, - }, &params.ChainConfig{ - HomesteadBlock: tt.forkBlock, - }, db, nil) + }, &params.ChainConfig{}, db, nil) cp.TryProposeChunk() chunkOrm := orm.NewChunk(db) @@ -209,7 +194,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) { maxL1CommitCalldataSize uint64 maxRowConsumption uint64 chunkTimeoutSec uint64 - forkBlock *big.Int expectedChunksLen int expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0 }{ @@ -329,18 +313,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) { expectedChunksLen: 1, expectedBlocksInFirstChunk: 1, }, - { - name: "ForkBlockReached", - maxBlockNum: 100, - maxTxNum: 10000, - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - maxRowConsumption: 1000000, - chunkTimeoutSec: 1000000000000, - expectedChunksLen: 1, - expectedBlocksInFirstChunk: 1, - forkBlock: big.NewInt(2), - }, } for _, tt := range tests { @@ -360,9 +332,7 @@ func testChunkProposerCodecv1Limits(t *testing.T) { MaxRowConsumptionPerChunk: tt.maxRowConsumption, ChunkTimeoutSec: tt.chunkTimeoutSec, GasCostIncreaseMultiplier: 1.2, - }, &params.ChainConfig{ - BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock, - }, db, nil) + }, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil) cp.TryProposeChunk() chunkOrm := orm.NewChunk(db) @@ -393,7 +363,6 @@ func testChunkProposerCodecv2Limits(t *testing.T) { maxL1CommitCalldataSize uint64 maxRowConsumption uint64 chunkTimeoutSec uint64 - forkBlock *big.Int expectedChunksLen int expectedBlocksInFirstChunk int // only be checked when
expectedChunksLen > 0 }{ @@ -513,17 +482,175 @@ func testChunkProposerCodecv2Limits(t *testing.T) { expectedChunksLen: 1, expectedBlocksInFirstChunk: 1, }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + l2BlockOrm := orm.NewL2Block(db) + err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: tt.maxBlockNum, + MaxTxNumPerChunk: tt.maxTxNum, + MaxL1CommitGasPerChunk: tt.maxL1CommitGas, + MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize, + MaxRowConsumptionPerChunk: tt.maxRowConsumption, + ChunkTimeoutSec: tt.chunkTimeoutSec, + GasCostIncreaseMultiplier: 1.2, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil) + cp.TryProposeChunk() + + chunkOrm := orm.NewChunk(db) + chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0) + assert.NoError(t, err) + assert.Len(t, chunks, tt.expectedChunksLen) + + if len(chunks) > 0 { + blockOrm := orm.NewL2Block(db) + chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk) + assert.NoError(t, err) + assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk) + firstChunkHash := chunks[0].Hash + for _, chunkHash := range chunkHashes { + assert.Equal(t, firstChunkHash, chunkHash) + } + } + }) + } +} + +func testChunkProposerCodecv3Limits(t *testing.T) { + tests := []struct { + name string + maxBlockNum uint64 + maxTxNum uint64 + maxL1CommitGas uint64 + maxL1CommitCalldataSize uint64 + maxRowConsumption uint64 + chunkTimeoutSec uint64 + expectedChunksLen int + expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0 + }{ + { + name: "NoLimitReached", + maxBlockNum: 100, + maxTxNum: 10000, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 0, + }, { - name: "ForkBlockReached", + name: "Timeout", maxBlockNum: 100, maxTxNum: 10000, maxL1CommitGas: 50000000000, maxL1CommitCalldataSize: 1000000, maxRowConsumption: 1000000, + chunkTimeoutSec: 0, + expectedChunksLen: 1, + expectedBlocksInFirstChunk: 2, + }, + { + name: "MaxTxNumPerChunkIs0", + maxBlockNum: 10, + maxTxNum: 0, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 0, + }, + { + name: "MaxL1CommitGasPerChunkIs0", + maxBlockNum: 10, + maxTxNum: 10000, + maxL1CommitGas: 0, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 0, + }, + { + name: "MaxL1CommitCalldataSizePerChunkIs0", + maxBlockNum: 10, + maxTxNum: 10000, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 0, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 0, + }, + { + name: "MaxRowConsumptionPerChunkIs0", + maxBlockNum: 100, + maxTxNum: 10000, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 0, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 0, + }, + { + name: "MaxBlockNumPerChunkIs1", + maxBlockNum: 1, + maxTxNum: 10000, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 1, + 
expectedBlocksInFirstChunk: 1, + }, + { + name: "MaxTxNumPerChunkIsFirstBlock", + maxBlockNum: 10, + maxTxNum: 2, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 1, + expectedBlocksInFirstChunk: 1, + }, + { + name: "MaxL1CommitGasPerChunkIsFirstBlock", + maxBlockNum: 10, + maxTxNum: 10000, + maxL1CommitGas: 62500, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 1, + expectedBlocksInFirstChunk: 1, + }, + { + name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock", + maxBlockNum: 10, + maxTxNum: 10000, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 60, + maxRowConsumption: 1000000, + chunkTimeoutSec: 1000000000000, + expectedChunksLen: 1, + expectedBlocksInFirstChunk: 1, + }, + { + name: "MaxRowConsumptionPerChunkIs1", + maxBlockNum: 10, + maxTxNum: 10000, + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 1000000, + maxRowConsumption: 1, chunkTimeoutSec: 1000000000000, expectedChunksLen: 1, expectedBlocksInFirstChunk: 1, - forkBlock: big.NewInt(2), }, } @@ -545,7 +672,7 @@ func testChunkProposerCodecv2Limits(t *testing.T) { ChunkTimeoutSec: tt.chunkTimeoutSec, GasCostIncreaseMultiplier: 1.2, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil) + }, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil) cp.TryProposeChunk() chunkOrm := orm.NewChunk(db) @@ -568,7 +695,7 @@ func testChunkProposerCodecv2Limits(t *testing.T) { } func testChunkProposerBlobSizeLimit(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} for _, codecVersion := range codecVersions { db := setupDB(t) block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json") @@ -584,8 +711,10 @@ func testChunkProposerBlobSizeLimit(t *testing.T) { chainConfig = &params.ChainConfig{} } else if codecVersion == encoding.CodecV1 { chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} - } else { + } else if codecVersion == encoding.CodecV2 { chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} + } else { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} } cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ @@ -615,6 +744,8 @@ func testChunkProposerBlobSizeLimit(t *testing.T) { numBlocksMultiplier = 22 } else if codecVersion == encoding.CodecV2 { numBlocksMultiplier = 255 + } else { + numBlocksMultiplier = 255 } assert.Len(t, chunks, expectedNumChunks) @@ -629,12 +760,15 @@ func testChunkProposerBlobSizeLimit(t *testing.T) { } } -func testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) { +func testChunkProposerRespectHardforks(t *testing.T) { db := setupDB(t) + defer database.CloseDB(db) + block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json") - for i := int64(0); i < 10; i++ { + for i := int64(1); i <= 20; i++ { l2BlockOrm := orm.NewL2Block(db) block.Header.Number = big.NewInt(i) + block.Header.Time = uint64(i) err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block}) assert.NoError(t, err) } @@ -645,12 +779,16 @@ func
testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) { MaxL1CommitGasPerChunk: math.MaxUint64, MaxL1CommitCalldataSizePerChunk: math.MaxUint64, MaxRowConsumptionPerChunk: math.MaxUint64, - ChunkTimeoutSec: math.MaxUint64, + ChunkTimeoutSec: 0, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2)}, db, nil) + }, &params.ChainConfig{ + BernoulliBlock: big.NewInt(1), + CurieBlock: big.NewInt(2), + DarwinTime: func() *uint64 { t := uint64(4); return &t }(), + }, db, nil) - for i := 0; i < 2; i++ { + for i := 0; i < 5; i++ { cp.TryProposeChunk() } @@ -658,9 +796,9 @@ func testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) { chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0) assert.NoError(t, err) - assert.Len(t, chunks, 2) + assert.Len(t, chunks, 4) + expectedEndBlockNumbers := []uint64{1, 2, 3, 20} for i, chunk := range chunks { - assert.Equal(t, uint64(i+1), chunk.EndBlockNumber) + assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber) } - database.CloseDB(db) } diff --git a/rollup/internal/controller/watcher/watcher_test.go b/rollup/internal/controller/watcher/watcher_test.go index 9413623996..03498b3c30 100644 --- a/rollup/internal/controller/watcher/watcher_test.go +++ b/rollup/internal/controller/watcher/watcher_test.go @@ -103,18 +103,26 @@ func TestFunction(t *testing.T) { t.Run("TestChunkProposerCodecv0Limits", testChunkProposerCodecv0Limits) t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits) t.Run("TestChunkProposerCodecv2Limits", testChunkProposerCodecv2Limits) + t.Run("TestChunkProposerCodecv3Limits", testChunkProposerCodecv3Limits) t.Run("TestChunkProposerBlobSizeLimit", testChunkProposerBlobSizeLimit) - t.Run("TestChunkProposerIncludeCurieBlockInOneChunk", testChunkProposerIncludeCurieBlockInOneChunk) + t.Run("TestChunkProposerRespectHardforks", testChunkProposerRespectHardforks) // Run batch proposer test cases. t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits) t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits) t.Run("TestBatchProposerCodecv2Limits", testBatchProposerCodecv2Limits) + t.Run("TestBatchProposerCodecv3Limits", testBatchProposerCodecv3Limits) t.Run("TestBatchCommitGasAndCalldataSizeCodecv0Estimation", testBatchCommitGasAndCalldataSizeCodecv0Estimation) t.Run("TestBatchCommitGasAndCalldataSizeCodecv1Estimation", testBatchCommitGasAndCalldataSizeCodecv1Estimation) t.Run("TestBatchCommitGasAndCalldataSizeCodecv2Estimation", testBatchCommitGasAndCalldataSizeCodecv2Estimation) + t.Run("TestBatchCommitGasAndCalldataSizeCodecv3Estimation", testBatchCommitGasAndCalldataSizeCodecv3Estimation) t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit) t.Run("TestBatchProposerMaxChunkNumPerBatchLimit", testBatchProposerMaxChunkNumPerBatchLimit) + t.Run("TestBatchProposerRespectHardforks", testBatchProposerRespectHardforks) + + // Run bundle proposer test cases.
+ t.Run("TestBundleProposerLimits", testBundleProposerLimits) + t.Run("TestBundleProposerRespectHardforks", testBundleProposerRespectHardforks) } func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { diff --git a/rollup/internal/orm/batch.go b/rollup/internal/orm/batch.go index 0c0c00545a..dc572c2f98 100644 --- a/rollup/internal/orm/batch.go +++ b/rollup/internal/orm/batch.go @@ -34,6 +34,7 @@ type Batch struct { WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"` ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"` BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"` + CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"` // proof ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"` @@ -58,6 +59,9 @@ type Batch struct { BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"` BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"` + // bundle + BundleHash string `json:"bundle_hash" gorm:"column:bundle_hash"` + // metadata TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"` TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"` @@ -157,6 +161,26 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error) return latestBatch.EndChunkIndex + 1, nil } +// GetBatchesGEIndexGECodecVersion retrieves batches that have a batch index greater than or equal to the given index and codec version. +// The returned batches are sorted in ascending order by their index. +func (o *Batch) GetBatchesGEIndexGECodecVersion(ctx context.Context, index uint64, codecv encoding.CodecVersion, limit int) ([]*Batch, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("index >= ?", index) + db = db.Where("codec_version >= ?", codecv) + db = db.Order("index ASC") + + if limit > 0 { + db = db.Limit(limit) + } + + var batches []*Batch + if err := db.Find(&batches).Error; err != nil { + return nil, fmt.Errorf("Batch.GetBatchesGEIndexGECodecVersion error: %w", err) + } + return batches, nil +} + // GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes. 
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) { if len(hashes) == 0 { @@ -264,6 +288,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer WithdrawRoot: batch.WithdrawRoot().Hex(), ParentBatchHash: batch.ParentBatchHash.Hex(), BatchHeader: batchMeta.BatchBytes, + CodecVersion: int16(codecVersion), ChunkProofsStatus: int16(types.ChunkProofsStatusPending), ProvingStatus: int16(types.ProvingTaskUnassigned), RollupStatus: int16(types.RollupPending), @@ -391,7 +416,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st db = db.Where("hash", hash) if err := db.Updates(updateFields).Error; err != nil { - return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash) + return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, finalizeTxHash: %v", err, hash, status.String(), finalizeTxHash) } return nil } @@ -417,3 +442,73 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa } return nil } + +// UpdateBundleHashInRange updates the bundle_hash for batches within the specified range (inclusive). +// The range is closed, i.e., it includes both start and end indices. +func (o *Batch) UpdateBundleHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, bundleHash string, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("index >= ? AND index <= ?", startIndex, endIndex) + + if err := db.Update("bundle_hash", bundleHash).Error; err != nil { + return fmt.Errorf("Batch.UpdateBundleHashInRange error: %w, start index: %v, end index: %v, bundle hash: %v", err, startIndex, endIndex, bundleHash) + } + return nil +} + +// UpdateProvingStatusByBundleHash updates the proving_status for batches within the specified bundle_hash +func (o *Batch) UpdateProvingStatusByBundleHash(ctx context.Context, bundleHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["proving_status"] = int(status) + + switch status { + case types.ProvingTaskAssigned: + updateFields["prover_assigned_at"] = time.Now() + case types.ProvingTaskUnassigned: + updateFields["prover_assigned_at"] = nil + case types.ProvingTaskVerified: + updateFields["proved_at"] = time.Now() + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("bundle_hash = ?", bundleHash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Batch.UpdateProvingStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String()) + } + return nil +} + +// UpdateFinalizeTxHashAndRollupStatusByBundleHash updates the finalize transaction hash and rollup status for batches with the specified bundle_hash +func (o *Batch) UpdateFinalizeTxHashAndRollupStatusByBundleHash(ctx context.Context, bundleHash string, finalizeTxHash string, status types.RollupStatus, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["finalize_tx_hash"] = finalizeTxHash + updateFields["rollup_status"] = int(status) + + switch status { + case types.RollupFinalized: + updateFields["finalized_at"] = utils.NowUTC() + } + + db := o.db + if len(dbTX) >
0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("bundle_hash = ?", bundleHash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String()) + } + return nil +} diff --git a/rollup/internal/orm/bundle.go b/rollup/internal/orm/bundle.go new file mode 100644 index 0000000000..6965f6dfae --- /dev/null +++ b/rollup/internal/orm/bundle.go @@ -0,0 +1,280 @@ +package orm + +import ( + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto" + "gorm.io/gorm" + + "scroll-tech/common/types" + "scroll-tech/common/types/message" + "scroll-tech/common/utils" +) + +// Bundle represents a bundle of batches. +type Bundle struct { + db *gorm.DB `gorm:"column:-"` + + // bundle + Index uint64 `json:"index" gorm:"column:index;primaryKey"` + Hash string `json:"hash" gorm:"column:hash"` + StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"` + EndBatchIndex uint64 `json:"end_batch_index" gorm:"column:end_batch_index"` + StartBatchHash string `json:"start_batch_hash" gorm:"column:start_batch_hash"` + EndBatchHash string `json:"end_batch_hash" gorm:"column:end_batch_hash"` + CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"` + + // proof + BatchProofsStatus int16 `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"` + ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` + Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` + ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` + ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` + + // rollup + RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"` + FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"` + FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"` + + // metadata + CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` + UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` + DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"` +} + +// NewBundle creates a new Bundle database instance. +func NewBundle(db *gorm.DB) *Bundle { + return &Bundle{db: db} +} + +// TableName returns the table name for the Bundle model. +func (*Bundle) TableName() string { + return "bundle" +} + +// getLatestBundle retrieves the latest bundle from the database. +func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Order("index desc") + + var latestBundle Bundle + if err := db.First(&latestBundle).Error; err != nil { + if err == gorm.ErrRecordNotFound { + return nil, nil + } + return nil, fmt.Errorf("getLatestBundle error: %w", err) + } + return &latestBundle, nil +} + +// GetBundles retrieves selected bundles from the database. +// The returned bundles are sorted in ascending order by their index. +// only used in unit tests. 
+func (o *Bundle) GetBundles(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Bundle, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + + for key, value := range fields { + db = db.Where(key, value) + } + + for _, orderBy := range orderByList { + db = db.Order(orderBy) + } + + if limit > 0 { + db = db.Limit(limit) + } + + db = db.Order("index ASC") + + var bundles []*Bundle + if err := db.Find(&bundles).Error; err != nil { + return nil, fmt.Errorf("Bundle.GetBundles error: %w, fields: %v, orderByList: %v", err, fields, orderByList) + } + return bundles, nil +} + +// GetFirstUnbundledBatchIndex retrieves the first unbundled batch index. +func (o *Bundle) GetFirstUnbundledBatchIndex(ctx context.Context) (uint64, error) { + // Get the latest bundle + latestBundle, err := o.getLatestBundle(ctx) + if err != nil { + return 0, fmt.Errorf("Bundle.GetFirstUnbundledBatchIndex error: %w", err) + } + if latestBundle == nil { + return 0, nil + } + return latestBundle.EndBatchIndex + 1, nil +} + +// GetFirstPendingBundle retrieves the first pending bundle from the database. +func (o *Bundle) GetFirstPendingBundle(ctx context.Context) (*Bundle, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("rollup_status = ?", types.RollupPending) + db = db.Order("index asc") + + var pendingBundle Bundle + if err := db.First(&pendingBundle).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return nil, fmt.Errorf("Bundle.GetFirstPendingBundle error: %w", err) + } + return &pendingBundle, nil +} + +// GetVerifiedProofByHash retrieves the verified aggregate proof for a bundle with the given hash. +func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.BundleProof, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Select("proof") + db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified) + + var bundle Bundle + if err := db.Find(&bundle).Error; err != nil { + return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash) + } + + var proof message.BundleProof + if err := json.Unmarshal(bundle.Proof, &proof); err != nil { + return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash) + } + return &proof, nil +} + +// InsertBundle inserts a new bundle into the database. +// Assuming input batches are ordered by index. +func (o *Bundle) InsertBundle(ctx context.Context, batches []*Batch, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Bundle, error) { + if len(batches) == 0 { + return nil, errors.New("Bundle.InsertBundle error: no batches provided") + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + + newBundle := Bundle{ + StartBatchHash: batches[0].Hash, + StartBatchIndex: batches[0].Index, + EndBatchHash: batches[len(batches)-1].Hash, + EndBatchIndex: batches[len(batches)-1].Index, + BatchProofsStatus: int16(types.BatchProofsStatusPending), + ProvingStatus: int16(types.ProvingTaskUnassigned), + RollupStatus: int16(types.RollupPending), + CodecVersion: int16(codecVersion), + } + + // Not part of DA hash, used for SQL query consistency and ease of use. + // Derived using keccak256(concat(start_batch_hash_bytes, end_batch_hash_bytes)). 
+ newBundle.Hash = hex.EncodeToString(crypto.Keccak256(append(common.Hex2Bytes(newBundle.StartBatchHash[2:]), common.Hex2Bytes(newBundle.EndBatchHash[2:])...))) + + if err := db.Create(&newBundle).Error; err != nil { + return nil, fmt.Errorf("Bundle.InsertBundle Create error: %w, bundle hash: %v", err, newBundle.Hash) + } + + return &newBundle, nil +} + +// UpdateFinalizeTxHashAndRollupStatus updates the finalize transaction hash and rollup status for a bundle. +func (o *Bundle) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error { + updateFields := make(map[string]interface{}) + updateFields["finalize_tx_hash"] = finalizeTxHash + updateFields["rollup_status"] = int(status) + if status == types.RollupFinalized { + updateFields["finalized_at"] = time.Now() + } + + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateFinalizeTxHashAndRollupStatus error: %w, bundle hash: %v, status: %v, finalizeTxHash: %v", err, hash, status.String(), finalizeTxHash) + } + return nil +} + +// UpdateProvingStatus updates the proving status of a bundle. +func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["proving_status"] = int(status) + + switch status { + case types.ProvingTaskVerified: + updateFields["proved_at"] = time.Now() + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateProvingStatus error: %w, bundle hash: %v, status: %v", err, hash, status.String()) + } + return nil +} + +// UpdateRollupStatus updates the rollup status for a bundle. +// only used in unit tests. +func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error { + updateFields := make(map[string]interface{}) + updateFields["rollup_status"] = int(status) + if status == types.RollupFinalized { + updateFields["finalized_at"] = time.Now() + } + + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateRollupStatus error: %w, bundle hash: %v, status: %v", err, hash, status.String()) + } + return nil +} + +// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash. +// only used in unit tests. 
+func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + proofBytes, err := json.Marshal(proof) + if err != nil { + return err + } + + updateFields := make(map[string]interface{}) + updateFields["proof"] = proofBytes + updateFields["proving_status"] = provingStatus + updateFields["proof_time_sec"] = proofTimeSec + updateFields["proved_at"] = utils.NowUTC() + + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateProofAndProvingStatusByHash error: %w, bundle hash: %v", err, hash) + } + return nil +} diff --git a/rollup/internal/orm/orm_test.go b/rollup/internal/orm/orm_test.go index f37f169c48..85ff5b96e4 100644 --- a/rollup/internal/orm/orm_test.go +++ b/rollup/internal/orm/orm_test.go @@ -10,6 +10,8 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/da-codec/encoding/codecv0" "github.com/scroll-tech/da-codec/encoding/codecv1" + "github.com/scroll-tech/da-codec/encoding/codecv2" + "github.com/scroll-tech/da-codec/encoding/codecv3" "github.com/scroll-tech/go-ethereum/common" gethTypes "github.com/scroll-tech/go-ethereum/core/types" "github.com/stretchr/testify/assert" @@ -17,6 +19,7 @@ import ( "scroll-tech/common/testcontainers" "scroll-tech/common/types" + "scroll-tech/common/types/message" "scroll-tech/database/migrate" "scroll-tech/rollup/internal/utils" @@ -29,6 +32,7 @@ var ( l2BlockOrm *L2Block chunkOrm *Chunk batchOrm *Batch + bundleOrm *Bundle pendingTransactionOrm *PendingTransaction block1 *encoding.Block @@ -59,6 +63,7 @@ func setupEnv(t *testing.T) { assert.NoError(t, err) assert.NoError(t, migrate.ResetDB(sqlDB)) + bundleOrm = NewBundle(db) batchOrm = NewBatch(db) chunkOrm = NewChunk(db) l2BlockOrm = NewL2Block(db) @@ -165,7 +170,7 @@ func TestL2BlockOrm(t *testing.T) { } func TestChunkOrm(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} for _, codecVersion := range codecVersions { @@ -184,7 +189,7 @@ func TestChunkOrm(t *testing.T) { assert.NoError(t, createErr) chunkHash2, err = daChunk2.Hash() assert.NoError(t, err) - } else { + } else if codecVersion == encoding.CodecV1 { daChunk1, createErr := codecv1.NewDAChunk(chunk1, 0) assert.NoError(t, createErr) chunkHash1, err = daChunk1.Hash() @@ -194,6 +199,26 @@ func TestChunkOrm(t *testing.T) { assert.NoError(t, createErr) chunkHash2, err = daChunk2.Hash() assert.NoError(t, err) + } else if codecVersion == encoding.CodecV2 { + daChunk1, createErr := codecv2.NewDAChunk(chunk1, 0) + assert.NoError(t, createErr) + chunkHash1, err = daChunk1.Hash() + assert.NoError(t, err) + + daChunk2, createErr := codecv2.NewDAChunk(chunk2, chunk1.NumL1Messages(0)) + assert.NoError(t, createErr) + chunkHash2, err = daChunk2.Hash() + assert.NoError(t, err) + } else { + daChunk1, createErr := codecv3.NewDAChunk(chunk1, 0) + assert.NoError(t, createErr) + chunkHash1, err = daChunk1.Hash() + assert.NoError(t, err) + + daChunk2, createErr := codecv3.NewDAChunk(chunk2, chunk1.NumL1Messages(0)) + assert.NoError(t, 
createErr) + chunkHash2, err = daChunk2.Hash() + assert.NoError(t, err) } dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, utils.ChunkMetrics{}) @@ -238,7 +263,7 @@ func TestChunkOrm(t *testing.T) { } func TestBatchOrm(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} for _, codecVersion := range codecVersions { @@ -247,10 +272,8 @@ func TestBatchOrm(t *testing.T) { assert.NoError(t, migrate.ResetDB(sqlDB)) batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk1}, + Index: 0, + Chunks: []*encoding.Chunk{chunk1}, } batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{}) assert.NoError(t, err) @@ -264,18 +287,24 @@ func TestBatchOrm(t *testing.T) { daBatch1, createErr := codecv0.NewDABatchFromBytes(batch1.BatchHeader) assert.NoError(t, createErr) batchHash1 = daBatch1.Hash().Hex() - } else { + } else if codecVersion == encoding.CodecV1 { daBatch1, createErr := codecv1.NewDABatchFromBytes(batch1.BatchHeader) assert.NoError(t, createErr) batchHash1 = daBatch1.Hash().Hex() + } else if codecVersion == encoding.CodecV2 { + daBatch1, createErr := codecv2.NewDABatchFromBytes(batch1.BatchHeader) + assert.NoError(t, createErr) + batchHash1 = daBatch1.Hash().Hex() + } else { + daBatch1, createErr := codecv3.NewDABatchFromBytes(batch1.BatchHeader) + assert.NoError(t, createErr) + batchHash1 = daBatch1.Hash().Hex() } assert.Equal(t, hash1, batchHash1) batch = &encoding.Batch{ - Index: 1, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk2}, + Index: 1, + Chunks: []*encoding.Chunk{chunk2}, } batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{}) assert.NoError(t, err) @@ -351,9 +380,194 @@ func TestBatchOrm(t *testing.T) { assert.NotNil(t, updatedBatch) assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash) assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus)) + + batches, err := batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0) + assert.NoError(t, err) + assert.Equal(t, 2, len(batches)) + assert.Equal(t, batchHash1, batches[0].Hash) + assert.Equal(t, batchHash2, batches[1].Hash) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 1) + assert.NoError(t, err) + assert.Equal(t, 1, len(batches)) + assert.Equal(t, batchHash1, batches[0].Hash) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 1, codecVersion, 0) + assert.NoError(t, err) + assert.Equal(t, 1, len(batches)) + assert.Equal(t, batchHash2, batches[0].Hash) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion+1, 0) + assert.NoError(t, err) + assert.Equal(t, 0, len(batches)) + + err = batchOrm.UpdateBundleHashInRange(context.Background(), 0, 0, "test hash") + assert.NoError(t, err) + + err = batchOrm.UpdateProvingStatusByBundleHash(context.Background(), "test hash", types.ProvingTaskFailed) + assert.NoError(t, err) + + err = batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(context.Background(), "test hash", "tx 
hash", types.RollupCommitFailed) + assert.NoError(t, err) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0) + assert.NoError(t, err) + assert.Equal(t, 2, len(batches)) + assert.Equal(t, batchHash1, batches[0].Hash) + assert.Equal(t, batchHash2, batches[1].Hash) + assert.Equal(t, types.ProvingTaskFailed, types.ProvingStatus(batches[0].ProvingStatus)) + assert.Equal(t, types.RollupCommitFailed, types.RollupStatus(batches[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(batches[1].ProvingStatus)) + assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(batches[1].RollupStatus)) } } +func TestBundleOrm(t *testing.T) { + sqlDB, err := db.DB() + assert.NoError(t, err) + assert.NoError(t, migrate.ResetDB(sqlDB)) + + chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} + batch1 := &encoding.Batch{ + Index: 0, + Chunks: []*encoding.Chunk{chunk1}, + } + dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV3, utils.BatchMetrics{}) + assert.NoError(t, err) + + chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} + batch2 := &encoding.Batch{ + Index: 1, + Chunks: []*encoding.Chunk{chunk2}, + } + dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV3, utils.BatchMetrics{}) + assert.NoError(t, err) + + var bundle1 *Bundle + var bundle2 *Bundle + + t.Run("InsertBundle", func(t *testing.T) { + bundle1, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch1}, encoding.CodecV3) + assert.NoError(t, err) + assert.NotNil(t, bundle1) + assert.Equal(t, uint64(0), bundle1.StartBatchIndex) + assert.Equal(t, uint64(0), bundle1.EndBatchIndex) + assert.Equal(t, dbBatch1.Hash, bundle1.StartBatchHash) + assert.Equal(t, dbBatch1.Hash, bundle1.EndBatchHash) + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle1.CodecVersion)) + + bundle2, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch2}, encoding.CodecV3) + assert.NoError(t, err) + assert.NotNil(t, bundle2) + assert.Equal(t, uint64(1), bundle2.StartBatchIndex) + assert.Equal(t, uint64(1), bundle2.EndBatchIndex) + assert.Equal(t, dbBatch2.Hash, bundle2.StartBatchHash) + assert.Equal(t, dbBatch2.Hash, bundle2.EndBatchHash) + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle2.CodecVersion)) + }) + + t.Run("GetFirstUnbundledBatchIndex", func(t *testing.T) { + index, err := bundleOrm.GetFirstUnbundledBatchIndex(context.Background()) + assert.NoError(t, err) + assert.Equal(t, uint64(2), index) + }) + + t.Run("GetFirstPendingBundle", func(t *testing.T) { + bundle, err := bundleOrm.GetFirstPendingBundle(context.Background()) + assert.NoError(t, err) + assert.NotNil(t, bundle) + assert.Equal(t, int16(types.RollupPending), bundle.RollupStatus) + }) + + t.Run("UpdateFinalizeTxHashAndRollupStatus", func(t *testing.T) { + err := bundleOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), bundle1.Hash, "0xabcd", types.RollupFinalized) + assert.NoError(t, err) + + pendingBundle, err := bundleOrm.GetFirstPendingBundle(context.Background()) + assert.NoError(t, err) + assert.Equal(t, uint64(2), pendingBundle.Index) + + var finalizedBundle Bundle + err = db.Where("hash = ?", bundle1.Hash).First(&finalizedBundle).Error + assert.NoError(t, err) + assert.Equal(t, "0xabcd", finalizedBundle.FinalizeTxHash) + assert.Equal(t, int16(types.RollupFinalized), finalizedBundle.RollupStatus) + assert.NotNil(t, finalizedBundle.FinalizedAt) + }) + + 
t.Run("UpdateProvingStatus", func(t *testing.T) { + err := bundleOrm.UpdateProvingStatus(context.Background(), bundle1.Hash, types.ProvingTaskAssigned) + assert.NoError(t, err) + + var bundle Bundle + err = db.Where("hash = ?", bundle1.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, int16(types.ProvingTaskAssigned), bundle.ProvingStatus) + + err = bundleOrm.UpdateProvingStatus(context.Background(), bundle1.Hash, types.ProvingTaskVerified) + assert.NoError(t, err) + + err = db.Where("hash = ?", bundle1.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, int16(types.ProvingTaskVerified), bundle.ProvingStatus) + assert.NotNil(t, bundle.ProvedAt) + }) + + t.Run("GetVerifiedProofByHash", func(t *testing.T) { + proof := &message.BundleProof{ + Proof: []byte("test proof"), + } + proofBytes, err := json.Marshal(proof) + assert.NoError(t, err) + + err = db.Model(&Bundle{}).Where("hash = ?", bundle1.Hash).Update("proof", proofBytes).Error + assert.NoError(t, err) + + retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash) + assert.NoError(t, err) + assert.Equal(t, proof.Proof, retrievedProof.Proof) + }) + + t.Run("GetBundles", func(t *testing.T) { + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Equal(t, 2, len(bundles)) + assert.Equal(t, bundle1.Hash, bundles[0].Hash) + assert.Equal(t, bundle2.Hash, bundles[1].Hash) + }) + + t.Run("UpdateProofAndProvingStatusByHash", func(t *testing.T) { + proof := &message.BundleProof{ + Proof: []byte("new test proof"), + } + err := bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle2.Hash, proof, types.ProvingTaskVerified, 600) + assert.NoError(t, err) + + var bundle Bundle + err = db.Where("hash = ?", bundle2.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(bundle.ProvingStatus)) + assert.Equal(t, int32(600), bundle.ProofTimeSec) + assert.NotNil(t, bundle.ProvedAt) + + var retrievedProof message.BundleProof + err = json.Unmarshal(bundle.Proof, &retrievedProof) + assert.NoError(t, err) + assert.Equal(t, proof.Proof, retrievedProof.Proof) + }) + + t.Run("UpdateRollupStatus", func(t *testing.T) { + err := bundleOrm.UpdateRollupStatus(context.Background(), bundle2.Hash, types.RollupFinalized) + assert.NoError(t, err) + + var bundle Bundle + err = db.Where("hash = ?", bundle2.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, types.RollupFinalized, types.RollupStatus(bundle.RollupStatus)) + assert.NotNil(t, bundle.FinalizedAt) + }) +} + func TestPendingTransactionOrm(t *testing.T) { sqlDB, err := db.DB() assert.NoError(t, err) diff --git a/rollup/internal/utils/utils.go b/rollup/internal/utils/utils.go index 9551ea0133..2bdda363fc 100644 --- a/rollup/internal/utils/utils.go +++ b/rollup/internal/utils/utils.go @@ -95,6 +95,22 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit batch size and blob size: %w", err) } return metrics, nil + case encoding.CodecV3: + start := time.Now() + metrics.L1CommitGas = codecv3.EstimateChunkL1CommitGas(chunk) + metrics.EstimateGasTime = time.Since(start) + + start = time.Now() + metrics.L1CommitCalldataSize = codecv3.EstimateChunkL1CommitCalldataSize(chunk) + metrics.EstimateCalldataSizeTime = time.Since(start) + + start = time.Now() + 
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv3.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk) + metrics.EstimateBlobSizeTime = time.Since(start) + if err != nil { + return nil, fmt.Errorf("failed to estimate codecv3 chunk L1 commit batch size and blob size: %w", err) + } + return metrics, nil default: return nil, fmt.Errorf("unsupported codec version: %v", codecVersion) } @@ -203,6 +219,22 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit batch size and blob size: %w", err) } return metrics, nil + case encoding.CodecV3: + start := time.Now() + metrics.L1CommitGas = codecv3.EstimateBatchL1CommitGas(batch) + metrics.EstimateGasTime = time.Since(start) + + start = time.Now() + metrics.L1CommitCalldataSize = codecv3.EstimateBatchL1CommitCalldataSize(batch) + metrics.EstimateCalldataSizeTime = time.Since(start) + + start = time.Now() + metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv3.EstimateBatchL1CommitBatchSizeAndBlobSize(batch) + metrics.EstimateBlobSizeTime = time.Since(start) + if err != nil { + return nil, fmt.Errorf("failed to estimate codecv3 batch L1 commit batch size and blob size: %w", err) + } + return metrics, nil default: return nil, fmt.Errorf("unsupported codec version: %v", codecVersion) } @@ -241,6 +273,16 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code return common.Hash{}, fmt.Errorf("failed to get codecv2 DA chunk hash: %w", err) } return chunkHash, nil + case encoding.CodecV3: + daChunk, err := codecv3.NewDAChunk(chunk, totalL1MessagePoppedBefore) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to create codecv3 DA chunk: %w", err) + } + chunkHash, err := daChunk.Hash() + if err != nil { + return common.Hash{}, fmt.Errorf("failed to get codecv3 DA chunk hash: %w", err) + } + return chunkHash, nil default: return common.Hash{}, fmt.Errorf("unsupported codec version: %v", codecVersion) } @@ -374,6 +416,44 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) return nil, fmt.Errorf("failed to get codecv2 end DA chunk hash: %w", err) } return batchMeta, nil + case encoding.CodecV3: + daBatch, err := codecv3.NewDABatch(batch) + if err != nil { + return nil, fmt.Errorf("failed to create codecv3 DA batch: %w", err) + } + + blobDataProof, err := daBatch.BlobDataProofForPointEvaluation() + if err != nil { + return nil, fmt.Errorf("failed to get codecv3 blob data proof for point evaluation: %w", err) + } + + batchMeta := &BatchMetadata{ + BatchHash: daBatch.Hash(), + BatchDataHash: daBatch.DataHash, + BatchBlobDataProof: blobDataProof, + BatchBytes: daBatch.Encode(), + } + + startDAChunk, err := codecv3.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore) + if err != nil { + return nil, fmt.Errorf("failed to create codecv3 start DA chunk: %w", err) + } + + batchMeta.StartChunkHash, err = startDAChunk.Hash() + if err != nil { + return nil, fmt.Errorf("failed to get codecv3 start DA chunk hash: %w", err) + } + + endDAChunk, err := codecv3.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk) + if err != nil { + return nil, fmt.Errorf("failed to create codecv3 end DA chunk: %w", err) + } + + batchMeta.EndChunkHash, err = endDAChunk.Hash() + if err != nil { + return nil, fmt.Errorf("failed to get codecv3 end DA chunk hash: %w", err) + } + return batchMeta, nil default: return nil, fmt.Errorf("unsupported 
codec version: %v", codecVersion) } diff --git a/rollup/mock_bridge/MockBridge.sol b/rollup/mock_bridge/MockBridge.sol index f9dc85201b..c94bc221b3 100644 --- a/rollup/mock_bridge/MockBridge.sol +++ b/rollup/mock_bridge/MockBridge.sol @@ -1,12 +1,17 @@ -// SPDX-License-Identifier: UNLICENSED +// SPDX-License-Identifier: MIT pragma solidity ^0.8.24; import {BatchHeaderV0Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV0Codec.sol"; import {BatchHeaderV1Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV1Codec.sol"; +import {BatchHeaderV3Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV3Codec.sol"; import {ChunkCodecV0} from "../../../scroll-contracts/src/libraries/codec/ChunkCodecV0.sol"; import {ChunkCodecV1} from "../../../scroll-contracts/src/libraries/codec/ChunkCodecV1.sol"; contract MockBridge { + /********** + * Errors * + **********/ + /// @dev Thrown when committing a committed batch. error ErrorBatchIsAlreadyCommitted(); @@ -20,7 +25,7 @@ contract MockBridge { error ErrorCallPointEvaluationPrecompileFailed(); /// @dev Thrown when the transaction has multiple blobs. - error ErrorFoundMultipleBlob(); + error ErrorFoundMultipleBlobs(); /// @dev Thrown when some fields are not zero in genesis batch. error ErrorGenesisBatchHasNonZeroField(); @@ -43,11 +48,8 @@ contract MockBridge { /// @dev Thrown when the batch index is incorrect. error ErrorIncorrectBatchIndex(); - /// @dev Thrown when the previous state root doesn't match stored one. - error ErrorIncorrectPreviousStateRoot(); - - /// @dev Thrown when the batch header version is invalid. - error ErrorInvalidBatchHeaderVersion(); + /// @dev Thrown when the batch version is incorrect. + error ErrorIncorrectBatchVersion(); /// @dev Thrown when no blob found in the transaction. error ErrorNoBlobFound(); @@ -55,9 +57,6 @@ contract MockBridge { /// @dev Thrown when the number of transactions is less than number of L1 message in one block. error ErrorNumTxsLessThanNumL1Msgs(); - /// @dev Thrown when the given previous state is zero. - error ErrorPreviousStateRootIsZero(); - /// @dev Thrown when the given state root is zero. error ErrorStateRootIsZero(); @@ -70,24 +69,37 @@ contract MockBridge { event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash); event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot); - struct L2MessageProof { - uint256 batchIndex; - bytes merkleProof; - } + /************* + * Constants * + *************/ /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification. - address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A); + address internal constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A); /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the /// point evaluation precompile - uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513; + uint256 internal constant BLS_MODULUS = + 52435875175126190479447740508185965837690552500527637822603658699938581184513; + + /// @notice The chain id of the corresponding layer 2 chain. + uint64 public immutable layer2ChainId; + + /************* + * Variables * + *************/ + + /// @notice The maximum number of transactions allowed in each chunk. 
+ uint256 public maxNumTxInChunk; uint256 public l1BaseFee; uint256 public l1BlobBaseFee; uint256 public l2BaseFee; uint256 public lastFinalizedBatchIndex; + mapping(uint256 => bytes32) public committedBatches; + mapping(uint256 => bytes32) public finalizedStateRoots; + mapping(uint256 => bytes32) public withdrawRoots; function setL1BaseFee(uint256 _l1BaseFee) external { @@ -108,6 +120,8 @@ contract MockBridge { *****************************/ /// @notice Import layer 2 genesis block + /// @param _batchHeader The header of the genesis batch. + /// @param _stateRoot The state root of the genesis block. function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external { // check genesis batch header length if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero(); @@ -141,16 +155,10 @@ contract MockBridge { bytes[] memory _chunks, bytes calldata ) external { - // check whether the batch is empty - if (_chunks.length == 0) revert ErrorBatchIsEmpty(); - - (, bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _loadBatchHeader( - _parentBatchHeader + (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch( + _parentBatchHeader, + _chunks ); - unchecked { - _batchIndex += 1; - } - if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted(); bytes32 _batchHash; uint256 batchPtr; @@ -166,7 +174,7 @@ contract MockBridge { _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch) } // store entries, the order matters - BatchHeaderV0Codec.storeVersion(batchPtr, _version); + BatchHeaderV0Codec.storeVersion(batchPtr, 0); BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); @@ -177,9 +185,10 @@ contract MockBridge { batchPtr, BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH ); - } else { - bytes32 blobVersionedHash; - (blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1( + } else if (_version <= 2) { + // versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec, + // but they use different blob encoding and different verifiers. + (_dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1( _totalL1MessagesPoppedOverall, _chunks ); @@ -187,56 +196,125 @@ contract MockBridge { batchPtr := mload(0x40) _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch) } + // store entries, the order matters - BatchHeaderV1Codec.storeVersion(batchPtr, _version); - BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex); - BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); - BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); - BatchHeaderV1Codec.storeDataHash(batchPtr, _dataHash); - BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, blobVersionedHash); + // Some are using `BatchHeaderV0Codec`, see comments of `BatchHeaderV1Codec`. 
+ BatchHeaderV0Codec.storeVersion(batchPtr, _version); + BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); + BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); + BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); + BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash); + BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _getBlobVersionedHash()); BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash); - // compute batch hash - _batchHash = BatchHeaderV1Codec.computeBatchHash( + // compute batch hash, V1 and V2 have same code as V0 + _batchHash = BatchHeaderV0Codec.computeBatchHash( batchPtr, BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH ); + } else { + revert ErrorIncorrectBatchVersion(); } - committedBatches[_batchIndex] = _batchHash; - emit CommitBatch(_batchIndex, _batchHash); + _afterCommitBatch(_batchIndex, _batchHash); + } + + /// @dev This function will revert unless all V0/V1/V2 batches are finalized. This is because we start to + /// pop L1 messages in `commitBatchWithBlobProof` but not in `commitBatch`. We also introduce `finalizedQueueIndex` + /// in `L1MessageQueue`. If one of the V0/V1/V2 batches is not finalized, `L1MessageQueue.pendingQueueIndex` will not + /// match `parentBatchHeader.totalL1MessagePopped` and thus revert. + function commitBatchWithBlobProof( + uint8 _version, + bytes calldata _parentBatchHeader, + bytes[] memory _chunks, + bytes calldata, + bytes calldata _blobDataProof + ) external { + if (_version <= 2) { + revert ErrorIncorrectBatchVersion(); + } + + // allocate memory of batch header and store entries if necessary, the order matters + // @note We store entries only if necessary to avoid the stack overflow problem. + // The codes for `version`, `batchIndex`, `l1MessagePopped`, `totalL1MessagePopped` and `dataHash` + // are the same as `BatchHeaderV0Codec`. + // The codes for `blobVersionedHash`, and `parentBatchHash` are the same as `BatchHeaderV1Codec`. + uint256 batchPtr; + assembly { + batchPtr := mload(0x40) + // This is `BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH`, use `193` here to reduce code + // complexity. Be careful that the length may change in future versions.
+ mstore(0x40, add(batchPtr, 193)) + } + BatchHeaderV0Codec.storeVersion(batchPtr, _version); + + (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch( + _parentBatchHeader, + _chunks + ); + BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); + + // versions 2 and 3 both use ChunkCodecV1 + (bytes32 _dataHash, uint256 _totalL1MessagesPoppedInBatch) = _commitChunksV1( + _totalL1MessagesPoppedOverall, + _chunks + ); + unchecked { + _totalL1MessagesPoppedOverall += _totalL1MessagesPoppedInBatch; + } + + BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); + BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); + BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash); + + // verify blob versioned hash + bytes32 _blobVersionedHash = _getBlobVersionedHash(); + _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof); + BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _blobVersionedHash); + BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash); + + uint256 lastBlockTimestamp; + { + bytes memory lastChunk = _chunks[_chunks.length - 1]; + lastBlockTimestamp = ChunkCodecV1.getLastBlockTimestamp(lastChunk); + } + BatchHeaderV3Codec.storeLastBlockTimestamp(batchPtr, lastBlockTimestamp); + BatchHeaderV3Codec.storeBlobDataProof(batchPtr, _blobDataProof); + + // compute batch hash, V3 has same code as V0 + bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash( + batchPtr, + BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH + ); + + _afterCommitBatch(_batchIndex, _batchHash); } /// @dev We keep this function to upgrade to 4844 more smoothly. function finalizeBatchWithProof( bytes calldata _batchHeader, - bytes32 _prevStateRoot, + bytes32, /*_prevStateRoot*/ bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes calldata ) external { - if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero(); - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); - - // compute batch hash and verify - (, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader); - - // verify previous state root. - if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot(); - - // avoid duplicated verification - if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified(); + (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch( + _batchHeader, + _postStateRoot + ); - // check and update lastFinalizedBatchIndex - unchecked { - if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex(); - lastFinalizedBatchIndex = _batchIndex; + // compute public input hash + bytes32 _publicInputHash; + { + bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr); + bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1]; + _publicInputHash = keccak256( + abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash) + ); } - // record state root and withdraw root - finalizedStateRoots[_batchIndex] = _postStateRoot; - withdrawRoots[_batchIndex] = _withdrawRoot; - - emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + // Pop finalized and non-skipped message from L1MessageQueue. 
+ uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); + _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot); } /// @dev Memory layout of `_blobDataProof`: @@ -247,37 +325,140 @@ contract MockBridge { /// ``` function finalizeBatchWithProof4844( bytes calldata _batchHeader, - bytes32 _prevStateRoot, + bytes32, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes calldata _blobDataProof, bytes calldata ) external { - if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero(); - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); - - // compute batch hash and verify - (uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader); - bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr); + (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch( + _batchHeader, + _postStateRoot + ); - // Calls the point evaluation precompile and verifies the output + // compute public input hash + bytes32 _publicInputHash; { - (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall( - abi.encodePacked(_blobVersionedHash, _blobDataProof) + bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr); + bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(batchPtr); + bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1]; + // verify blob versioned hash + _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof); + _publicInputHash = keccak256( + abi.encodePacked( + layer2ChainId, + _prevStateRoot, + _postStateRoot, + _withdrawRoot, + _dataHash, + _blobDataProof[0:64], + _blobVersionedHash + ) ); - // We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the - // response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile - if (!success) revert ErrorCallPointEvaluationPrecompileFailed(); - (, uint256 result) = abi.decode(data, (uint256, uint256)); - if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput(); } - // verify previous state root. - if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot(); + // Pop finalized and non-skipped message from L1MessageQueue. 
+ uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); + _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + } + + function finalizeBundleWithProof( + bytes calldata _batchHeader, + bytes32 _postStateRoot, + bytes32 _withdrawRoot, + bytes calldata + ) external { + if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); + + // retrieve finalized state root and batch hash from storage + uint256 _finalizedBatchIndex = lastFinalizedBatchIndex; + + // compute pending batch hash and verify + (, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader); + if (_batchIndex <= _finalizedBatchIndex) revert ErrorBatchIsAlreadyVerified(); + + // store in state + // @note we do not store intermediate finalized roots + lastFinalizedBatchIndex = _batchIndex; + finalizedStateRoots[_batchIndex] = _postStateRoot; + withdrawRoots[_batchIndex] = _withdrawRoot; + + emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + } + + /********************** + * Internal Functions * + **********************/ + + /// @dev Internal function to do common checks before actual batch committing. + /// @param _parentBatchHeader The parent batch header in calldata. + /// @param _chunks The list of chunks in memory. + /// @return _parentBatchHash The batch hash of parent batch header. + /// @return _batchIndex The index of current batch. + /// @return _totalL1MessagesPoppedOverall The total number of L1 messages popped before current batch. + function _beforeCommitBatch(bytes calldata _parentBatchHeader, bytes[] memory _chunks) + private + view + returns ( + bytes32 _parentBatchHash, + uint256 _batchIndex, + uint256 _totalL1MessagesPoppedOverall + ) + { + // check whether the batch is empty + if (_chunks.length == 0) revert ErrorBatchIsEmpty(); + (, _parentBatchHash, _batchIndex, _totalL1MessagesPoppedOverall) = _loadBatchHeader(_parentBatchHeader); + unchecked { + _batchIndex += 1; + } + if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted(); + } + + /// @dev Internal function to do common checks after actual batch committing. + /// @param _batchIndex The index of current batch. + /// @param _batchHash The hash of current batch. + function _afterCommitBatch(uint256 _batchIndex, bytes32 _batchHash) private { + committedBatches[_batchIndex] = _batchHash; + emit CommitBatch(_batchIndex, _batchHash); + } + + /// @dev Internal function to do common checks before actual batch finalization. + /// @param _batchHeader The current batch header in calldata. + /// @param _postStateRoot The state root after current batch. + /// @return batchPtr The start memory offset of current batch in memory. + /// @return _batchHash The hash of current batch. + /// @return _batchIndex The index of current batch. + function _beforeFinalizeBatch(bytes calldata _batchHeader, bytes32 _postStateRoot) + internal + view + returns ( + uint256 batchPtr, + bytes32 _batchHash, + uint256 _batchIndex + ) + { + if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); + + // compute batch hash and verify + (batchPtr, _batchHash, _batchIndex, ) = _loadBatchHeader(_batchHeader); // avoid duplicated verification if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified(); + } + /// @dev Internal function to do common checks after actual batch finalization. + /// @param The total number of L1 messages popped overall (unused). + /// @param _batchIndex The index of current batch.
+ /// @param _batchHash The hash of current batch. + /// @param _postStateRoot The state root after current batch. + /// @param _withdrawRoot The withdraw trie root after current batch. + function _afterFinalizeBatch( + uint256, + uint256 _batchIndex, + bytes32 _batchHash, + bytes32 _postStateRoot, + bytes32 _withdrawRoot + ) internal { // check and update lastFinalizedBatchIndex unchecked { if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex(); @@ -291,19 +472,43 @@ contract MockBridge { emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); } - /********************** - * Internal Functions * - **********************/ + /// @dev Internal function to check blob versioned hash. + /// @param _blobVersionedHash The blob versioned hash to check. + /// @param _blobDataProof The blob data proof used to verify the blob versioned hash. + function _checkBlobVersionedHash(bytes32 _blobVersionedHash, bytes calldata _blobDataProof) internal view { + // Calls the point evaluation precompile and verifies the output + (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall( + abi.encodePacked(_blobVersionedHash, _blobDataProof) + ); + // We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the + // response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile + if (!success) revert ErrorCallPointEvaluationPrecompileFailed(); + (, uint256 result) = abi.decode(data, (uint256, uint256)); + if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput(); + } + + /// @dev Internal function to get the blob versioned hash. + /// @return _blobVersionedHash The retrieved blob versioned hash. + function _getBlobVersionedHash() internal virtual returns (bytes32 _blobVersionedHash) { + bytes32 _secondBlob; + // Get blob's versioned hash + assembly { + _blobVersionedHash := blobhash(0) + _secondBlob := blobhash(1) + } + if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound(); + if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlobs(); + } /// @dev Internal function to commit chunks with version 0 /// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks. /// @param _chunks The list of chunks to commit. /// @return _batchDataHash The computed data hash for the list of chunks. - /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one. + /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped one. function _commitChunksV0( uint256 _totalL1MessagesPoppedOverall, bytes[] memory _chunks - ) internal pure returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) { + ) internal view returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) { uint256 _chunksLength = _chunks.length; // load `batchDataHashPtr` and reserve the memory region for chunk data hashes @@ -341,32 +546,12 @@ contract MockBridge { /// @dev Internal function to commit chunks with version 1 /// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks. /// @param _chunks The list of chunks to commit. - /// @return _blobVersionedHash The blob versioned hash for the blob carried in this transaction. /// @return _batchDataHash The computed data hash for the list of chunks. 
- /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one. + /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped one. function _commitChunksV1( uint256 _totalL1MessagesPoppedOverall, bytes[] memory _chunks - ) - internal - view - returns ( - bytes32 _blobVersionedHash, - bytes32 _batchDataHash, - uint256 _totalL1MessagesPoppedInBatch - ) - { - { - bytes32 _secondBlob; - // Get blob's versioned hash - assembly { - _blobVersionedHash := blobhash(0) - _secondBlob := blobhash(1) - } - if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound(); - if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlob(); - } - + ) internal view returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) { uint256 _chunksLength = _chunks.length; // load `batchDataHashPtr` and reserve the memory region for chunk data hashes @@ -424,22 +609,25 @@ contract MockBridge { version := shr(248, calldataload(_batchHeader.offset)) } - // version should be always 0 or 1 in current code uint256 _length; if (version == 0) { (batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader); - _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length); - _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr); - } else { + } else if (version <= 2) { (batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader); - _batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length); - _batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr); + } else if (version >= 3) { + (batchPtr, _length) = BatchHeaderV3Codec.loadAndValidate(_batchHeader); } + + // the code for compute batch hash is the same for V0, V1, V2, V3 + // also the `_batchIndex` and `_totalL1MessagesPoppedOverall`. + _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length); + _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr); + _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); + // only check when genesis is imported if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) { revert ErrorIncorrectBatchHash(); } - _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); } /// @dev Internal function to commit a chunk with version 0. @@ -452,7 +640,7 @@ contract MockBridge { bytes memory _chunk, uint256 _totalL1MessagesPoppedInBatch, uint256 _totalL1MessagesPoppedOverall - ) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { + ) internal view returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { uint256 chunkPtr; uint256 startDataPtr; uint256 dataPtr; @@ -481,6 +669,8 @@ contract MockBridge { } } + // It is used to compute the actual number of transactions in chunk. 
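        // Every transaction hash (L1 message or L2 tx) concatenated below takes 32 bytes starting
        // at txHashStartDataPtr, so the later check
        // (dataPtr - txHashStartDataPtr) / 32 > maxNumTxInChunk derives the number of transactions
        // in this chunk from the size of the hash region.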
+ uint256 txHashStartDataPtr = dataPtr; // concatenate tx hashes uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks); chunkPtr += 1; @@ -510,6 +700,9 @@ contract MockBridge { } } + // check the actual number of transactions in the chunk + if ((dataPtr - txHashStartDataPtr) / 32 > maxNumTxInChunk) revert ErrorTooManyTxsInOneChunk(); + assembly { chunkPtr := add(_chunk, 0x20) } @@ -532,7 +725,7 @@ contract MockBridge { bytes memory _chunk, uint256 _totalL1MessagesPoppedInBatch, uint256 _totalL1MessagesPoppedOverall - ) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { + ) internal view returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { uint256 chunkPtr; uint256 startDataPtr; uint256 dataPtr; @@ -568,7 +761,7 @@ contract MockBridge { uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr); if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs(); unchecked { - _totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages + _totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages _totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock; _totalL1MessagesPoppedOverall += _numL1MessagesInBlock; @@ -578,6 +771,11 @@ contract MockBridge { } } + // check the actual number of transactions in the chunk + if (_totalTransactionsInChunk > maxNumTxInChunk) { + revert ErrorTooManyTxsInOneChunk(); + } + // compute data hash and store to memory assembly { _dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr)) diff --git a/rollup/tests/bridge_test.go b/rollup/tests/bridge_test.go index e4abb920fc..dc654f6cb0 100644 --- a/rollup/tests/bridge_test.go +++ b/rollup/tests/bridge_test.go @@ -204,10 +204,8 @@ func TestFunction(t *testing.T) { // l1 rollup and watch rollup events t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch) - t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch) - t.Run("TestCommitBatchAndFinalizeBatch4844", testCommitBatchAndFinalizeBatch4844) - t.Run("TestCommitBatchAndFinalizeBatchBeforeAndAfter4844", testCommitBatchAndFinalizeBatchBeforeAndAfter4844) - t.Run("TestCommitBatchAndFinalizeBatchBeforeAndAfterCompression", testCommitBatchAndFinalizeBatchBeforeAndAfterCompression) + t.Run("testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions", testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions) + t.Run("TestCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions", testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions) // l1/l2 gas oracle t.Run("TestImportL1GasPrice", testImportL1GasPrice) diff --git a/rollup/tests/rollup_test.go b/rollup/tests/rollup_test.go index 80fa56a9fa..5785642732 100644 --- a/rollup/tests/rollup_test.go +++ b/rollup/tests/rollup_test.go @@ -52,140 +52,26 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) { assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus)) } -func testCommitBatchAndFinalizeBatch(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - prepareContracts(t) - - // Create L2Relayer - l2Cfg := rollupApp.Config.L2Config - l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil) - assert.NoError(t, err) - defer l2Relayer.StopSenders() - - // add some blocks to db - var 
blocks []*encoding.Block - for i := int64(0); i < 10; i++ { - header := gethTypes.Header{ - Number: big.NewInt(i + 1), - ParentHash: common.Hash{}, - Difficulty: big.NewInt(0), - BaseFee: big.NewInt(0), - Root: common.HexToHash("0x1"), - } - blocks = append(blocks, &encoding.Block{ - Header: &header, - Transactions: nil, - WithdrawRoot: common.HexToHash("0x2"), - RowConsumption: &gethTypes.RowConsumption{}, - }) - } - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks) - assert.NoError(t, err) - - cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 100, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1048319, - ChunkTimeoutSec: 300, - }, ¶ms.ChainConfig{}, db, nil) - - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 50000000000, - MaxL1CommitCalldataSizePerBatch: 1000000, - BatchTimeoutSec: 300, - }, ¶ms.ChainConfig{}, db, nil) - - cp.TryProposeChunk() - - batchOrm := orm.NewBatch(db) - unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background()) - assert.NoError(t, err) - - chunkOrm := orm.NewChunk(db) - chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0) - assert.NoError(t, err) - assert.Len(t, chunks, 1) - - bp.TryProposeBatch() - - l2Relayer.ProcessPendingBatches() - batch, err := batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - - // add dummy proof - proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) - assert.NoError(t, err) - err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) - assert.NoError(t, err) - - // process committed batch and check status - l2Relayer.ProcessCommittedBatches() - - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) - - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) 
- assert.NotEmpty(t, batch.FinalizeTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) -} - -func testCommitBatchAndFinalizeBatch4844(t *testing.T) { - compressionTests := []bool{false, true} // false for uncompressed, true for compressed - for _, compressed := range compressionTests { +func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} + for _, codecVersion := range codecVersions { db := setupDB(t) prepareContracts(t) - // Create L2Relayer - l2Cfg := rollupApp.Config.L2Config var chainConfig *params.ChainConfig - if compressed { + if codecVersion == encoding.CodecV0 { + chainConfig = ¶ms.ChainConfig{} + } else if codecVersion == encoding.CodecV1 { + chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else if codecVersion == encoding.CodecV2 { chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} } else { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} + chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} } + + // Create L2Relayer + l2Cfg := rollupApp.Config.L2Config l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) @@ -207,228 +93,138 @@ func testCommitBatchAndFinalizeBatch4844(t *testing.T) { }) } - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks) - assert.NoError(t, err) - cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ MaxBlockNumPerChunk: 100, MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 1, - MaxL1CommitCalldataSizePerChunk: 100000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, MaxRowConsumptionPerChunk: 1048319, ChunkTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 1, - MaxL1CommitCalldataSizePerBatch: 100000, + bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: 50000000000, + MaxL1CommitCalldataSizePerBatch: 1000000, BatchTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) - cp.TryProposeChunk() + bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: 1000000, + BundleTimeoutSec: 300, + }, chainConfig, db, nil) - batchOrm := orm.NewBatch(db) - unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background()) + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5]) assert.NoError(t, err) - chunkOrm := orm.NewChunk(db) - chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0) + cp.TryProposeChunk() + bap.TryProposeBatch() + + err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[5:]) assert.NoError(t, err) - assert.Len(t, chunks, 1) - bp.TryProposeBatch() + cp.TryProposeChunk() + bap.TryProposeBatch() + + bup.TryProposeBundle() // The proposed bundle contains two batches 
when codec version is codecv3. l2Relayer.ProcessPendingBatches() - batch, err := batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) + batchOrm := orm.NewBatch(db) + bundleOrm := orm.NewBundle(db) assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful + batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, getErr) + assert.Len(t, batches, 3) + batches = batches[1:] + for _, batch := range batches { + if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) { + return false + } + } + return true }, 30*time.Second, time.Second) - // add dummy proof - proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + batchProof := &message.BatchProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) - assert.NoError(t, err) - err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) - assert.NoError(t, err) - - // process committed batch and check status - l2Relayer.ProcessCommittedBatches() - - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) - - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) + batches = batches[1:] + for _, batch := range batches { + err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 100) + assert.NoError(t, err) + err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.FinalizeTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 
30*time.Second, time.Second) - - l2Relayer.StopSenders() - database.CloseDB(db) - } -} - -func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) { - compressionTests := []bool{false, true} // false for uncompressed, true for compressed - for _, compressed := range compressionTests { - db := setupDB(t) - - prepareContracts(t) - - // Create L2Relayer - l2Cfg := rollupApp.Config.L2Config - var chainConfig *params.ChainConfig - if compressed { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(5), CurieBlock: big.NewInt(5)} - } else { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(5)} } - l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil) - assert.NoError(t, err) - // add some blocks to db - var blocks []*encoding.Block - for i := int64(0); i < 10; i++ { - header := gethTypes.Header{ - Number: big.NewInt(i + 1), - ParentHash: common.Hash{}, - Difficulty: big.NewInt(0), - BaseFee: big.NewInt(0), - Root: common.HexToHash("0x1"), - } - blocks = append(blocks, &encoding.Block{ - Header: &header, - Transactions: nil, - WithdrawRoot: common.HexToHash("0x2"), - RowConsumption: &gethTypes.RowConsumption{}, - }) + bundleProof := &message.BundleProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, } - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - - cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 100, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1048319, - ChunkTimeoutSec: 300, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) - - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 50000000000, - MaxL1CommitCalldataSizePerBatch: 1000000, - BatchTimeoutSec: 300, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) - - cp.TryProposeChunk() - cp.TryProposeChunk() - bp.TryProposeBatch() - bp.TryProposeBatch() - - for i := uint64(0); i < 2; i++ { - l2Relayer.ProcessPendingBatches() - batchOrm := orm.NewBatch(db) - batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1) + for _, bundle := range bundles { + err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100) assert.NoError(t, err) - assert.NotNil(t, batch) + } - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) + assert.Eventually(t, func() bool { + l2Relayer.ProcessCommittedBatches() + l2Relayer.ProcessPendingBundles() - assert.Eventually(t, func() bool { - batch, err = 
batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - - // add dummy proof - proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) - assert.NoError(t, err) - err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) + assert.Len(t, batches, 3) + batches = batches[1:] + for _, batch := range batches { + if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized { + return false + } - // process committed batch and check status - l2Relayer.ProcessCommittedBatches() + assert.NotEmpty(t, batch.FinalizeTxHash) + receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) + assert.NoError(t, getErr) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + } - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) - - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1) + if codecVersion == encoding.CodecV0 || codecVersion == encoding.CodecV1 || codecVersion == encoding.CodecV2 { + assert.Len(t, bundles, 0) + } else { + assert.Len(t, bundles, 1) + bundle := bundles[0] + if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized { + return false + } + assert.NotEmpty(t, bundle.FinalizeTxHash) + receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash)) assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.FinalizeTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - } + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0) + assert.NoError(t, err) + assert.Len(t, batches, 2) + for _, batch := range batches { + assert.Equal(t, batch.RollupStatus, bundle.RollupStatus) + assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash) + } + } + return true + }, 30*time.Second, time.Second) l2Relayer.StopSenders() database.CloseDB(db) } } -func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { +func 
testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions(t *testing.T) { db := setupDB(t) defer database.CloseDB(db) @@ -436,7 +232,7 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { // Create L2Relayer l2Cfg := rollupApp.Config.L2Config - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(5)} + chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2), DarwinTime: func() *uint64 { t := uint64(4); return &t }()} l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) defer l2Relayer.StopSenders() @@ -450,6 +246,7 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { Difficulty: big.NewInt(0), BaseFee: big.NewInt(0), Root: common.HexToHash("0x1"), + Time: uint64(i + 1), } blocks = append(blocks, &encoding.Block{ Header: &header, @@ -473,74 +270,130 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ MaxL1CommitGasPerBatch: 50000000000, MaxL1CommitCalldataSizePerBatch: 1000000, BatchTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) + bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: 1000000, + BundleTimeoutSec: 300, + }, chainConfig, db, nil) + + cp.TryProposeChunk() + cp.TryProposeChunk() + cp.TryProposeChunk() cp.TryProposeChunk() cp.TryProposeChunk() - bp.TryProposeBatch() - bp.TryProposeBatch() - for i := uint64(0); i < 2; i++ { - l2Relayer.ProcessPendingBatches() - batchOrm := orm.NewBatch(db) - batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) + bap.TryProposeBatch() + bap.TryProposeBatch() + bap.TryProposeBatch() + bap.TryProposeBatch() - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) + bup.TryProposeBundle() - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) + l2Relayer.ProcessPendingBatches() + + batchOrm := orm.NewBatch(db) + bundleOrm := orm.NewBundle(db) - // add dummy proof - proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + assert.Eventually(t, func() bool { + batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, getErr) + assert.Len(t, batches, 4) + batches = batches[1:] + for _, batch := range batches { + if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) { + return 
false + } } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) + return true + }, 30*time.Second, time.Second) + + batchProof := &message.BatchProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + } + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + batches = batches[1:] + for _, batch := range batches { + err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 600) assert.NoError(t, err) err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) assert.NoError(t, err) + } + + bundleProof := &message.BundleProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + } + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + for _, bundle := range bundles { + err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100) + assert.NoError(t, err) + } - // process committed batch and check status + assert.Eventually(t, func() bool { l2Relayer.ProcessCommittedBatches() - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) + assert.Len(t, batches, 4) + batches = batches[1:2] + for _, batch := range batches { + if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized { + return false + } + assert.NotEmpty(t, batch.FinalizeTxHash) + receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) + assert.NoError(t, getErr) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + } + return true + }, 30*time.Second, time.Second) - // fetch rollup events - assert.Eventually(t, func() bool { - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) + assert.Eventually(t, func() bool { + l2Relayer.ProcessPendingBundles() - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + assert.Len(t, batches, 4) + batches = batches[3:] + for _, batch := range batches { + if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized { + return false + } assert.NotEmpty(t, batch.FinalizeTxHash) - var 
receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - } + receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) + assert.NoError(t, getErr) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + } + + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + assert.Len(t, bundles, 1) + bundle := bundles[0] + if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized { + return false + } + assert.NotEmpty(t, bundle.FinalizeTxHash) + receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash)) + assert.NoError(t, err) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0) + assert.NoError(t, err) + assert.Len(t, batches, 1) + for _, batch := range batches { + assert.Equal(t, batch.RollupStatus, bundle.RollupStatus) + assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash) + } + return true + }, 30*time.Second, time.Second) } diff --git a/scroll-contracts b/scroll-contracts index ca7f0768b6..2ac4f3f7e0 160000 --- a/scroll-contracts +++ b/scroll-contracts @@ -1 +1 @@ -Subproject commit ca7f0768b6640dc10b19f3d4da3943a87bdf11b1 +Subproject commit 2ac4f3f7e090d7127db4b13b3627cb3ce2d762bc diff --git a/tests/integration-test/genesis.json b/tests/integration-test/genesis.json index b1887cb073..5de1c798b7 100644 --- a/tests/integration-test/genesis.json +++ b/tests/integration-test/genesis.json @@ -14,6 +14,9 @@ "londonBlock": 0, "archimedesBlock": 0, "shanghaiBlock": 0, + "bernoulliBlock": 0, + "curieBlock": 0, + "darwinTime": 0, "clique": { "period": 3, "epoch": 30000 diff --git a/tests/integration-test/go.mod b/tests/integration-test/go.mod index 008f236b57..2ba5a8a8c9 100644 --- a/tests/integration-test/go.mod +++ b/tests/integration-test/go.mod @@ -3,42 +3,41 @@ module scroll-tech/integration-test go 1.21 require ( - github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570 - github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea + github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb + github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/stretchr/testify v1.9.0 gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde ) require ( - github.com/bits-and-blooms/bitset v1.12.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd v0.20.1-beta // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set v1.8.0 // indirect - github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect + github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect - github.com/iden3/go-iden3-crypto v0.0.15 // indirect + 
github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rjeczalik/notify v0.9.1 // indirect - github.com/scroll-tech/zktrie v0.8.2 // indirect + github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/supranational/blst v0.3.11 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect + github.com/supranational/blst v0.3.12 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v3 v3.0.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect diff --git a/tests/integration-test/go.sum b/tests/integration-test/go.sum index a076f0078e..4a182c7307 100644 --- a/tests/integration-test/go.sum +++ b/tests/integration-test/go.sum @@ -1,8 +1,8 @@ github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= -github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -29,8 +29,8 @@ github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsP github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA= -github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c= +github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= +github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= @@ -52,8 +52,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod 
h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= -github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= +github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= @@ -89,40 +89,38 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570 h1:2oA2bAFPQXDZcUK8TA9qd5zj6AsURpHyBaAha5goP0c= -github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA= -github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ= -github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA= +github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= +github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= +github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= +github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb 
v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=