diff --git a/.github/workflows/config.yml b/.github/workflows/config.yml index 89fb2de8531d..5a35895b9bbc 100644 --- a/.github/workflows/config.yml +++ b/.github/workflows/config.yml @@ -17,6 +17,7 @@ on: - production-sokol - production-suave - production-xdai + - production-via - production-zkevm - production-zksync - staging-l2 @@ -65,6 +66,7 @@ jobs: "stability", "zetachain", "zilliqa", + "via", "zksync", "neon" ]; diff --git a/.github/workflows/pre-release-via.yml b/.github/workflows/pre-release-via.yml new file mode 100644 index 000000000000..39fcae1d8259 --- /dev/null +++ b/.github/workflows/pre-release-via.yml @@ -0,0 +1,62 @@ +name: Pre-release for Via + +on: + workflow_dispatch: + inputs: + number: + type: number + required: true + +env: + OTP_VERSION: ${{ vars.OTP_VERSION }} + ELIXIR_VERSION: ${{ vars.ELIXIR_VERSION }} + +jobs: + push_to_registry: + name: Push Docker image to GitHub Container Registry + runs-on: ubuntu-latest + env: + RELEASE_VERSION: 8.0.2 + steps: + - uses: actions/checkout@v4 + - name: Setup repo + uses: ./.github/actions/setup-repo + id: setup + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + docker-remote-multi-platform: true + docker-arm-host: ${{ secrets.ARM_RUNNER_HOSTNAME }} + docker-arm-host-key: ${{ secrets.ARM_RUNNER_KEY }} + + - name: Build and push Docker image for Via (indexer + API) + uses: docker/build-push-action@v6 + with: + context: . + file: ./docker/Dockerfile + push: true + tags: ghcr.io/blockscout/blockscout-via:${{ env.RELEASE_VERSION }}-alpha.${{ inputs.number }} + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-alpha.${{ inputs.number }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=via + + - name: Build and push Docker image for Via (indexer) + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./docker/Dockerfile + push: true + tags: ghcr.io/blockscout/blockscout-via:${{ env.RELEASE_VERSION }}-alpha.${{ inputs.number }}-indexer + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + DISABLE_API=true + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-alpha.${{ inputs.number }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=via diff --git a/.github/workflows/publish-docker-image-for-via.yml b/.github/workflows/publish-docker-image-for-via.yml new file mode 100644 index 000000000000..fefa23f4112b --- /dev/null +++ b/.github/workflows/publish-docker-image-for-via.yml @@ -0,0 +1,57 @@ +name: Via publish Docker image + +on: + push: + branches: + - production-via +jobs: + push_to_registry: + name: Push Docker image to GitHub Container Registry + runs-on: ubuntu-latest + env: + RELEASE_VERSION: 8.0.2 + DOCKER_CHAIN_NAME: via + steps: + - uses: actions/checkout@v4 + - name: Setup repo + uses: ./.github/actions/setup-repo + id: setup + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + docker-remote-multi-platform: true + docker-arm-host: ${{ secrets.ARM_RUNNER_HOSTNAME }} + docker-arm-host-key: ${{ secrets.ARM_RUNNER_KEY }} + + - name: Build and push Docker image (indexer + API) + uses: docker/build-push-action@v6 + with: + context: . + file: ./docker/Dockerfile + push: true + tags: ghcr.io/blockscout/blockscout-${{ env.DOCKER_CHAIN_NAME }}:${{ env.RELEASE_VERSION }}-postrelease-${{ env.SHORT_SHA }} + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}.+commit.${{ env.SHORT_SHA }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=via + + - name: Build and push Docker image (indexer) + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./docker/Dockerfile + push: true + tags: ghcr.io/blockscout/blockscout-${{ env.DOCKER_CHAIN_NAME }}:${{ env.RELEASE_VERSION }}-postrelease-${{ env.SHORT_SHA }}-indexer + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + DISABLE_API=true + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}.+commit.${{ env.SHORT_SHA }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=via + diff --git a/.github/workflows/release-via.yml b/.github/workflows/release-via.yml new file mode 100644 index 000000000000..b03112dc7b49 --- /dev/null +++ b/.github/workflows/release-via.yml @@ -0,0 +1,60 @@ +name: Release for Via + +on: + workflow_dispatch: + release: + types: [published] + +env: + OTP_VERSION: ${{ vars.OTP_VERSION }} + ELIXIR_VERSION: ${{ vars.ELIXIR_VERSION }} + +jobs: + push_to_registry: + name: Push Docker image to GitHub Container Registry + runs-on: ubuntu-latest + env: + RELEASE_VERSION: 8.0.2 + steps: + - uses: actions/checkout@v4 + - name: Setup repo + uses: ./.github/actions/setup-repo + id: setup + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + docker-remote-multi-platform: true + docker-arm-host: ${{ secrets.ARM_RUNNER_HOSTNAME }} + docker-arm-host-key: ${{ secrets.ARM_RUNNER_KEY }} + + - name: Build and push Docker image for Via (indexer + API) + uses: docker/build-push-action@v6 + with: + context: . + file: ./docker/Dockerfile + push: true + tags: ghcr.io/blockscout/blockscout-via:latest, ghcr.io/blockscout/blockscout-via:${{ env.RELEASE_VERSION }} + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=via + + - name: Build and push Docker image for Via (indexer) + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./docker/Dockerfile + push: true + tags: ghcr.io/blockscout/blockscout-via:${{ env.RELEASE_VERSION }}-indexer + labels: ${{ steps.setup.outputs.docker-labels }} + platforms: | + linux/amd64 + linux/arm64/v8 + build-args: | + DISABLE_API=true + BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }} + RELEASE_VERSION=${{ env.RELEASE_VERSION }} + CHAIN_TYPE=via diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/health_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/health_controller.ex index 885446775e58..92cecd7e363f 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/health_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/health_controller.ex @@ -8,7 +8,7 @@ defmodule BlockScoutWeb.API.HealthController do @ok_message "OK" @backfill_multichain_search_db_migration_name "backfill_multichain_search_db" - @rollups [:arbitrum, :zksync, :optimism, :polygon_zkevm, :scroll] + @rollups [:arbitrum, :zksync, :via, :optimism, :polygon_zkevm, :scroll] @doc """ Handles health checks for the application. 
diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/rpc/contract_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/rpc/contract_controller.ex index c7a1074ac6b0..24eac3553879 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/rpc/contract_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/rpc/contract_controller.ex @@ -18,7 +18,7 @@ defmodule BlockScoutWeb.API.RPC.ContractController do alias Explorer.SmartContract.Vyper.Publisher, as: VyperPublisher alias Explorer.ThirdPartyIntegrations.Sourcify - if @chain_type == :zksync do + if @chain_type == :zksync || @chain_type == :via do @optimization_runs "0" else @optimization_runs 200 @@ -634,6 +634,10 @@ defmodule BlockScoutWeb.API.RPC.ContractController do do: optional_param(&1, params, "zksolcVersion", "zk_compiler_version"), else: &1 )).() + |> (&if(Application.get_env(:explorer, :chain_type) == :via, + do: optional_param(&1, params, "zksolcVersion", "zk_compiler_version"), + else: &1 + )).() end defp fetch_verifysourcecode_solidity_single_file_params(params) do diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex index 0cd1c39b4b75..2533ad46e084 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex @@ -60,6 +60,15 @@ defmodule BlockScoutWeb.API.V2.BlockController do :op_frame_sequence => :optional } + :via -> + @chain_type_transaction_necessity_by_association %{} + @chain_type_block_necessity_by_association %{ + :via_batch => :optional, + :via_commit_transaction => :optional, + :via_prove_transaction => :optional, + :via_execute_transaction => :optional + } + :zksync -> @chain_type_transaction_necessity_by_association %{} @chain_type_block_necessity_by_association %{ diff --git 
a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex index 848d82304390..ab3c5a8b944b 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex @@ -50,6 +50,7 @@ defmodule BlockScoutWeb.API.V2.TransactionController do alias Explorer.Chain.Scroll.Reader, as: ScrollReader alias Explorer.Chain.Token.Instance alias Explorer.Chain.ZkSync.Reader, as: ZkSyncReader + alias Explorer.Chain.Via.Reader, as: ViaReader alias Indexer.Fetcher.OnDemand.FirstTrace, as: FirstTraceOnDemand alias Indexer.Fetcher.OnDemand.NeonSolanaTransactions, as: NeonSolanaTransactions @@ -148,6 +149,13 @@ defmodule BlockScoutWeb.API.V2.TransactionController do |> Map.put(:zksync_prove_transaction, :optional) |> Map.put(:zksync_execute_transaction, :optional) + :via -> + necessity_by_association_with_actions + |> Map.put(:via_batch, :optional) + |> Map.put(:via_commit_transaction, :optional) + |> Map.put(:via_prove_transaction, :optional) + |> Map.put(:via_execute_transaction, :optional) + :arbitrum -> necessity_by_association_with_actions |> Map.put(:arbitrum_batch, :optional) @@ -252,6 +260,15 @@ defmodule BlockScoutWeb.API.V2.TransactionController do handle_batch_transactions(conn, params, &ZkSyncReader.batch_transactions/2) end + @doc """ + Function to handle GET requests to `/api/v2/transactions/via-batch/:batch_number` endpoint. + It renders the list of L2 transactions bound to the specified batch. + """ + @spec via_batch(Plug.Conn.t(), map()) :: Plug.Conn.t() + def via_batch(conn, params) do + handle_batch_transactions(conn, params, &ViaReader.batch_transactions/2) + end + @doc """ Function to handle GET requests to `/api/v2/transactions/arbitrum-batch/:batch_number` endpoint. 
It renders the list of L2 transactions bound to the specified batch. diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/verification_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/verification_controller.ex index 442b38f82c9e..c5cd4439eabd 100644 --- a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/verification_controller.ex +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/verification_controller.ex @@ -22,7 +22,7 @@ defmodule BlockScoutWeb.API.V2.VerificationController do @sc_verification_started "Smart-contract verification started" @zk_optimization_modes ["0", "1", "2", "3", "s", "z"] - if @chain_type == :zksync do + if @chain_type == :zksync || @chain_type == :via do @optimization_runs "0" else @optimization_runs 200 @@ -55,7 +55,7 @@ defmodule BlockScoutWeb.API.V2.VerificationController do end defp get_verification_options do - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do ["standard-input"] else ["flattened-code", "standard-input", "vyper-code"] @@ -75,7 +75,7 @@ defmodule BlockScoutWeb.API.V2.VerificationController do end defp maybe_add_zk_options(config) do - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do zk_compiler_versions = CompilerVersion.fetch_version_list(:zk) config @@ -148,7 +148,9 @@ defmodule BlockScoutWeb.API.V2.VerificationController do |> Map.put("constructor_arguments", Map.get(params, "constructor_args", "")) |> Map.put("name", Map.get(params, "contract_name", "")) |> Map.put("license_type", Map.get(params, "license_type")) - |> (&if(Application.get_env(:explorer, :chain_type) == :zksync, + |> (&if( + Application.get_env(:explorer, :chain_type) == :zksync || + Application.get_env(:explorer, 
:chain_type) == :via, do: Map.put(&1, "zk_compiler_version", Map.get(params, "zk_compiler_version")), else: &1 )).() diff --git a/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/via_controller.ex b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/via_controller.ex new file mode 100644 index 000000000000..977c5f7297e3 --- /dev/null +++ b/apps/block_scout_web/lib/block_scout_web/controllers/api/v2/via_controller.ex @@ -0,0 +1,113 @@ +defmodule BlockScoutWeb.API.V2.ViaController do + use BlockScoutWeb, :controller + + import BlockScoutWeb.Chain, + only: [ + next_page_params: 4, + paging_options: 1, + split_list_by_page: 1 + ] + + alias Explorer.Chain.Via.{Reader, TransactionBatch} + + action_fallback(BlockScoutWeb.API.V2.FallbackController) + + @batch_necessity_by_association %{ + :commit_transaction => :optional, + :prove_transaction => :optional, + :execute_transaction => :optional + } + + @doc """ + Function to handle GET requests to `/api/v2/via/batches/:batch_number` endpoint. + """ + @spec batch(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batch(conn, %{"batch_number" => batch_number} = _params) do + case Reader.batch( + batch_number, + necessity_by_association: @batch_necessity_by_association, + api?: true + ) do + {:ok, batch} -> + conn + |> put_status(200) + |> render(:via_batch, %{batch: batch}) + + {:error, :not_found} = res -> + res + end + end + + @doc """ + Function to handle GET requests to `/api/v2/via/batches` endpoint. 
+ """ + @spec batches(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batches(conn, params) do + {batches, next_page} = + params + |> paging_options() + |> Keyword.put(:necessity_by_association, @batch_necessity_by_association) + |> Keyword.put(:api?, true) + |> Reader.batches() + |> split_list_by_page() + + next_page_params = + next_page_params( + next_page, + batches, + params, + fn %TransactionBatch{number: number} -> %{"number" => number} end + ) + + conn + |> put_status(200) + |> render(:via_batches, %{ + batches: batches, + next_page_params: next_page_params + }) + end + + @doc """ + Function to handle GET requests to `/api/v2/via/batches/count` endpoint. + """ + @spec batches_count(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batches_count(conn, _params) do + conn + |> put_status(200) + |> render(:via_batches_count, %{count: Reader.batches_count(api?: true)}) + end + + @doc """ + Function to handle GET requests to `/api/v2/main-page/via/batches/confirmed` endpoint. + """ + @spec batches_confirmed(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batches_confirmed(conn, _params) do + batches = + [] + |> Keyword.put(:necessity_by_association, @batch_necessity_by_association) + |> Keyword.put(:api?, true) + |> Keyword.put(:confirmed?, true) + |> Reader.batches() + + conn + |> put_status(200) + |> render(:via_batches, %{batches: batches}) + end + + @doc """ + Function to handle GET requests to `/api/v2/main-page/via/batches/latest-number` endpoint. 
+ """ + @spec batch_latest_number(Plug.Conn.t(), map()) :: Plug.Conn.t() + def batch_latest_number(conn, _params) do + conn + |> put_status(200) + |> render(:via_batch_latest_number, %{number: batch_latest_number()}) + end + + defp batch_latest_number do + case Reader.batch(:latest, api?: true) do + {:ok, batch} -> batch.number + {:error, :not_found} -> 0 + end + end +end diff --git a/apps/block_scout_web/lib/block_scout_web/graphql/schema/types.ex b/apps/block_scout_web/lib/block_scout_web/graphql/schema/types.ex index bcb49f5826db..be8de953f592 100644 --- a/apps/block_scout_web/lib/block_scout_web/graphql/schema/types.ex +++ b/apps/block_scout_web/lib/block_scout_web/graphql/schema/types.ex @@ -67,6 +67,14 @@ defmodule BlockScoutWeb.GraphQL.Schema.SmartContracts do use Utils.CompileTimeEnvHelper, chain_type: [:explorer, :chain_type] case @chain_type do + :via -> + @chain_type_fields quote( + do: [ + field(:optimization_runs, :string), + field(:zk_compiler_version, :string) + ] + ) + :zksync -> @chain_type_fields quote( do: [ diff --git a/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex b/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex index 256f7cc5274a..1e151888042f 100644 --- a/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex +++ b/apps/block_scout_web/lib/block_scout_web/routers/api_router.ex @@ -156,6 +156,10 @@ defmodule BlockScoutWeb.Routers.ApiRouter do get("/zksync-batch/:batch_number", V2.TransactionController, :zksync_batch) end + if @chain_type == :via do + get("/via-batch/:batch_number", V2.TransactionController, :via_batch) + end + if @chain_type == :arbitrum do get("/arbitrum-batch/:batch_number", V2.TransactionController, :arbitrum_batch) end @@ -270,6 +274,11 @@ defmodule BlockScoutWeb.Routers.ApiRouter do get("/zksync/batches/latest-number", V2.ZkSyncController, :batch_latest_number) end + if @chain_type == :via do + get("/via/batches/confirmed", V2.ViaController, :batches_confirmed) + 
get("/via/batches/latest-number", V2.ViaController, :batch_latest_number) + end + if @chain_type == :arbitrum do get("/arbitrum/messages/to-rollup", V2.ArbitrumController, :recent_messages_to_l2) get("/arbitrum/batches/committed", V2.ArbitrumController, :batches_committed) @@ -436,6 +445,14 @@ defmodule BlockScoutWeb.Routers.ApiRouter do end end + scope "/via" do + if @chain_type == :via do + get("/batches", V2.ViaController, :batches) + get("/batches/count", V2.ViaController, :batches_count) + get("/batches/:batch_number", V2.ViaController, :batch) + end + end + scope "/mud" do pipe_through(:mud) get("/worlds", V2.MudController, :worlds) diff --git a/apps/block_scout_web/lib/block_scout_web/routers/smart_contracts_api_v2_router.ex b/apps/block_scout_web/lib/block_scout_web/routers/smart_contracts_api_v2_router.ex index 0218d273994e..6b340f96b173 100644 --- a/apps/block_scout_web/lib/block_scout_web/routers/smart_contracts_api_v2_router.ex +++ b/apps/block_scout_web/lib/block_scout_web/routers/smart_contracts_api_v2_router.ex @@ -67,7 +67,7 @@ defmodule BlockScoutWeb.Routers.SmartContractsApiV2Router do post("/standard-input", V2.VerificationController, :verification_via_standard_input) - if @chain_type !== :zksync do + if @chain_type !== :zksync && @chain_type !== :via do post("/flattened-code", V2.VerificationController, :verification_via_flattened_code) post("/sourcify", V2.VerificationController, :verification_via_sourcify) post("/multi-part", V2.VerificationController, :verification_via_multi_part) diff --git a/apps/block_scout_web/lib/block_scout_web/templates/common_components/_bitcoin_explorer_link.html.eex b/apps/block_scout_web/lib/block_scout_web/templates/common_components/_bitcoin_explorer_link.html.eex new file mode 100644 index 000000000000..4cc7ad39ff72 --- /dev/null +++ b/apps/block_scout_web/lib/block_scout_web/templates/common_components/_bitcoin_explorer_link.html.eex @@ -0,0 +1,5 @@ +<% bitcoin_explorer_url = 
Application.get_env(:block_scout_web, :bitcoin_explorer_url) %> +<% bitcoin_link = bitcoin_explorer_url <> "/" <> @transaction_hash %> + + Open in Bitcoin Explorer <%= render BlockScoutWeb.IconsView, "_external_link.html" %> + \ No newline at end of file diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/rpc/contract_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/rpc/contract_view.ex index 02cbbe6bc96c..66eb70bf4079 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/rpc/contract_view.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/rpc/contract_view.ex @@ -163,11 +163,21 @@ defmodule BlockScoutWeb.API.RPC.ContractView do |> Map.put_new(:FileName, Map.get(contract, :file_path, "") || "") |> insert_additional_sources(address) |> add_zksync_info(contract) + |> add_via_info(contract) end end defp add_zksync_info(smart_contract_info, contract) do - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do + smart_contract_info + |> Map.put_new(:ZkCompilerVersion, Map.get(contract, :zk_compiler_version, "")) + else + smart_contract_info + end + end + + defp add_via_info(smart_contract_info, contract) do + if Application.get_env(:explorer, :chain_type) == :via do smart_contract_info |> Map.put_new(:ZkCompilerVersion, Map.get(contract, :zk_compiler_version, "")) else @@ -227,6 +237,7 @@ defmodule BlockScoutWeb.API.RPC.ContractView do smart_contract_info |> merge_zksync_info(contract) + |> merge_via_info(contract) end defp merge_zksync_info(smart_contract_info, contract) do @@ -238,6 +249,15 @@ defmodule BlockScoutWeb.API.RPC.ContractView do end end + defp merge_via_info(smart_contract_info, contract) do + if Application.get_env(:explorer, :chain_type) == :via do + smart_contract_info + |> Map.merge(%{"ZkCompilerVersion" => contract.zk_compiler_version}) + else + smart_contract_info + end + end + defp 
address_to_response(address) do creator_hash = AddressView.from_address_hash(address) creation_transaction = creator_hash && AddressView.transaction_hash(address) diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex index 7c2510dc2f1e..24f40f0d573c 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex @@ -140,6 +140,16 @@ defmodule BlockScoutWeb.API.V2.BlockView do end end + :via -> + defp chain_type_fields(result, block, single_block?) do + if single_block? do + # credo:disable-for-next-line Credo.Check.Design.AliasUsage + BlockScoutWeb.API.V2.ViaView.extend_block_json_response(result, block) + else + result + end + end + :arbitrum -> defp chain_type_fields(result, block, single_block?) do if single_block? do diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/smart_contract_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/smart_contract_view.ex index 4a1295728936..fbe60809e810 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/smart_contract_view.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/smart_contract_view.ex @@ -420,6 +420,12 @@ defmodule BlockScoutWeb.API.V2.SmartContractView do |> Map.put("zk_compiler_version", target_contract.zk_compiler_version) end + :via -> + defp chain_type_fields(result, %{target_contract: target_contract}, _single?) do + result + |> Map.put("zk_compiler_version", target_contract.zk_compiler_version) + end + _ -> defp chain_type_fields(result, _params, _single?) 
do result diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/transaction_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/transaction_view.ex index f9420d9a12a7..9c8c61eed65f 100644 --- a/apps/block_scout_web/lib/block_scout_web/views/api/v2/transaction_view.ex +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/transaction_view.ex @@ -886,6 +886,11 @@ defmodule BlockScoutWeb.API.V2.TransactionView do BlockScoutWeb.API.V2.ZkSyncView.extend_transaction_json_response(result, transaction) end + defp do_with_chain_type_fields(:via, result, transaction, true = _single_transaction?, _conn, _watchlist_names) do + # credo:disable-for-next-line Credo.Check.Design.AliasUsage + BlockScoutWeb.API.V2.ViaView.extend_transaction_json_response(result, transaction) + end + defp do_with_chain_type_fields(:arbitrum, result, transaction, true = _single_transaction?, _conn, _watchlist_names) do # credo:disable-for-next-line Credo.Check.Design.AliasUsage BlockScoutWeb.API.V2.ArbitrumView.extend_transaction_json_response(result, transaction) diff --git a/apps/block_scout_web/lib/block_scout_web/views/api/v2/via_view.ex b/apps/block_scout_web/lib/block_scout_web/views/api/v2/via_view.ex new file mode 100644 index 000000000000..456afa0b5d91 --- /dev/null +++ b/apps/block_scout_web/lib/block_scout_web/views/api/v2/via_view.ex @@ -0,0 +1,207 @@ +defmodule BlockScoutWeb.API.V2.ViaView do + use BlockScoutWeb, :view + + alias Explorer.Chain.{Block, Transaction} + alias Explorer.Chain.Via.TransactionBatch + + alias BlockScoutWeb.API.V2.Helper, as: APIV2Helper + + @doc """ + Function to render GET requests to `/api/v2/via/batches/:batch_number` endpoint. 
+  """
+  @spec render(binary(), map()) :: map() | non_neg_integer()
+  def render("via_batch.json", %{batch: batch}) do
+    %{
+      "number" => batch.number,
+      "timestamp" => batch.timestamp,
+      "root_hash" => batch.root_hash,
+      "l1_transactions_count" => batch.l1_transaction_count,
+      # todo: It should be removed in favour of the `l1_transactions_count` property with the next release after 8.0.0
+      "l1_transaction_count" => batch.l1_transaction_count,
+      "l2_transactions_count" => batch.l2_transaction_count,
+      # todo: It should be removed in favour of the `l2_transactions_count` property with the next release after 8.0.0
+      "l2_transaction_count" => batch.l2_transaction_count,
+      "l1_gas_price" => batch.l1_gas_price,
+      "l2_fair_gas_price" => batch.l2_fair_gas_price,
+      "start_block_number" => batch.start_block,
+      "end_block_number" => batch.end_block,
+      # todo: It should be removed in favour of the `start_block_number` property with the next release after 8.0.0
+      "start_block" => batch.start_block,
+      # todo: It should be removed in favour of the `end_block_number` property with the next release after 8.0.0
+      "end_block" => batch.end_block
+    }
+    |> add_l1_transactions_info_and_status(batch)
+  end
+
+  @doc """
+  Function to render GET requests to `/api/v2/via/batches` endpoint.
+  """
+  def render("via_batches.json", %{
+        batches: batches,
+        next_page_params: next_page_params
+      }) do
+    %{
+      items: render_via_batches(batches),
+      next_page_params: next_page_params
+    }
+  end
+
+  @doc """
+  Function to render GET requests to `/api/v2/main-page/via/batches/confirmed` endpoint.
+  """
+  def render("via_batches.json", %{batches: batches}) do
+    %{items: render_via_batches(batches)}
+  end
+
+  @doc """
+  Function to render GET requests to `/api/v2/via/batches/count` endpoint.
+  """
+  def render("via_batches_count.json", %{count: count}) do
+    count
+  end
+
+  @doc """
+  Function to render GET requests to `/api/v2/main-page/via/batches/latest-number` endpoint.
+  """
+  def render("via_batch_latest_number.json", %{number: number}) do
+    number
+  end
+
+  defp render_via_batches(batches) do
+    Enum.map(batches, fn batch ->
+      %{
+        "number" => batch.number,
+        "timestamp" => batch.timestamp,
+        "transactions_count" => batch.l1_transaction_count + batch.l2_transaction_count,
+        # todo: It should be removed in favour of the `transactions_count` property with the next release after 8.0.0
+        "transaction_count" => batch.l1_transaction_count + batch.l2_transaction_count
+      }
+      |> add_l1_transactions_info_and_status(batch)
+    end)
+  end
+
+  @doc """
+  Extends the json output with a sub-map containing information related
+  to Via: batch number and associated L1 transactions and their timestamps.
+
+  ## Parameters
+  - `out_json`: a map defining output json which will be extended
+  - `transaction`: transaction structure containing Via-related data
+
+  ## Returns
+  A map extended with data related to the Via rollup
+  """
+  @spec extend_transaction_json_response(map(), %{
+          :__struct__ => Explorer.Chain.Transaction,
+          optional(:via_batch) => any(),
+          optional(:via_commit_transaction) => any(),
+          optional(:via_execute_transaction) => any(),
+          optional(:via_prove_transaction) => any(),
+          optional(any()) => any()
+        }) :: map()
+  def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
+    do_add_via_info(out_json, transaction)
+  end
+
+  @doc """
+  Extends the json output with a sub-map containing information related
+  to Via: batch number and associated L1 transactions and their timestamps.
+
+  ## Parameters
+  - `out_json`: a map defining output json which will be extended
+  - `block`: block structure containing Via-related data
+
+  ## Returns
+  A map extended with data related to the Via rollup
+  """
+  @spec extend_block_json_response(map(), %{
+          :__struct__ => Explorer.Chain.Block,
+          optional(:via_batch) => any(),
+          optional(:via_commit_transaction) => any(),
+          optional(:via_execute_transaction) => any(),
+          optional(:via_prove_transaction) => any(),
+          optional(any()) => any()
+        }) :: map()
+  def extend_block_json_response(out_json, %Block{} = block) do
+    do_add_via_info(out_json, block)
+  end
+
+  defp do_add_via_info(out_json, via_entity) do
+    res =
+      %{}
+      |> do_add_l1_transactions_info_and_status(%{
+        batch_number: get_batch_number(via_entity),
+        commit_transaction: via_entity.via_commit_transaction,
+        prove_transaction: via_entity.via_prove_transaction,
+        execute_transaction: via_entity.via_execute_transaction
+      })
+      |> Map.put("batch_number", get_batch_number(via_entity))
+
+    Map.put(out_json, "via", res)
+  end
+
+  defp get_batch_number(via_entity) do
+    case Map.get(via_entity, :via_batch) do
+      nil -> nil
+      %Ecto.Association.NotLoaded{} -> nil
+      value -> value.number
+    end
+  end
+
+  defp add_l1_transactions_info_and_status(out_json, %TransactionBatch{} = batch) do
+    do_add_l1_transactions_info_and_status(out_json, batch)
+  end
+
+  defp do_add_l1_transactions_info_and_status(out_json, via_item) do
+    l1_transactions = get_associated_l1_transactions(via_item)
+
+    out_json
+    |> Map.merge(%{
+      "status" => batch_status(via_item),
+      "commit_transaction_hash" => APIV2Helper.get_2map_data(l1_transactions, :commit_transaction, :hash),
+      "commit_transaction_timestamp" => APIV2Helper.get_2map_data(l1_transactions, :commit_transaction, :ts),
+      "prove_transaction_hash" => APIV2Helper.get_2map_data(l1_transactions, :prove_transaction, :hash),
+      "prove_transaction_timestamp" => APIV2Helper.get_2map_data(l1_transactions, :prove_transaction, :ts),
+      "execute_transaction_hash" => APIV2Helper.get_2map_data(l1_transactions, :execute_transaction, :hash),
+      "execute_transaction_timestamp" => APIV2Helper.get_2map_data(l1_transactions, :execute_transaction, :ts)
+    })
+  end
+
+  # Extract transaction hash and timestamp for L1 transactions associated with
+  # a Via rollup entity: batch, transaction or block.
+  #
+  # ## Parameters
+  # - `via_item`: A batch, transaction, or block.
+  #
+  # ## Returns
+  # A map containing nested maps describing corresponding L1 transactions
+  defp get_associated_l1_transactions(via_item) do
+    [:commit_transaction, :prove_transaction, :execute_transaction]
+    |> Enum.reduce(%{}, fn key, l1_transactions ->
+      case Map.get(via_item, key) do
+        nil -> Map.put(l1_transactions, key, nil)
+        %Ecto.Association.NotLoaded{} -> Map.put(l1_transactions, key, nil)
+        value -> Map.put(l1_transactions, key, %{hash: value.hash, ts: value.timestamp})
+      end
+    end)
+  end
+
+  # Inspects L1 transactions of the batch to determine the batch status.
+  #
+  # ## Parameters
+  # - `via_item`: A batch, transaction, or block.
+ # + # ## Returns + # A string with one of predefined statuses + defp batch_status(via_item) do + cond do + APIV2Helper.specified?(via_item.execute_transaction) -> "Executed on L1" + APIV2Helper.specified?(via_item.prove_transaction) -> "Validated on L1" + APIV2Helper.specified?(via_item.commit_transaction) -> "Sent to L1" + # Batch entity itself has no batch_number + not Map.has_key?(via_item, :batch_number) -> "Sealed on L2" + not is_nil(via_item.batch_number) -> "Sealed on L2" + true -> "Processed on L2" + end + end +end diff --git a/apps/block_scout_web/mix.exs b/apps/block_scout_web/mix.exs index 9dc909757654..67202cb5736f 100644 --- a/apps/block_scout_web/mix.exs +++ b/apps/block_scout_web/mix.exs @@ -32,6 +32,7 @@ defmodule BlockScoutWeb.Mixfile do Explorer.Chain.Optimism.OutputRoot, Explorer.Chain.Optimism.WithdrawalEvent, Explorer.Chain.ZkSync.Reader, + Explorer.Chain.Via.Reader, Explorer.Chain.Arbitrum.Reader ] ] diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/rpc/contract_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/rpc/contract_controller_test.exs index 04e2eb956c02..cb94084e4a42 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/api/rpc/contract_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/rpc/contract_controller_test.exs @@ -10,7 +10,8 @@ defmodule BlockScoutWeb.API.RPC.ContractControllerTest do setup :verify_on_exit! 
- if Application.compile_env(:explorer, :chain_type) == :zksync do + if Application.compile_env(:explorer, :chain_type) == :zksync || + Application.compile_env(:explorer, :chain_type) == :via do @optimization_runs "0" else @optimization_runs 200 diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/smart_contract_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/smart_contract_controller_test.exs index 74d1fb63230f..8f22c2acd84f 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/smart_contract_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/smart_contract_controller_test.exs @@ -659,7 +659,8 @@ defmodule BlockScoutWeb.API.V2.SmartContractControllerTest do end end - if Application.compile_env(:explorer, :chain_type) !== :zksync do + if Application.compile_env(:explorer, :chain_type) !== :zksync && + Application.compile_env(:explorer, :chain_type) !== :via do describe "/smart-contracts/{address_hash} <> eth_bytecode_db" do setup do old_interval_env = Application.get_env(:explorer, Explorer.Chain.Fetcher.LookUpSmartContractSourcesOnDemand) diff --git a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/verification_controller_test.exs b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/verification_controller_test.exs index 2a83fcfbc87e..33cdeb511ba8 100644 --- a/apps/block_scout_web/test/block_scout_web/controllers/api/v2/verification_controller_test.exs +++ b/apps/block_scout_web/test/block_scout_web/controllers/api/v2/verification_controller_test.exs @@ -34,7 +34,8 @@ defmodule BlockScoutWeb.API.V2.VerificationControllerTest do end end - if Application.compile_env(:explorer, :chain_type) !== :zksync do + if Application.compile_env(:explorer, :chain_type) !== :zksync && + Application.compile_env(:explorer, :chain_type) !== :via do describe "/api/v2/smart-contracts/{address_hash}/verification/via/flattened-code" do test "get 200 for 
verified contract", %{conn: conn} do contract = insert(:smart_contract) diff --git a/apps/explorer/config/dev.exs b/apps/explorer/config/dev.exs index b6a0f56ad222..2223fd6c4d54 100644 --- a/apps/explorer/config/dev.exs +++ b/apps/explorer/config/dev.exs @@ -31,6 +31,7 @@ for repo <- [ Explorer.Repo.Suave, Explorer.Repo.Zilliqa, Explorer.Repo.ZkSync, + Explorer.Repo.Via, Explorer.Repo.Neon ] do config :explorer, repo, timeout: :timer.seconds(80) diff --git a/apps/explorer/config/prod.exs b/apps/explorer/config/prod.exs index b272b96fb969..68affaf0064b 100644 --- a/apps/explorer/config/prod.exs +++ b/apps/explorer/config/prod.exs @@ -33,6 +33,7 @@ for repo <- [ Explorer.Repo.Suave, Explorer.Repo.Zilliqa, Explorer.Repo.ZkSync, + Explorer.Repo.Via, Explorer.Repo.Neon ] do config :explorer, repo, diff --git a/apps/explorer/config/test.exs b/apps/explorer/config/test.exs index 675668e45d7a..67a9174ec0ba 100644 --- a/apps/explorer/config/test.exs +++ b/apps/explorer/config/test.exs @@ -72,6 +72,7 @@ for repo <- [ Explorer.Repo.Suave, Explorer.Repo.Zilliqa, Explorer.Repo.ZkSync, + Explorer.Repo.Via, Explorer.Repo.Neon ] do config :explorer, repo, diff --git a/apps/explorer/lib/explorer/application.ex b/apps/explorer/lib/explorer/application.ex index 8bbe921094d3..c0d2d67423a0 100644 --- a/apps/explorer/lib/explorer/application.ex +++ b/apps/explorer/lib/explorer/application.ex @@ -299,6 +299,7 @@ defmodule Explorer.Application do :indexer ), Explorer.Migrator.RefetchContractCodes |> configure() |> configure_chain_type_dependent_process(:zksync), + Explorer.Migrator.RefetchContractCodes |> configure() |> configure_chain_type_dependent_process(:via), configure(Explorer.Chain.Fetcher.AddressesBlacklist), Explorer.Migrator.SwitchPendingOperations, configure_mode_dependent_process(Explorer.Utility.RateLimiter, :api) @@ -327,6 +328,7 @@ defmodule Explorer.Application do Explorer.Repo.Stability, Explorer.Repo.Suave, Explorer.Repo.Zilliqa, + Explorer.Repo.Via, 
Explorer.Repo.ZkSync ] else diff --git a/apps/explorer/lib/explorer/chain/address.ex b/apps/explorer/lib/explorer/chain/address.ex index fc85d335bb75..999c28df88c5 100644 --- a/apps/explorer/lib/explorer/chain/address.ex +++ b/apps/explorer/lib/explorer/chain/address.ex @@ -66,6 +66,13 @@ defmodule Explorer.Chain.Address.Schema do ] end + :via -> + quote do + [ + field(:contract_code_refetched, :boolean) + ] + end + _ -> [] end) @@ -153,6 +160,9 @@ defmodule Explorer.Chain.Address do :zksync -> ~w(contract_code_refetched)a + :via -> + ~w(contract_code_refetched)a + _ -> [] end) diff --git a/apps/explorer/lib/explorer/chain/block.ex b/apps/explorer/lib/explorer/chain/block.ex index 685f3bee5fcd..40c76d5a333c 100644 --- a/apps/explorer/lib/explorer/chain/block.ex +++ b/apps/explorer/lib/explorer/chain/block.ex @@ -25,6 +25,7 @@ defmodule Explorer.Chain.Block.Schema do alias Explorer.Chain.Zilliqa.AggregateQuorumCertificate, as: ZilliqaAggregateQuorumCertificate alias Explorer.Chain.Zilliqa.QuorumCertificate, as: ZilliqaQuorumCertificate alias Explorer.Chain.ZkSync.BatchBlock, as: ZkSyncBatchBlock + alias Explorer.Chain.Via.BatchBlock, as: ViaBatchBlock @chain_type_fields (case @chain_type do :ethereum -> @@ -72,6 +73,17 @@ defmodule Explorer.Chain.Block.Schema do end, 2 ) + :via -> + elem( + quote do + has_one(:via_batch_block, ViaBatchBlock, foreign_key: :hash, references: :hash) + has_one(:via_batch, through: [:via_batch_block, :batch]) + has_one(:via_commit_transaction, through: [:via_batch, :commit_transaction]) + has_one(:via_prove_transaction, through: [:via_batch, :prove_transaction]) + has_one(:via_execute_transaction, through: [:via_batch, :execute_transaction]) + end, + 2 + ) :celo -> elem( diff --git a/apps/explorer/lib/explorer/chain/health/monitor.ex b/apps/explorer/lib/explorer/chain/health/monitor.ex index 5cdb25080989..b386e3777e72 100644 --- a/apps/explorer/lib/explorer/chain/health/monitor.ex +++ b/apps/explorer/lib/explorer/chain/health/monitor.ex 
@@ -14,6 +14,7 @@ defmodule Explorer.Chain.Health.Monitor do alias Explorer.Chain.PolygonZkevm.Reader, as: PolygonZkevmReader alias Explorer.Chain.Scroll.Reader, as: ScrollReader alias Explorer.Chain.ZkSync.Reader, as: ZkSyncReader + alias Explorer.Chain.Via.Reader, as: ViaReader alias Explorer.Repo def start_link(_) do @@ -77,6 +78,9 @@ defmodule Explorer.Chain.Health.Monitor do :zksync -> get_latest_batch_info_from_module(ZkSyncReader) + :via -> + get_latest_batch_info_from_module(ViaReader) + :optimism -> get_latest_batch_info_from_module(OptimismReader) diff --git a/apps/explorer/lib/explorer/chain/import/runner/via/batch_blocks.ex b/apps/explorer/lib/explorer/chain/import/runner/via/batch_blocks.ex new file mode 100644 index 000000000000..db384c355070 --- /dev/null +++ b/apps/explorer/lib/explorer/chain/import/runner/via/batch_blocks.ex @@ -0,0 +1,79 @@ +defmodule Explorer.Chain.Import.Runner.Via.BatchBlocks do + @moduledoc """ + Bulk imports `t:Explorer.Chain.Via.BatchBlock.t/0`. + """ + + require Ecto.Query + + alias Ecto.{Changeset, Multi, Repo} + alias Explorer.Chain.Import + alias Explorer.Chain.Via.BatchBlock + alias Explorer.Prometheus.Instrumenter + + @behaviour Import.Runner + + # milliseconds + @timeout 60_000 + + @type imported :: [BatchBlock.t()] + + @impl Import.Runner + def ecto_schema_module, do: BatchBlock + + @impl Import.Runner + def option_key, do: :via_batch_blocks + + @impl Import.Runner + @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()} + def imported_table_row do + %{ + value_type: "[#{ecto_schema_module()}.t()]", + value_description: "List of `t:#{ecto_schema_module()}.t/0`s" + } + end + + @impl Import.Runner + @spec run(Multi.t(), list(), map()) :: Multi.t() + def run(multi, changes_list, %{timestamps: timestamps} = options) do + insert_options = + options + |> Map.get(option_key(), %{}) + |> Map.take(~w(on_conflict timeout)a) + |> Map.put_new(:timeout, @timeout) + |> Map.put(:timestamps, 
timestamps) + + Multi.run(multi, :insert_via_batch_blocks, fn repo, _ -> + Instrumenter.block_import_stage_runner( + fn -> insert(repo, changes_list, insert_options) end, + :block_referencing, + :via_batch_blocks, + :via_batch_blocks + ) + end) + end + + @impl Import.Runner + def timeout, do: @timeout + + @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) :: + {:ok, [BatchBlock.t()]} + | {:error, [Changeset.t()]} + def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = _options) when is_list(changes_list) do + # Enforce Via.BatchBlock ShareLocks order (see docs: sharelock.md) + ordered_changes_list = Enum.sort_by(changes_list, & &1.hash) + + {:ok, inserted} = + Import.insert_changes_list( + repo, + ordered_changes_list, + for: BatchBlock, + returning: true, + timeout: timeout, + timestamps: timestamps, + conflict_target: :hash, + on_conflict: :nothing + ) + + {:ok, inserted} + end +end diff --git a/apps/explorer/lib/explorer/chain/import/runner/via/batch_transactions.ex b/apps/explorer/lib/explorer/chain/import/runner/via/batch_transactions.ex new file mode 100644 index 000000000000..e925d22e5062 --- /dev/null +++ b/apps/explorer/lib/explorer/chain/import/runner/via/batch_transactions.ex @@ -0,0 +1,79 @@ +defmodule Explorer.Chain.Import.Runner.Via.BatchTransactions do + @moduledoc """ + Bulk imports `t:Explorer.Chain.Via.BatchTransaction.t/0`. 
+ """ + + require Ecto.Query + + alias Ecto.{Changeset, Multi, Repo} + alias Explorer.Chain.Import + alias Explorer.Chain.Via.BatchTransaction + alias Explorer.Prometheus.Instrumenter + + @behaviour Import.Runner + + # milliseconds + @timeout 60_000 + + @type imported :: [BatchTransaction.t()] + + @impl Import.Runner + def ecto_schema_module, do: BatchTransaction + + @impl Import.Runner + def option_key, do: :via_batch_transactions + + @impl Import.Runner + @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()} + def imported_table_row do + %{ + value_type: "[#{ecto_schema_module()}.t()]", + value_description: "List of `t:#{ecto_schema_module()}.t/0`s" + } + end + + @impl Import.Runner + @spec run(Multi.t(), list(), map()) :: Multi.t() + def run(multi, changes_list, %{timestamps: timestamps} = options) do + insert_options = + options + |> Map.get(option_key(), %{}) + |> Map.take(~w(on_conflict timeout)a) + |> Map.put_new(:timeout, @timeout) + |> Map.put(:timestamps, timestamps) + + Multi.run(multi, :insert_via_batch_transactions, fn repo, _ -> + Instrumenter.block_import_stage_runner( + fn -> insert(repo, changes_list, insert_options) end, + :block_referencing, + :via_batch_transactions, + :via_batch_transactions + ) + end) + end + + @impl Import.Runner + def timeout, do: @timeout + + @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) :: + {:ok, [BatchTransaction.t()]} + | {:error, [Changeset.t()]} + def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = _options) when is_list(changes_list) do + # Enforce Via.BatchTransaction ShareLocks order (see docs: sharelock.md) + ordered_changes_list = Enum.sort_by(changes_list, & &1.transaction_hash) + + {:ok, inserted} = + Import.insert_changes_list( + repo, + ordered_changes_list, + for: BatchTransaction, + returning: true, + timeout: timeout, + timestamps: timestamps, + conflict_target: :transaction_hash, 
+ on_conflict: :nothing + ) + + {:ok, inserted} + end +end diff --git a/apps/explorer/lib/explorer/chain/import/runner/via/lifecycle_transactions.ex b/apps/explorer/lib/explorer/chain/import/runner/via/lifecycle_transactions.ex new file mode 100644 index 000000000000..117009aca277 --- /dev/null +++ b/apps/explorer/lib/explorer/chain/import/runner/via/lifecycle_transactions.ex @@ -0,0 +1,103 @@ +defmodule Explorer.Chain.Import.Runner.Via.LifecycleTransactions do + @moduledoc """ + Bulk imports `t:Explorer.Chain.Via.LifecycleTransaction.t/0`. + """ + + require Ecto.Query + + alias Ecto.{Changeset, Multi, Repo} + alias Explorer.Chain.Import + alias Explorer.Chain.Via.LifecycleTransaction + alias Explorer.Prometheus.Instrumenter + + import Ecto.Query, only: [from: 2] + + @behaviour Import.Runner + + # milliseconds + @timeout 60_000 + + @type imported :: [LifecycleTransaction.t()] + + @impl Import.Runner + def ecto_schema_module, do: LifecycleTransaction + + @impl Import.Runner + def option_key, do: :via_lifecycle_transactions + + @impl Import.Runner + @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()} + def imported_table_row do + %{ + value_type: "[#{ecto_schema_module()}.t()]", + value_description: "List of `t:#{ecto_schema_module()}.t/0`s" + } + end + + @impl Import.Runner + @spec run(Multi.t(), list(), map()) :: Multi.t() + def run(multi, changes_list, %{timestamps: timestamps} = options) do + insert_options = + options + |> Map.get(option_key(), %{}) + |> Map.take(~w(on_conflict timeout)a) + |> Map.put_new(:timeout, @timeout) + |> Map.put(:timestamps, timestamps) + + Multi.run(multi, :insert_via_lifecycle_transactions, fn repo, _ -> + Instrumenter.block_import_stage_runner( + fn -> insert(repo, changes_list, insert_options) end, + :block_referencing, + :via_lifecycle_transactions, + :via_lifecycle_transactions + ) + end) + end + + @impl Import.Runner + def timeout, do: @timeout + + @spec insert(Repo.t(), [map()], 
%{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) :: + {:ok, [LifecycleTransaction.t()]} + | {:error, [Changeset.t()]} + def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do + on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0) + + # Enforce Via.LifecycleTransaction ShareLocks order (see docs: sharelock.md) + ordered_changes_list = Enum.sort_by(changes_list, & &1.id) + + {:ok, inserted} = + Import.insert_changes_list( + repo, + ordered_changes_list, + for: LifecycleTransaction, + returning: true, + timeout: timeout, + timestamps: timestamps, + conflict_target: :hash, + on_conflict: on_conflict + ) + + {:ok, inserted} + end + + defp default_on_conflict do + from( + transaction in LifecycleTransaction, + update: [ + set: [ + # don't update `id` as it is a primary key + # don't update `hash` as it is a unique index and used for the conflict target + timestamp: fragment("EXCLUDED.timestamp"), + inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", transaction.inserted_at), + updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", transaction.updated_at) + ] + ], + where: + fragment( + "(EXCLUDED.timestamp) IS DISTINCT FROM (?)", + transaction.timestamp + ) + ) + end +end diff --git a/apps/explorer/lib/explorer/chain/import/runner/via/transaction_batches.ex b/apps/explorer/lib/explorer/chain/import/runner/via/transaction_batches.ex new file mode 100644 index 000000000000..d9a0d68517a9 --- /dev/null +++ b/apps/explorer/lib/explorer/chain/import/runner/via/transaction_batches.ex @@ -0,0 +1,122 @@ +defmodule Explorer.Chain.Import.Runner.Via.TransactionBatches do + @moduledoc """ + Bulk imports `t:Explorer.Chain.Via.TransactionBatch.t/0`. 
+ """ + + require Ecto.Query + + alias Ecto.{Changeset, Multi, Repo} + alias Explorer.Chain.Import + alias Explorer.Chain.Via.TransactionBatch + alias Explorer.Prometheus.Instrumenter + + import Ecto.Query, only: [from: 2] + + @behaviour Import.Runner + + # milliseconds + @timeout 60_000 + + @type imported :: [TransactionBatch.t()] + + @impl Import.Runner + def ecto_schema_module, do: TransactionBatch + + @impl Import.Runner + def option_key, do: :via_transaction_batches + + @impl Import.Runner + @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()} + def imported_table_row do + %{ + value_type: "[#{ecto_schema_module()}.t()]", + value_description: "List of `t:#{ecto_schema_module()}.t/0`s" + } + end + + @impl Import.Runner + @spec run(Multi.t(), list(), map()) :: Multi.t() + def run(multi, changes_list, %{timestamps: timestamps} = options) do + insert_options = + options + |> Map.get(option_key(), %{}) + |> Map.take(~w(on_conflict timeout)a) + |> Map.put_new(:timeout, @timeout) + |> Map.put(:timestamps, timestamps) + + Multi.run(multi, :insert_via_transaction_batches, fn repo, _ -> + Instrumenter.block_import_stage_runner( + fn -> insert(repo, changes_list, insert_options) end, + :block_referencing, + :via_transaction_batches, + :via_transaction_batches + ) + end) + end + + @impl Import.Runner + def timeout, do: @timeout + + @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) :: + {:ok, [TransactionBatch.t()]} + | {:error, [Changeset.t()]} + def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do + on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0) + + # Enforce Via.TransactionBatch ShareLocks order (see docs: sharelock.md) + ordered_changes_list = Enum.sort_by(changes_list, & &1.number) + + {:ok, inserted} = + Import.insert_changes_list( + repo, + ordered_changes_list, + for: 
TransactionBatch, + returning: true, + timeout: timeout, + timestamps: timestamps, + conflict_target: :number, + on_conflict: on_conflict + ) + + {:ok, inserted} + end + + defp default_on_conflict do + from( + tb in TransactionBatch, + update: [ + set: [ + # don't update `number` as it is a primary key and used for the conflict target + timestamp: fragment("EXCLUDED.timestamp"), + l1_transaction_count: fragment("EXCLUDED.l1_transaction_count"), + l2_transaction_count: fragment("EXCLUDED.l2_transaction_count"), + root_hash: fragment("EXCLUDED.root_hash"), + l1_gas_price: fragment("EXCLUDED.l1_gas_price"), + l2_fair_gas_price: fragment("EXCLUDED.l2_fair_gas_price"), + start_block: fragment("EXCLUDED.start_block"), + end_block: fragment("EXCLUDED.end_block"), + commit_id: fragment("EXCLUDED.commit_id"), + prove_id: fragment("EXCLUDED.prove_id"), + execute_id: fragment("EXCLUDED.execute_id"), + inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tb.inserted_at), + updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at) + ] + ], + where: + fragment( + "(EXCLUDED.timestamp, EXCLUDED.l1_transaction_count, EXCLUDED.l2_transaction_count, EXCLUDED.root_hash, EXCLUDED.l1_gas_price, EXCLUDED.l2_fair_gas_price, EXCLUDED.start_block, EXCLUDED.end_block, EXCLUDED.commit_id, EXCLUDED.prove_id, EXCLUDED.execute_id) IS DISTINCT FROM (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + tb.timestamp, + tb.l1_transaction_count, + tb.l2_transaction_count, + tb.root_hash, + tb.l1_gas_price, + tb.l2_fair_gas_price, + tb.start_block, + tb.end_block, + tb.commit_id, + tb.prove_id, + tb.execute_id + ) + ) + end +end diff --git a/apps/explorer/lib/explorer/chain/import/stage/chain_type_specific.ex b/apps/explorer/lib/explorer/chain/import/stage/chain_type_specific.ex index 1b4d182cc841..6ef07098c572 100644 --- a/apps/explorer/lib/explorer/chain/import/stage/chain_type_specific.ex +++ b/apps/explorer/lib/explorer/chain/import/stage/chain_type_specific.ex @@ -39,6 +39,12 @@ defmodule 
Explorer.Chain.Import.Stage.ChainTypeSpecific do Runner.ZkSync.BatchTransactions, Runner.ZkSync.BatchBlocks ], + via: [ + Runner.Via.LifecycleTransactions, + Runner.Via.TransactionBatches, + Runner.Via.BatchTransactions, + Runner.Via.BatchBlocks + ], shibarium: [ Runner.Shibarium.BridgeOperations ], diff --git a/apps/explorer/lib/explorer/chain/smart_contract.ex b/apps/explorer/lib/explorer/chain/smart_contract.ex index ffd1ee65a1af..75a6064a91ba 100644 --- a/apps/explorer/lib/explorer/chain/smart_contract.ex +++ b/apps/explorer/lib/explorer/chain/smart_contract.ex @@ -13,6 +13,14 @@ defmodule Explorer.Chain.SmartContract.Schema do } case @chain_type do + :via -> + @chain_type_fields quote( + do: [ + field(:optimization_runs, :string), + field(:zk_compiler_version, :string, null: true) + ] + ) + :zksync -> @chain_type_fields quote( do: [ @@ -148,6 +156,9 @@ defmodule Explorer.Chain.SmartContract do :zksync -> ~w(zk_compiler_version)a + :via -> + ~w(zk_compiler_version)a + :arbitrum -> ~w(package_name github_repository_metadata)a @@ -188,6 +199,30 @@ defmodule Explorer.Chain.SmartContract do "type" => "function" } ] + @create_via_abi [ + %{ + "inputs" => [ + %{"internalType" => "bytes32", "name" => "_salt", "type" => "bytes32"}, + %{"internalType" => "bytes32", "name" => "_bytecodeHash", "type" => "bytes32"}, + %{"internalType" => "bytes", "name" => "_input", "type" => "bytes"} + ], + "name" => "create2", + "outputs" => [%{"internalType" => "address", "name" => "", "type" => "address"}], + "stateMutability" => "payable", + "type" => "function" + }, + %{ + "inputs" => [ + %{"internalType" => "bytes32", "name" => "_salt", "type" => "bytes32"}, + %{"internalType" => "bytes32", "name" => "_bytecodeHash", "type" => "bytes32"}, + %{"internalType" => "bytes", "name" => "_input", "type" => "bytes"} + ], + "name" => "create", + "outputs" => [%{"internalType" => "address", "name" => "", "type" => "address"}], + "stateMutability" => "payable", + "type" => "function" + } + ] 
@default_languages ~w(solidity vyper yul)a @chain_type_languages (case @chain_type do @@ -451,6 +486,9 @@ defmodule Explorer.Chain.SmartContract do :zksync -> """ * `zk_compiler_version` - the version of ZkSolc or ZkVyper compilers. """ + :via -> """ + * `zk_compiler_version` - the version of ZkSolc or ZkVyper compilers. + """ :arbitrum -> """ * `package_name` - package name of stylus contract. * `github_repository_metadata` - map with repository details. @@ -580,7 +618,7 @@ defmodule Explorer.Chain.SmartContract do def merge_twin_contract_with_changeset(nil, %Changeset{} = changeset) do optimization_runs = - if Application.get_env(:explorer, :chain_type) == :zksync, + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via, do: "0", else: "200" @@ -593,7 +631,8 @@ defmodule Explorer.Chain.SmartContract do |> Changeset.put_change(:contract_source_code, "") |> Changeset.put_change(:autodetect_constructor_args, true) |> Changeset.put_change(:is_yul, false) - |> (&if(Application.get_env(:explorer, :chain_type) == :zksync, + |> (&if( + Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via, do: Changeset.put_change(&1, :zk_compiler_version, "latest"), else: &1 )).() @@ -616,12 +655,14 @@ defmodule Explorer.Chain.SmartContract do merge_twin_vyper_contract_with_changeset(nil, changeset) end + # VIA-TODO def merge_twin_vyper_contract_with_changeset(nil, %Changeset{} = changeset) do changeset |> Changeset.put_change(:name, "Vyper_contract") |> Changeset.put_change(:compiler_version, "latest") |> Changeset.put_change(:contract_source_code, "") - |> (&if(Application.get_env(:explorer, :chain_type) == :zksync, + |> (&if( + Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via, do: Changeset.put_change(&1, :zk_compiler_version, "latest"), else: &1 )).() @@ -1543,6 +1584,30 @@ defmodule 
Explorer.Chain.SmartContract do end end + @doc """ + Retrieves the constructor arguments for a via smart contract. + Using @create_via_abi function decodes transaction input of contract creation + + ## Parameters + - `binary()`: The binary data representing the smart contract. + + ## Returns + - `nil`: If the constructor arguments cannot be retrieved. + - `binary()`: The constructor arguments in binary format. + """ + @spec via_get_constructor_arguments(binary()) :: nil | binary() + def via_get_constructor_arguments(address_hash_string) do + creation_input = Chain.contract_creation_input_data_from_transaction(address_hash_string) + + case @create_via_abi |> ABI.parse_specification() |> ABI.find_and_decode(creation_input) do + {%FunctionSelector{}, [_, _, constructor_args]} -> + Base.encode16(constructor_args, case: :lower) + + _ -> + nil + end + end + @doc """ Retrieves the smart contract language, taking legacy fields into account for compatibility. It first tries to retrieve the language from the `language` diff --git a/apps/explorer/lib/explorer/chain/transaction.ex b/apps/explorer/lib/explorer/chain/transaction.ex index c4e6b39d9316..c1f3fb81cadb 100644 --- a/apps/explorer/lib/explorer/chain/transaction.ex +++ b/apps/explorer/lib/explorer/chain/transaction.ex @@ -30,6 +30,7 @@ defmodule Explorer.Chain.Transaction.Schema do alias Explorer.Chain.PolygonZkevm.BatchTransaction, as: ZkevmBatchTransaction alias Explorer.Chain.Transaction.{Fork, Status} alias Explorer.Chain.ZkSync.BatchTransaction, as: ZkSyncBatchTransaction + alias Explorer.Chain.Via.BatchTransaction, as: ViaBatchTransaction @chain_type_fields (case @chain_type do :ethereum -> @@ -136,6 +137,22 @@ defmodule Explorer.Chain.Transaction.Schema do 2 ) + :via -> + elem( + quote do + has_one(:via_batch_transaction, ViaBatchTransaction, + foreign_key: :transaction_hash, + references: :hash + ) + + has_one(:via_batch, through: [:via_batch_transaction, :batch]) + has_one(:via_commit_transaction, through: 
[:via_batch, :commit_transaction]) + has_one(:via_prove_transaction, through: [:via_batch, :prove_transaction]) + has_one(:via_execute_transaction, through: [:via_batch, :execute_transaction]) + end, + 2 + ) + :celo -> elem( quote do diff --git a/apps/explorer/lib/explorer/chain/via/batch_block.ex b/apps/explorer/lib/explorer/chain/via/batch_block.ex new file mode 100644 index 000000000000..7ac626e83527 --- /dev/null +++ b/apps/explorer/lib/explorer/chain/via/batch_block.ex @@ -0,0 +1,37 @@ +defmodule Explorer.Chain.Via.BatchBlock do + @moduledoc "Models a list of blocks related to a batch for Via." + + use Explorer.Schema + + alias Explorer.Chain.{Block, Hash} + alias Explorer.Chain.Via.TransactionBatch + + @required_attrs ~w(batch_number hash)a + + @type t :: %__MODULE__{ + batch_number: non_neg_integer(), + batch: %Ecto.Association.NotLoaded{} | TransactionBatch.t() | nil, + hash: Hash.t(), + block: %Ecto.Association.NotLoaded{} | Block.t() | nil + } + + @primary_key false + schema "via_batch_l2_blocks" do + belongs_to(:batch, TransactionBatch, foreign_key: :batch_number, references: :number, type: :integer) + belongs_to(:block, Block, foreign_key: :hash, primary_key: true, references: :hash, type: Hash.Full) + + timestamps() + end + + @doc """ + Validates that the `attrs` are valid. 
+ """ + @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t() + def changeset(%__MODULE__{} = items, attrs \\ %{}) do + items + |> cast(attrs, @required_attrs) + |> validate_required(@required_attrs) + |> foreign_key_constraint(:batch_number) + |> unique_constraint(:hash) + end +end diff --git a/apps/explorer/lib/explorer/chain/via/batch_transaction.ex b/apps/explorer/lib/explorer/chain/via/batch_transaction.ex new file mode 100644 index 000000000000..618f3569432c --- /dev/null +++ b/apps/explorer/lib/explorer/chain/via/batch_transaction.ex @@ -0,0 +1,51 @@ +defmodule Explorer.Chain.Via.BatchTransaction do + @moduledoc """ + Models a list of transactions related to a batch for Via. + + Changes in the schema should be reflected in the bulk import module: + - Explorer.Chain.Import.Runner.Via.BatchTransactions + + Migrations: + - Explorer.Repo.Via.Migrations.CreateViaTables + - Explorer.Repo.Via.Migrations.RenameFieldInBatchTransactions + """ + + use Explorer.Schema + + alias Explorer.Chain.{Hash, Transaction} + alias Explorer.Chain.Via.TransactionBatch + + @required_attrs ~w(batch_number transaction_hash)a + + @typedoc """ + * `transaction_hash` - The hash of the rollup transaction. + * `l2_transaction` - An instance of `Explorer.Chain.Transaction` referenced by `transaction_hash`. + * `batch_number` - The number of the Via batch. + * `batch` - An instance of `Explorer.Chain.Via.TransactionBatch` referenced by `batch_number`. + """ + @primary_key false + typed_schema "via_batch_l2_transactions" do + belongs_to(:batch, TransactionBatch, foreign_key: :batch_number, references: :number, type: :integer) + + belongs_to(:l2_transaction, Transaction, + foreign_key: :transaction_hash, + primary_key: true, + references: :hash, + type: Hash.Full + ) + + timestamps() + end + + @doc """ + Validates that the `attrs` are valid. 
+ """ + @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t() + def changeset(%__MODULE__{} = transactions, attrs \\ %{}) do + transactions + |> cast(attrs, @required_attrs) + |> validate_required(@required_attrs) + |> foreign_key_constraint(:batch_number) + |> unique_constraint(:transaction_hash) + end +end diff --git a/apps/explorer/lib/explorer/chain/via/lifecycle_transaction.ex b/apps/explorer/lib/explorer/chain/via/lifecycle_transaction.ex new file mode 100644 index 000000000000..8895a6ca745b --- /dev/null +++ b/apps/explorer/lib/explorer/chain/via/lifecycle_transaction.ex @@ -0,0 +1,38 @@ +defmodule Explorer.Chain.Via.LifecycleTransaction do + @moduledoc "Models an L1 lifecycle transaction for Via." + + use Explorer.Schema + + alias Explorer.Chain.Hash + alias Explorer.Chain.Via.TransactionBatch + + @required_attrs ~w(id hash timestamp)a + + @type t :: %__MODULE__{ + hash: Hash.t(), + timestamp: DateTime.t() + } + + @primary_key {:id, :integer, autogenerate: false} + schema "via_lifecycle_l1_transactions" do + field(:hash, Hash.Full) + field(:timestamp, :utc_datetime_usec) + + has_many(:committed_batches, TransactionBatch, foreign_key: :commit_id) + has_many(:proven_batches, TransactionBatch, foreign_key: :prove_id) + has_many(:executed_batches, TransactionBatch, foreign_key: :execute_id) + + timestamps() + end + + @doc """ + Validates that the `attrs` are valid. + """ + @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t() + def changeset(%__MODULE__{} = txn, attrs \\ %{}) do + txn + |> cast(attrs, @required_attrs) + |> validate_required(@required_attrs) + |> unique_constraint(:id) + end +end diff --git a/apps/explorer/lib/explorer/chain/via/reader.ex b/apps/explorer/lib/explorer/chain/via/reader.ex new file mode 100644 index 000000000000..7dee6795829c --- /dev/null +++ b/apps/explorer/lib/explorer/chain/via/reader.ex @@ -0,0 +1,381 @@ +defmodule Explorer.Chain.Via.Reader do + @moduledoc "Contains read functions for via modules." 
+ + import Ecto.Query, + only: [ + from: 2, + limit: 2, + order_by: 2, + where: 2, + where: 3 + ] + + import Explorer.Chain, only: [select_repo: 1] + + alias Explorer.Chain.Via.{ + BatchTransaction, + LifecycleTransaction, + TransactionBatch + } + + alias Explorer.{Chain, PagingOptions, Repo} + alias Explorer.Prometheus.Instrumenter + + @doc """ + Receives total amount of batches imported to the `via_transaction_batches` table. + + ## Parameters + - `options`: passed to `Chain.select_repo()` + + ## Returns + Total amount of batches + """ + @spec batches_count(keyword()) :: any() + def batches_count(options) do + TransactionBatch + |> select_repo(options).aggregate(:count, timeout: :infinity) + end + + @doc """ + Receives the batch from the `via_transaction_batches` table by using its number or the latest batch if `:latest` is used. + + ## Parameters + - `number`: could be either the batch number or `:latest` to get the latest available in DB batch + - `options`: passed to `Chain.select_repo()` + + ## Returns + - `{:ok, Explorer.Chain.Via.TransactionBatch}` if the batch found + - `{:error, :not_found}` if there is no batch with such number + """ + @spec batch(:latest | binary() | integer(), keyword()) :: + {:error, :not_found} | {:ok, Explorer.Chain.Via.TransactionBatch} + def batch(number, options) + + def batch(:latest, options) when is_list(options) do + TransactionBatch + |> order_by(desc: :number) + |> limit(1) + |> select_repo(options).one() + |> case do + nil -> {:error, :not_found} + batch -> {:ok, batch} + end + end + + def batch(number, options) + when (is_integer(number) or is_binary(number)) and + is_list(options) do + necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) + + TransactionBatch + |> where(number: ^number) + |> Chain.join_associations(necessity_by_association) + |> select_repo(options).one() + |> case do + nil -> {:error, :not_found} + batch -> {:ok, batch} + end + end + + @doc """ + Receives a list of batches from 
the `via_transaction_batches` table within the range of batch numbers + + ## Parameters + - `start_number`: The start of the batch numbers range. + - `end_number`: The end of the batch numbers range. + - `options`: Options passed to `Chain.select_repo()`. + + ## Returns + - A list of `Explorer.Chain.Via.TransactionBatch` if at least one batch exists within the range. + - An empty list (`[]`) if no batches within the range are found in the database. + """ + @spec batches(integer(), integer(), keyword()) :: [Explorer.Chain.Via.TransactionBatch] + def batches(start_number, end_number, options) + when is_integer(start_number) and + is_integer(end_number) and + is_list(options) do + necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) + + base_query = from(tb in TransactionBatch, order_by: [desc: tb.number]) + + base_query + |> where([tb], tb.number >= ^start_number and tb.number <= ^end_number) + |> Chain.join_associations(necessity_by_association) + |> select_repo(options).all() + end + + @doc """ + Receives a list of batches from the `via_transaction_batches` table with the numbers defined in the input list. + + ## Parameters + - `numbers`: The list of batch numbers to retrieve from the database. + - `options`: Options passed to `Chain.select_repo()`. + + ## Returns + - A list of `Explorer.Chain.Via.TransactionBatch` if at least one batch matches the numbers from the list. The output list could be less than the input list. + - An empty list (`[]`) if no batches with numbers from the list are found. 
+ """ + @spec batches(maybe_improper_list(integer(), []), keyword()) :: [Explorer.Chain.Via.TransactionBatch] + def batches(numbers, options) + when is_list(numbers) and + is_list(options) do + necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) + + base_query = from(tb in TransactionBatch, order_by: [desc: tb.number]) + + base_query + |> where([tb], tb.number in ^numbers) + |> Chain.join_associations(necessity_by_association) + |> select_repo(options).all() + end + + @doc """ + Receives a list of batches from the `via_transaction_batches` table. + + ## Parameters + - `options`: Options passed to `Chain.select_repo()`. (Optional) + + ## Returns + - If the option `confirmed?` is set, returns the ten latest committed batches (`Explorer.Chain.Via.TransactionBatch`). + - Returns a list of `Explorer.Chain.Via.TransactionBatch` based on the paging options if `confirmed?` is not set. + """ + @spec batches(keyword()) :: [Explorer.Chain.Via.TransactionBatch] + @spec batches() :: [Explorer.Chain.Via.TransactionBatch] + def batches(options \\ []) when is_list(options) do + necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) + + base_query = + from(tb in TransactionBatch, + order_by: [desc: tb.number] + ) + + query = + if Keyword.get(options, :confirmed?, false) do + base_query + |> Chain.join_associations(necessity_by_association) + |> where([tb], not is_nil(tb.commit_id) and tb.commit_id > 0) + |> limit(10) + else + paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options()) + + case paging_options do + %PagingOptions{key: {0}} -> + [] + + _ -> + base_query + |> Chain.join_associations(necessity_by_association) + |> page_batches(paging_options) + |> limit(^paging_options.page_size) + end + end + + select_repo(options).all(query) + end + + @doc """ + Receives a list of transactions from the `via_batch_l2_transactions` table included in a specific batch. 
+
+    ## Parameters
+    - `batch_number`: The number of the batch whose transactions were included to L1.
+    - `options`: Options passed to `Chain.select_repo()`. (Optional)
+
+    ## Returns
+    - A list of `Explorer.Chain.Via.BatchTransaction` belonging to the specified batch.
+  """
+  @spec batch_transactions(non_neg_integer()) :: [Explorer.Chain.Via.BatchTransaction]
+  @spec batch_transactions(non_neg_integer(), keyword()) :: [Explorer.Chain.Via.BatchTransaction]
+  def batch_transactions(batch_number, options \\ [])
+      when is_integer(batch_number) or
+             is_binary(batch_number) do
+    query = from(batch in BatchTransaction, where: batch.batch_number == ^batch_number)
+
+    select_repo(options).all(query)
+  end
+
+  @doc """
+    Gets the number of the earliest batch in the `via_transaction_batches` table where the commitment transaction is not set.
+    Batch #0 is filtered out, as it does not have a linked commitment transaction.
+
+    ## Returns
+    - The number of a batch if it exists, otherwise `nil`. `nil` could mean either no batches imported yet or all imported batches are marked as committed or Batch #0 is the only available batch.
+  """
+  @spec earliest_sealed_batch_number() :: non_neg_integer() | nil
+  def earliest_sealed_batch_number do
+    query =
+      from(tb in TransactionBatch,
+        select: tb.number,
+        where: is_nil(tb.commit_id) and tb.number > 0,
+        order_by: [asc: tb.number],
+        limit: 1
+      )
+
+    query
+    |> Repo.one()
+  end
+
+  @doc """
+    Gets the number of the earliest batch in the `via_transaction_batches` table where the proving transaction is not set.
+    Batch #0 is filtered out, as it does not have a linked proving transaction.
+
+    ## Returns
+    - The number of a batch if it exists, otherwise `nil`. `nil` could mean either no batches imported yet or all imported batches are marked as proven or Batch #0 is the only available batch.
+  """
+  @spec earliest_unproven_batch_number() :: non_neg_integer() | nil
+  def earliest_unproven_batch_number do
+    query =
+      from(tb in TransactionBatch,
+        select: tb.number,
+        where: is_nil(tb.prove_id) and tb.number > 0,
+        order_by: [asc: tb.number],
+        limit: 1
+      )
+
+    query
+    |> Repo.one()
+  end
+
+  @doc """
+    Gets the number of the earliest batch in the `via_transaction_batches` table where the executing transaction is not set.
+    Batch #0 is filtered out, as it does not have a linked executing transaction.
+
+    ## Returns
+    - The number of a batch if it exists, otherwise `nil`. `nil` could mean either no batches imported yet or all imported batches are marked as executed or Batch #0 is the only available batch.
+  """
+  @spec earliest_unexecuted_batch_number() :: non_neg_integer() | nil
+  def earliest_unexecuted_batch_number do
+    query =
+      from(tb in TransactionBatch,
+        select: tb.number,
+        where: is_nil(tb.execute_id) and tb.number > 0,
+        order_by: [asc: tb.number],
+        limit: 1
+      )
+
+    query
+    |> Repo.one()
+  end
+
+  @doc """
+    Gets the number of the oldest batch from the `via_transaction_batches` table.
+
+    ## Returns
+    - The number of a batch if it exists, otherwise `nil`. `nil` means that there are no batches imported yet.
+  """
+  @spec oldest_available_batch_number() :: non_neg_integer() | nil
+  def oldest_available_batch_number do
+    query =
+      from(tb in TransactionBatch,
+        select: tb.number,
+        order_by: [asc: tb.number],
+        limit: 1
+      )
+
+    query
+    |> Repo.one()
+  end
+
+  @doc """
+    Gets the number of the youngest (the most recent) imported batch from the `via_transaction_batches` table.
+
+    ## Returns
+    - The number of a batch if it exists, otherwise `nil`. `nil` means that there are no batches imported yet.
+ """ + @spec latest_available_batch_number() :: non_neg_integer() | nil + def latest_available_batch_number do + query = + from(tb in TransactionBatch, + select: tb.number, + order_by: [desc: tb.number], + limit: 1 + ) + + query + |> Repo.one() + end + + @doc """ + Reads a list of L1 transactions by their hashes from the `via_lifecycle_l1_transactions` table. + + ## Parameters + - `l1_transaction_hashes`: A list of hashes to retrieve L1 transactions for. + + ## Returns + - A list of `Explorer.Chain.Via.LifecycleTransaction` corresponding to the hashes from the input list. The output list may be smaller than the input list. + """ + @spec lifecycle_transactions(maybe_improper_list(binary(), [])) :: [Explorer.Chain.Via.LifecycleTransaction] + def lifecycle_transactions(l1_transaction_hashes) do + query = + from( + lt in LifecycleTransaction, + select: {lt.hash, lt.id}, + where: lt.hash in ^l1_transaction_hashes + ) + + Repo.all(query, timeout: :infinity) + end + + @doc """ + Determines the next index for the L1 transaction available in the `via_lifecycle_l1_transactions` table. + + ## Returns + - The next available index. If there are no L1 transactions imported yet, it will return `1`. + """ + @spec next_id() :: non_neg_integer() + def next_id do + query = + from(lt in LifecycleTransaction, + select: lt.id, + order_by: [desc: lt.id], + limit: 1 + ) + + last_id = + query + |> Repo.one() + |> Kernel.||(0) + + last_id + 1 + end + + defp page_batches(query, %PagingOptions{key: nil}), do: query + + defp page_batches(query, %PagingOptions{key: {number}}) do + from(tb in query, where: tb.number < ^number) + end + + @doc """ + Gets information about the latest batch and calculates average time between commitments. + + ## Parameters + - `options`: Options passed to `Chain.select_repo()`. 
(Optional) + + ## Returns + - If batches exist and at least one batch is committed: + `{:ok, %{latest_batch_number: integer, latest_batch_timestamp: DateTime.t(), average_batch_time: integer}}` + where: + * latest_batch_number - number of the latest batch in the database + * latest_batch_timestamp - when the latest batch was committed to L1 + * average_batch_time - average number of seconds between commits for the last 10 batches + + - If no committed batches exist: `{:error, :not_found}` + """ + @spec get_latest_batch_info(keyword()) :: {:ok, map()} | {:error, :not_found} + def get_latest_batch_info(options \\ []) do + import Ecto.Query + + latest_batches_query = + from(batch in TransactionBatch, + join: tx in assoc(batch, :commit_transaction), + order_by: [desc: batch.number], + limit: 10, + select: %{ + number: batch.number, + timestamp: tx.timestamp + } + ) + + items = select_repo(options).all(latest_batches_query) + Instrumenter.prepare_batch_metric(items) + end +end diff --git a/apps/explorer/lib/explorer/chain/via/transaction_batch.ex b/apps/explorer/lib/explorer/chain/via/transaction_batch.ex new file mode 100644 index 000000000000..5a92cc598418 --- /dev/null +++ b/apps/explorer/lib/explorer/chain/via/transaction_batch.ex @@ -0,0 +1,83 @@ +defmodule Explorer.Chain.Via.TransactionBatch do + @moduledoc "Models a batch of transactions for Via." 
+ + use Explorer.Schema + + alias Explorer.Chain.{ + Block, + Hash, + Wei + } + + alias Explorer.Chain.Via.{BatchTransaction, LifecycleTransaction} + + @optional_attrs ~w(commit_id prove_id execute_id)a + + @required_attrs ~w(number timestamp l1_transaction_count l2_transaction_count root_hash l1_gas_price l2_fair_gas_price start_block end_block)a + + @type t :: %__MODULE__{ + number: non_neg_integer(), + timestamp: DateTime.t(), + l1_transaction_count: non_neg_integer(), + l2_transaction_count: non_neg_integer(), + root_hash: Hash.t(), + l1_gas_price: Wei.t(), + l2_fair_gas_price: Wei.t(), + start_block: Block.block_number(), + end_block: Block.block_number(), + commit_id: non_neg_integer() | nil, + commit_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil, + prove_id: non_neg_integer() | nil, + prove_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil, + execute_id: non_neg_integer() | nil, + execute_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil + } + + @primary_key {:number, :integer, autogenerate: false} + schema "via_transaction_batches" do + field(:timestamp, :utc_datetime_usec) + field(:l1_transaction_count, :integer) + field(:l2_transaction_count, :integer) + field(:root_hash, Hash.Full) + field(:l1_gas_price, Wei) + field(:l2_fair_gas_price, Wei) + field(:start_block, :integer) + field(:end_block, :integer) + + belongs_to(:commit_transaction, LifecycleTransaction, + foreign_key: :commit_id, + references: :id, + type: :integer + ) + + belongs_to(:prove_transaction, LifecycleTransaction, + foreign_key: :prove_id, + references: :id, + type: :integer + ) + + belongs_to(:execute_transaction, LifecycleTransaction, + foreign_key: :execute_id, + references: :id, + type: :integer + ) + + has_many(:l2_transactions, BatchTransaction, foreign_key: :batch_number) + + timestamps() + end + + @doc """ + Validates that the `attrs` are valid. 
+ """ + @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t() + def changeset(%__MODULE__{} = batches, attrs \\ %{}) do + batches + |> cast(attrs, @required_attrs ++ @optional_attrs) + |> validate_required(@required_attrs) + |> foreign_key_constraint(:commit_id) + |> foreign_key_constraint(:prove_id) + |> foreign_key_constraint(:execute_id) + |> unique_constraint(:number) + end +end diff --git a/apps/explorer/lib/explorer/repo.ex b/apps/explorer/lib/explorer/repo.ex index e90ac8c601dc..cb54a942abef 100644 --- a/apps/explorer/lib/explorer/repo.ex +++ b/apps/explorer/lib/explorer/repo.ex @@ -152,6 +152,7 @@ defmodule Explorer.Repo do Explorer.Repo.Suave, Explorer.Repo.Zilliqa, Explorer.Repo.ZkSync, + Explorer.Repo.Via, Explorer.Repo.Neon ] do defmodule repo do diff --git a/apps/explorer/lib/explorer/smart_contract/compiler_version.ex b/apps/explorer/lib/explorer/smart_contract/compiler_version.ex index 99f164c43e40..1e54e75910d1 100644 --- a/apps/explorer/lib/explorer/smart_contract/compiler_version.ex +++ b/apps/explorer/lib/explorer/smart_contract/compiler_version.ex @@ -91,7 +91,7 @@ defmodule Explorer.SmartContract.CompilerVersion do end defp fetch_compiler_versions_sc_verified_enabled(compiler_list_fn, compiler_type) do - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do # todo: refactor opportunity, currently, Blockscout 2 identical requests to microservice in order to get # Solc and Zk compiler versions case compiler_list_fn.() do diff --git a/apps/explorer/lib/explorer/smart_contract/helper.ex b/apps/explorer/lib/explorer/smart_contract/helper.ex index 1f972a99209f..b6ca6ba39f2f 100644 --- a/apps/explorer/lib/explorer/smart_contract/helper.ex +++ b/apps/explorer/lib/explorer/smart_contract/helper.ex @@ -130,7 +130,7 @@ defmodule Explorer.SmartContract.Helper do def prepare_bytecode_for_microservice(body, creation_input, 
deployed_bytecode) def prepare_bytecode_for_microservice(body, creation_input, deployed_bytecode) when is_nil(creation_input) do - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do body |> Map.put("code", deployed_bytecode) else @@ -193,7 +193,7 @@ defmodule Explorer.SmartContract.Helper do "chainId" => Application.get_env(:block_scout_web, :chain_id) } - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do {nil, deployed_bytecode, metadata} else case SmartContract.creation_transaction_with_bytecode(address_hash) do diff --git a/apps/explorer/lib/explorer/smart_contract/rust_verifier_interface_behaviour.ex b/apps/explorer/lib/explorer/smart_contract/rust_verifier_interface_behaviour.ex index 0de3d6639fd7..2df0154081fd 100644 --- a/apps/explorer/lib/explorer/smart_contract/rust_verifier_interface_behaviour.ex +++ b/apps/explorer/lib/explorer/smart_contract/rust_verifier_interface_behaviour.ex @@ -51,6 +51,18 @@ defmodule Explorer.SmartContract.RustVerifierInterfaceBehaviour do http_post_request(solidity_standard_json_verification_url(), append_metadata(body, metadata), true) end + def via_verify_standard_json_input( + %{ + "code" => _, + "solcCompiler" => _, + "zkCompiler" => _, + "input" => _ + } = body, + metadata + ) do + http_post_request(solidity_standard_json_verification_url(), append_metadata(body, metadata), true) + end + def vyper_verify_multipart( %{ "bytecode" => _, @@ -208,10 +220,12 @@ defmodule Explorer.SmartContract.RustVerifierInterfaceBehaviour do end defp verifier_path do - if Application.get_env(:explorer, :chain_type) == :zksync do - "/zksync-verifier" - else - "/verifier" + chain_type = Application.get_env(:explorer, :chain_type) + + cond do + chain_type == :zksync -> "/zksync-verifier" + 
chain_type == :via -> "/zksync-verifier" + true -> "/verifier" end end diff --git a/apps/explorer/lib/explorer/smart_contract/solidity/publisher.ex b/apps/explorer/lib/explorer/smart_contract/solidity/publisher.ex index 8c97bd77e59a..b800abc99dfa 100644 --- a/apps/explorer/lib/explorer/smart_contract/solidity/publisher.ex +++ b/apps/explorer/lib/explorer/smart_contract/solidity/publisher.ex @@ -400,7 +400,8 @@ defmodule Explorer.SmartContract.Solidity.Publisher do } base_attributes - |> (&if(Application.get_env(:explorer, :chain_type) == :zksync, + |> (&if( + Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via, do: Map.put(&1, :zk_compiler_version, params["zk_compiler_version"]), else: &1 )).() @@ -446,7 +447,7 @@ defmodule Explorer.SmartContract.Solidity.Publisher do end defp maybe_add_zksync_specific_data(params) do - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do Map.put(params, "constructor_arguments", SmartContract.zksync_get_constructor_arguments(params["address_hash"])) else params diff --git a/apps/explorer/lib/explorer/smart_contract/solidity/verifier.ex b/apps/explorer/lib/explorer/smart_contract/solidity/verifier.ex index ca3b3eecc00f..06501d9deee9 100644 --- a/apps/explorer/lib/explorer/smart_contract/solidity/verifier.ex +++ b/apps/explorer/lib/explorer/smart_contract/solidity/verifier.ex @@ -132,7 +132,7 @@ defmodule Explorer.SmartContract.Solidity.Verifier do {creation_transaction_input, deployed_bytecode, verifier_metadata} = fetch_data_for_verification(address_hash) verification_params = - if Application.get_env(:explorer, :chain_type) == :zksync do + if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via do %{ "solcCompiler" => params["compiler_version"], "zkCompiler" => params["zk_compiler_version"], @@ 
-145,7 +145,8 @@ defmodule Explorer.SmartContract.Solidity.Verifier do
     verification_params
     |> prepare_bytecode_for_microservice(creation_transaction_input, deployed_bytecode)
     |> Map.put("input", json_input)
-    |> (&if(Application.get_env(:explorer, :chain_type) == :zksync,
+    |> (&if(
+          Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via,
          do: RustVerifierInterface.zksync_verify_standard_json_input(&1, verifier_metadata),
          else: RustVerifierInterface.verify_standard_json_input(&1, verifier_metadata)
        )).()
@@ -234,7 +235,7 @@ defmodule Explorer.SmartContract.Solidity.Verifier do
     %{"enabled" => optimization, "runs" => optimization_runs} = json_input["settings"]["optimizer"]
 
     optimization_runs =
-      if Application.get_env(:explorer, :chain_type) == :zksync,
+      if Application.get_env(:explorer, :chain_type) == :zksync || Application.get_env(:explorer, :chain_type) == :via,
         do: to_string(optimization_runs),
         else: optimization_runs
diff --git a/apps/explorer/lib/explorer/third_party_integrations/sourcify.ex b/apps/explorer/lib/explorer/third_party_integrations/sourcify.ex
index d23491a94cd4..4541b48a1739 100644
--- a/apps/explorer/lib/explorer/third_party_integrations/sourcify.ex
+++ b/apps/explorer/lib/explorer/third_party_integrations/sourcify.ex
@@ -370,7 +370,9 @@ defmodule Explorer.ThirdPartyIntegrations.Sourcify do
     runs =
       optimizer
       |> Map.get("runs")
-      |> (&if(Application.get_env(:explorer, :chain_type) == :zksync,
+      |> (&if(
+            Application.get_env(:explorer, :chain_type) == :zksync ||
+              Application.get_env(:explorer, :chain_type) == :via,
           do: to_string(&1),
           else: &1
         )).()
diff --git a/apps/explorer/priv/via/migrations/20211202082101_make_transaction_r_s_v_optional.exs b/apps/explorer/priv/via/migrations/20211202082101_make_transaction_r_s_v_optional.exs
new file mode 100644
index 000000000000..3e7b7884926e
--- /dev/null
+++ b/apps/explorer/priv/via/migrations/20211202082101_make_transaction_r_s_v_optional.exs
@@ -0,0
+1,17 @@ +defmodule Explorer.Repo.Via.Migrations.MakeTransactionRSVOptional do + use Ecto.Migration + + def change do + alter table(:transactions) do + modify(:r, :numeric, precision: 100, null: true) + end + + alter table(:transactions) do + modify(:s, :numeric, precision: 100, null: true) + end + + alter table(:transactions) do + modify(:v, :numeric, precision: 100, null: true) + end + end +end diff --git a/apps/explorer/priv/via/migrations/20231213171043_create_via_tables.exs b/apps/explorer/priv/via/migrations/20231213171043_create_via_tables.exs new file mode 100644 index 000000000000..e1bdafcba72c --- /dev/null +++ b/apps/explorer/priv/via/migrations/20231213171043_create_via_tables.exs @@ -0,0 +1,82 @@ +defmodule Explorer.Repo.Via.Migrations.CreateViaTables do + use Ecto.Migration + + def change do + create table(:via_lifecycle_l1_transactions, primary_key: false) do + add(:id, :integer, null: false, primary_key: true) + add(:hash, :bytea, null: false) + add(:timestamp, :"timestamp without time zone", null: false) + timestamps(null: false, type: :utc_datetime_usec) + end + + create(unique_index(:via_lifecycle_l1_transactions, :hash)) + + create table(:via_transaction_batches, primary_key: false) do + add(:number, :integer, null: false, primary_key: true) + add(:timestamp, :"timestamp without time zone", null: false) + add(:l1_transaction_count, :integer, null: false) + add(:l2_transaction_count, :integer, null: false) + add(:root_hash, :bytea, null: false) + add(:l1_gas_price, :numeric, precision: 100, null: false) + add(:l2_fair_gas_price, :numeric, precision: 100, null: false) + add(:start_block, :integer, null: false) + add(:end_block, :integer, null: false) + + add( + :commit_id, + references(:via_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer), + null: true + ) + + add( + :prove_id, + references(:via_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer), + null: true + ) + + 
add( + :execute_id, + references(:via_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer), + null: true + ) + + timestamps(null: false, type: :utc_datetime_usec) + end + + create table(:via_batch_l2_transactions, primary_key: false) do + add( + :batch_number, + references(:via_transaction_batches, + column: :number, + on_delete: :delete_all, + on_update: :update_all, + type: :integer + ), + null: false + ) + + add(:transaction_hash, :bytea, null: false, primary_key: true) + timestamps(null: false, type: :utc_datetime_usec) + end + + create(index(:via_batch_l2_transactions, :batch_number)) + + create table(:via_batch_l2_blocks, primary_key: false) do + add( + :batch_number, + references(:via_transaction_batches, + column: :number, + on_delete: :delete_all, + on_update: :update_all, + type: :integer + ), + null: false + ) + + add(:hash, :bytea, null: false, primary_key: true) + timestamps(null: false, type: :utc_datetime_usec) + end + + create(index(:via_batch_l2_blocks, :batch_number)) + end +end diff --git a/apps/explorer/priv/via/migrations/20240716095237_add_zk_compiler_version_to_smart_contracts.exs b/apps/explorer/priv/via/migrations/20240716095237_add_zk_compiler_version_to_smart_contracts.exs new file mode 100644 index 000000000000..e6bbfcfb9e31 --- /dev/null +++ b/apps/explorer/priv/via/migrations/20240716095237_add_zk_compiler_version_to_smart_contracts.exs @@ -0,0 +1,10 @@ +defmodule Explorer.Repo.Via.Migrations.AddZkCompilerVersionToSmartContracts do + use Ecto.Migration + + def change do + alter table(:smart_contracts) do + add(:zk_compiler_version, :string, null: true) + modify(:optimization_runs, :string, null: true) + end + end +end diff --git a/apps/explorer/priv/via/migrations/20241028102853_add_contract_code_refetched.exs b/apps/explorer/priv/via/migrations/20241028102853_add_contract_code_refetched.exs new file mode 100644 index 000000000000..0767b24f8211 --- /dev/null +++ 
b/apps/explorer/priv/via/migrations/20241028102853_add_contract_code_refetched.exs @@ -0,0 +1,13 @@ +defmodule Explorer.Repo.Via.Migrations.AddContractCodeRefetched do + use Ecto.Migration + + def change do + alter table(:addresses) do + add(:contract_code_refetched, :boolean, default: false) + end + + execute(""" + ALTER TABLE addresses ALTER COLUMN contract_code_refetched SET DEFAULT true; + """) + end +end diff --git a/apps/explorer/test/explorer/smart_contract/solidity/publisher_test.exs b/apps/explorer/test/explorer/smart_contract/solidity/publisher_test.exs index 2fcdec3d453d..5727775848db 100644 --- a/apps/explorer/test/explorer/smart_contract/solidity/publisher_test.exs +++ b/apps/explorer/test/explorer/smart_contract/solidity/publisher_test.exs @@ -1,4 +1,5 @@ -if Application.compile_env(:explorer, :chain_type) !== :zksync do +if Application.compile_env(:explorer, :chain_type) !== :zksync && + Application.compile_env(:explorer, :chain_type) !== :via do defmodule Explorer.SmartContract.Solidity.PublisherTest do use ExUnit.Case, async: true diff --git a/apps/explorer/test/explorer/smart_contract/solidity/verifier_test.exs b/apps/explorer/test/explorer/smart_contract/solidity/verifier_test.exs index 89dbd97ba48c..a5d1886fc7d1 100644 --- a/apps/explorer/test/explorer/smart_contract/solidity/verifier_test.exs +++ b/apps/explorer/test/explorer/smart_contract/solidity/verifier_test.exs @@ -1,4 +1,5 @@ -if Application.compile_env(:explorer, :chain_type) !== :zksync do +if Application.compile_env(:explorer, :chain_type) !== :zksync && + Application.compile_env(:explorer, :chain_type) !== :via do defmodule Explorer.SmartContract.Solidity.VerifierTest do use ExUnit.Case, async: true use Explorer.DataCase diff --git a/apps/explorer/test/explorer/smart_contract/vyper/publisher_test.exs b/apps/explorer/test/explorer/smart_contract/vyper/publisher_test.exs index 7e31394cccaa..e8b6ce919515 100644 --- a/apps/explorer/test/explorer/smart_contract/vyper/publisher_test.exs 
+++ b/apps/explorer/test/explorer/smart_contract/vyper/publisher_test.exs @@ -1,4 +1,5 @@ -if Application.compile_env(:explorer, :chain_type) !== :zksync do +if Application.compile_env(:explorer, :chain_type) !== :zksync && + Application.compile_env(:explorer, :chain_type) !== :via do defmodule Explorer.SmartContract.Vyper.PublisherTest do use ExUnit.Case, async: true diff --git a/apps/explorer/test/support/factory.ex b/apps/explorer/test/support/factory.ex index d8f52ebba90e..ddee7ce9d414 100644 --- a/apps/explorer/test/support/factory.ex +++ b/apps/explorer/test/support/factory.ex @@ -68,7 +68,7 @@ defmodule Explorer.Factory do alias Ueberauth.Auth.{Extra, Info} alias Ueberauth.Auth - if @chain_type == :zksync do + if @chain_type == :zksync || @chain_type == :via do @optimization_runs "1" else @optimization_runs 1 @@ -281,6 +281,13 @@ defmodule Explorer.Factory do } end + :via -> + defp address_factory_chain_type_fields() do + %{ + contract_code_refetched: true + } + end + _ -> defp address_factory_chain_type_fields(), do: %{} end diff --git a/apps/indexer/lib/indexer/fetcher/via/batches_status_tracker.ex b/apps/indexer/lib/indexer/fetcher/via/batches_status_tracker.ex new file mode 100644 index 000000000000..42dc9be76f7c --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/via/batches_status_tracker.ex @@ -0,0 +1,242 @@ +defmodule Indexer.Fetcher.Via.BatchesStatusTracker do + @moduledoc """ + Updates batches statuses and imports historical batches to the `via_transaction_batches` table. 
+ + Repetitiveness is supported by sending the following statuses every `recheck_interval` seconds: + - `:check_committed`: Discover batches committed to L1 + - `:check_proven`: Discover batches proven in L1 + - `:check_executed`: Discover batches executed on L1 + - `:recover_batches`: Recover missed batches found during the handling of the three previous messages + - `:check_historical`: Check if the imported batches chain does not start with Batch #0 + + The initial message is `:check_committed`. If it is discovered that updating batches + in the `via_transaction_batches` table is not possible because some are missing, + `:recover_batches` is sent. The next messages are `:check_proven` and `:check_executed`. + Both could result in sending `:recover_batches` as well. + + The logic ensures that every handler emits the `:recover_batches` message to return to + the previous "progressing" state. If `:recover_batches` is called during handling `:check_committed`, + it will be sent again after finishing batch recovery. Similar logic applies to `:check_proven` and + `:check_executed`. + + The last message in the loop is `:check_historical`. + + |---------------------------------------------------------------------------| + |-> check_committed -> check_proven -> check_executed -> check_historical ->| + | ^ | ^ | ^ + v | v | v | + recover_batches recover_batches recover_batches + + If a batch status change is discovered during handling of `check_committed`, `check_proven`, + or `check_executed` messages, the corresponding L1 transactions are imported and associated + with the batches. Rollup transactions and blocks are not re-associated since it is assumed + to be done by `Indexer.Fetcher.Via.TransactionBatch` or during handling of + the `recover_batches` message. + + The `recover_batches` handler downloads batch information from RPC and sets its actual L1 state + by linking with L1 transactions. 
+ + The `check_historical` message initiates the check if the tail of the batch chain is Batch 0. + If the tail is missing, batches are downloaded from RPC in chunks of `batches_max_range` in every + iteration. The batches are imported together with associated L1 transactions. + """ + + use GenServer + use Indexer.Fetcher + + require Logger + + # alias Explorer.Chain.Events.Publisher + # TODO: publish event when new committed batches appear + + alias Indexer.Fetcher.Via.Discovery.Workers + alias Indexer.Fetcher.Via.StatusTracking.{Committed, Executed, Proven} + + def child_spec(start_link_arguments) do + spec = %{ + id: __MODULE__, + start: {__MODULE__, :start_link, start_link_arguments}, + restart: :transient, + type: :worker + } + + Supervisor.child_spec(spec, []) + end + + def start_link(args, gen_server_options \\ []) do + GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__)) + end + + @impl GenServer + def init(args) do + Logger.metadata(fetcher: :via_batches_tracker) + + config_tracker = Application.get_all_env(:indexer)[Indexer.Fetcher.Via.BatchesStatusTracker] + l1_rpc = config_tracker[:via_l1_rpc] + recheck_interval = config_tracker[:recheck_interval] + config_fetcher = Application.get_all_env(:indexer)[Indexer.Fetcher.Via.TransactionBatch] + chunk_size = config_fetcher[:chunk_size] + batches_max_range = config_fetcher[:batches_max_range] + + Process.send(self(), :check_committed, []) + + {:ok, + %{ + config: %{ + json_l2_rpc_named_arguments: args[:json_rpc_named_arguments], + json_l1_rpc_named_arguments: [ + transport: EthereumJSONRPC.HTTP, + transport_options: [ + http: EthereumJSONRPC.HTTP.HTTPoison, + urls: [l1_rpc], + http_options: [ + recv_timeout: :timer.minutes(10), + timeout: :timer.minutes(10), + hackney: [pool: :ethereum_jsonrpc] + ] + ] + ], + recheck_interval: recheck_interval, + chunk_size: chunk_size, + batches_max_range: batches_max_range + }, + data: %{} + }} + end + + @impl GenServer + def 
handle_info({ref, _result}, state) do + Process.demonitor(ref, [:flush]) + {:noreply, state} + end + + # Handles the `:check_historical` message to download historical batches from RPC if necessary and + # import them to the `via_transaction_batches` table. The batches are imported together with L1 + # transactions associations, rollup blocks and transactions. + # Since it is the final handler in the loop, it schedules sending the `:check_committed` message + # to initiate the next iteration. The sending of the message is delayed, taking into account + # the time remaining after the previous handlers' execution. + # + # ## Parameters + # - `:check_historical`: the message triggering the handler + # - `state`: current state of the fetcher containing both the fetcher configuration + # and data re-used by different handlers. + # + # ## Returns + # - `{:noreply, new_state}` where `new_state` contains `data` empty + @impl GenServer + def handle_info(:check_historical, state) + when is_map(state) and is_map_key(state, :config) and is_map_key(state, :data) and + is_map_key(state.config, :recheck_interval) and is_map_key(state.config, :batches_max_range) and + is_map_key(state.config, :json_l2_rpc_named_arguments) and + is_map_key(state.config, :chunk_size) do + {handle_duration, _} = + :timer.tc(&Workers.batches_catchup/1, [ + %{ + batches_max_range: state.config.batches_max_range, + chunk_size: state.config.chunk_size, + json_rpc_named_arguments: state.config.json_l2_rpc_named_arguments + } + ]) + + Process.send_after( + self(), + :check_committed, + max(:timer.seconds(state.config.recheck_interval) - div(update_duration(state.data, handle_duration), 1000), 0) + ) + + {:noreply, %{state | data: %{}}} + end + + # Handles the `:recover_batches` message to download a set of batches from RPC and imports them + # to the `via_transaction_batches` table. 
It is expected that the message is sent from handlers updating + # batches statuses when they discover the absence of batches in the `via_transaction_batches` table. + # The batches are imported together with L1 transactions associations, rollup blocks, and transactions. + # + # ## Parameters + # - `:recover_batches`: the message triggering the handler + # - `state`: current state of the fetcher containing both the fetcher configuration + # and data related to the batches recovery: + # - `state.data.batches`: list of the batches to recover + # - `state.data.switched_from`: the message to send after the batch recovery + # + # ## Returns + # - `{:noreply, new_state}` where `new_state` contains updated `duration` of the iteration + @impl GenServer + def handle_info(:recover_batches, state) + when is_map(state) and is_map_key(state, :config) and is_map_key(state, :data) and + is_map_key(state.config, :json_l2_rpc_named_arguments) and is_map_key(state.config, :chunk_size) and + is_map_key(state.data, :batches) and is_map_key(state.data, :switched_from) do + {handle_duration, _} = + :timer.tc( + &Workers.get_full_batches_info_and_import/2, + [ + state.data.batches, + %{ + chunk_size: state.config.chunk_size, + json_rpc_named_arguments: state.config.json_l2_rpc_named_arguments + } + ] + ) + + Process.send(self(), state.data.switched_from, []) + + {:noreply, %{state | data: %{duration: update_duration(state.data, handle_duration)}}} + end + + # Handles `:check_committed`, `:check_proven`, and `:check_executed` messages to update the + # statuses of batches by associating L1 transactions with them. For different messages, it invokes + # different underlying functions due to different natures of discovering batches with changed status. + # Another reason why statuses are being tracked differently is the different pace of status changes: + # a batch is committed in a few minutes after sealing, proven in a few hours, and executed once in a day. 
+ # Depending on the value returned from the underlying function, either a message (`:check_proven`, + # `:check_executed`, or `:check_historical`) to switch to the next status checker is sent, or a list + # of batches to recover is provided together with `:recover_batches`. + # + # ## Parameters + # - `input`: one of `:check_committed`, `:check_proven`, and `:check_executed` + # - `state`: the current state of the fetcher containing both the fetcher configuration + # and data reused by different handlers. + # + # ## Returns + # - `{:noreply, new_state}` where `new_state` contains the updated `duration` of the iteration, + # could also contain the list of batches to recover and the message to return back to + # the corresponding status update checker. + @impl GenServer + def handle_info(input, state) + when input in [:check_committed, :check_proven, :check_executed] do + {output, func} = + case input do + :check_committed -> {:check_proven, &Committed.look_for_batches_and_update/1} + :check_proven -> {:check_executed, &Proven.look_for_batches_and_update/1} + :check_executed -> {:check_historical, &Executed.look_for_batches_and_update/1} + end + + {handle_duration, result} = :timer.tc(func, [state.config]) + + {switch_to, state_data} = + case result do + :ok -> + {output, %{duration: update_duration(state.data, handle_duration)}} + + {:recovery_required, batches} -> + {:recover_batches, + %{ + switched_from: input, + batches: batches, + duration: update_duration(state.data, handle_duration) + }} + end + + Process.send(self(), switch_to, []) + {:noreply, %{state | data: state_data}} + end + + defp update_duration(data, cur_duration) do + if Map.has_key?(data, :duration) do + data.duration + cur_duration + else + cur_duration + end + end +end diff --git a/apps/indexer/lib/indexer/fetcher/via/discovery/batches_data.ex b/apps/indexer/lib/indexer/fetcher/via/discovery/batches_data.ex new file mode 100644 index 000000000000..fe3a48a412ca --- /dev/null +++ 
b/apps/indexer/lib/indexer/fetcher/via/discovery/batches_data.ex @@ -0,0 +1,413 @@ +defmodule Indexer.Fetcher.Via.Discovery.BatchesData do + @moduledoc """ + Provides main functionality to extract data for batches and associated with them + rollup blocks, rollup and L1 transactions. + """ + + alias EthereumJSONRPC.Block.ByNumber + alias Indexer.Fetcher.Via.Utils.Rpc + + import Indexer.Fetcher.Via.Utils.Logging, only: [log_info: 1, log_details_chunk_handling: 4] + import EthereumJSONRPC, only: [quantity_to_integer: 1] + + @doc """ + Downloads batches, associates rollup blocks and transactions, and imports the results into the database. + Data is retrieved from the RPC endpoint in chunks of `chunk_size`. + + ## Parameters + - `batches`: Either a tuple of two integers, `start_batch_number` and `end_batch_number`, defining + the range of batches to receive, or a list of batch numbers, `batches_list`. + - `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint, + and `json_rpc_named_arguments` defining parameters for the RPC connection. 
+ + ## Returns + - `{batches_to_import, l2_blocks_to_import, l2_transactions_to_import}` + where + - `batches_to_import` is a map of batches data + - `l2_blocks_to_import` is a list of blocks associated with batches by batch numbers + - `l2_transactions_to_import` is a list of transactions associated with batches by batch numbers + """ + @spec extract_data_from_batches([integer()] | {integer(), integer()}, %{ + :chunk_size => pos_integer(), + :json_rpc_named_arguments => any(), + optional(any()) => any() + }) :: {map(), list(), list()} + def extract_data_from_batches(batches, config) + + def extract_data_from_batches({start_batch_number, end_batch_number}, config) + when is_integer(start_batch_number) and is_integer(end_batch_number) and + is_map(config) do + start_batch_number..end_batch_number + |> Enum.to_list() + |> do_extract_data_from_batches(config) + end + + def extract_data_from_batches(batches_list, config) + when is_list(batches_list) and + is_map(config) do + batches_list + |> do_extract_data_from_batches(config) + end + + defp do_extract_data_from_batches(batches_list, config) when is_list(batches_list) do + initial_batches_to_import = collect_batches_details(batches_list, config) + log_info("Collected details for #{length(Map.keys(initial_batches_to_import))} batches") + + batches_to_import = get_block_ranges(initial_batches_to_import, config) + + {l2_blocks_to_import, l2_transactions_to_import} = get_l2_blocks_and_transactions(batches_to_import, config) + log_info("Linked #{length(l2_blocks_to_import)} L2 blocks and #{length(l2_transactions_to_import)} L2 transactions") + + {batches_to_import, l2_blocks_to_import, l2_transactions_to_import} + end + + @doc """ + Collects all unique L1 transactions from the given list of batches, including transactions + that change the status of a batch and their timestamps. 
+ + **Note**: Every map describing an L1 transaction in the response is not ready for importing into + the database since it does not contain `:id` elements. + + ## Parameters + - `batches`: A list of maps describing batches. Each map is expected to define the following + elements: `commit_transaction_hash`, `commit_timestamp`, `prove_transaction_hash`, `prove_timestamp`, + `executed_transaction_hash`, `executed_timestamp`. + + ## Returns + - `l1_transactions`: A map where keys are L1 transaction hashes, and values are maps containing + transaction hashes and timestamps. + """ + @spec collect_l1_transactions(list()) :: map() + def collect_l1_transactions(batches) + when is_list(batches) do + l1_transactions = + batches + |> Enum.reduce(%{}, fn batch, l1_transactions -> + [ + %{hash: batch.commit_transaction_hash, timestamp: batch.commit_timestamp}, + %{hash: batch.prove_transaction_hash, timestamp: batch.prove_timestamp}, + %{hash: batch.executed_transaction_hash, timestamp: batch.executed_timestamp} + ] + |> Enum.reduce(l1_transactions, fn l1_transaction, acc -> + # checks if l1_transaction is not empty and adds to acc + add_l1_transaction_to_list(acc, l1_transaction) + end) + end) + + log_info("Collected #{length(Map.keys(l1_transactions))} L1 hashes") + + l1_transactions + end + + defp add_l1_transaction_to_list(l1_transactions, l1_transaction) do + if l1_transaction.hash != Rpc.get_binary_zero_hash() do + Map.put(l1_transactions, l1_transaction.hash, l1_transaction) + else + l1_transactions + end + end + + # Divides the list of batch numbers into chunks of size `chunk_size` to combine + # `zks_getL1BatchDetails` calls in one chunk together. To simplify further handling, + # each call is combined with the batch number in the JSON request identifier field. + # This allows parsing and associating every response with a particular batch, producing + # a list of maps describing the batches, ready for further handling. 
+ # + # **Note**: The batches in the resulting map are not ready for importing into the DB. L1 transaction + # indices as well as the rollup blocks range must be added, and then batch descriptions + # must be pruned (see Indexer.Fetcher.Via.Utils.Db.prune_json_batch/1). + # + # ## Parameters + # - `batches_list`: A list of batch numbers. + # - `config`: A map containing `chunk_size` specifying the number of `zks_getL1BatchDetails` in + # one HTTP request, and `json_rpc_named_arguments` describing parameters for + # RPC connection. + # + # ## Returns + # - `batches_details`: A map where keys are batch numbers, and values are maps produced + # after parsing responses of `zks_getL1BatchDetails` calls. + defp collect_batches_details( + batches_list, + %{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config + ) + when is_list(batches_list) do + batches_list_length = length(batches_list) + + {batches_details, _} = + batches_list + |> Enum.chunk_every(chunk_size) + |> Enum.reduce({%{}, 0}, fn chunk, {details, a} -> + log_details_chunk_handling("Collecting details", chunk, a * chunk_size, batches_list_length) + + requests = + chunk + |> Enum.map(fn batch_number -> + EthereumJSONRPC.request(%{ + id: batch_number, + method: "zks_getL1BatchDetails", + params: [batch_number] + }) + end) + + details = + requests + |> Rpc.fetch_batches_details(json_rpc_named_arguments) + |> Enum.reduce( + details, + fn resp, details -> + Map.put(details, resp.id, Rpc.transform_batch_details_to_map(resp.result)) + end + ) + + {details, a + 1} + end) + + batches_details + end + + # Extends each batch description with the block numbers specifying the start and end of + # a range of blocks included in the batch. The block ranges are obtained through the RPC call + # `zks_getL1BatchBlockRange`. The calls are combined in chunks of `chunk_size`. To distinguish + # each call in the chunk, they are combined with the batch number in the JSON request + # identifier field. 
+ # + # ## Parameters + # - `batches`: A map of batch descriptions. + # - `config`: A map containing `chunk_size`, specifying the number of `zks_getL1BatchBlockRange` + # in one HTTP request, and `json_rpc_named_arguments` describing parameters for + # RPC connection. + # + # ## Returns + # - `updated_batches`: A map of batch descriptions where each description is updated with + # a range (elements `:start_block` and `:end_block`) of rollup blocks included in the batch. + defp get_block_ranges( + batches, + %{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config + ) + when is_map(batches) do + keys = Map.keys(batches) + batches_list_length = length(keys) + + {updated_batches, _} = + keys + |> Enum.chunk_every(chunk_size) + |> Enum.reduce({batches, 0}, fn batches_chunk, {batches_with_block_ranges, a} -> + log_details_chunk_handling("Collecting block ranges", batches_chunk, a * chunk_size, batches_list_length) + + {request_block_ranges_for_batches(batches_chunk, batches, batches_with_block_ranges, json_rpc_named_arguments), + a + 1} + end) + + updated_batches + end + + # For a given list of rollup batch numbers, this function builds a list of requests + # to `zks_getL1BatchBlockRange`, executes them, and extends the batches' descriptions with + # ranges of rollup blocks associated with each batch. + # + # ## Parameters + # - `batches_numbers`: A list with batch numbers. + # - `batches_src`: A list containing original batches descriptions. + # - `batches_dst`: A map with extended batch descriptions containing rollup block ranges. + # - `json_rpc_named_arguments`: Describes parameters for RPC connection. + # + # ## Returns + # - An updated version of `batches_dst` with new entities containing rollup block ranges. 
+ defp request_block_ranges_for_batches(batches_numbers, batches_src, batches_dst, json_rpc_named_arguments) do + batches_numbers + |> Enum.reduce([], fn batch_number, requests -> + batch = Map.get(batches_src, batch_number) + # Prepare requests list to get blocks ranges + case is_nil(batch.start_block) or is_nil(batch.end_block) do + true -> + [ + EthereumJSONRPC.request(%{ + id: batch_number, + method: "zks_getL1BatchBlockRange", + params: [batch_number] + }) + | requests + ] + + false -> + requests + end + end) + |> Rpc.fetch_blocks_ranges(json_rpc_named_arguments) + |> Enum.reduce(batches_dst, fn resp, updated_batches -> + Map.update!(updated_batches, resp.id, fn batch -> + [start_block, end_block] = resp.result + + Map.merge(batch, %{ + start_block: quantity_to_integer(start_block), + end_block: quantity_to_integer(end_block) + }) + end) + end) + end + + # Unfolds the ranges of rollup blocks in each batch description, makes RPC `eth_getBlockByNumber` calls, + # and builds two lists: a list of rollup blocks associated with each batch and a list of rollup transactions + # associated with each batch. RPC calls are made in chunks of `chunk_size`. To distinguish + # each call in the chunk, they are combined with the block number in the JSON request + # identifier field. + # + # ## Parameters + # - `batches`: A map of batch descriptions. Each description must contain `start_block` and + # `end_block`, specifying the range of blocks associated with the batch. + # - `config`: A map containing `chunk_size`, specifying the number of `eth_getBlockByNumber` + # in one HTTP request, and `json_rpc_named_arguments` describing parameters for + # RPC connection. + # + # ## Returns + # - {l2_blocks_to_import, l2_transactions_to_import}, where + # - `l2_blocks_to_import` contains a list of all rollup blocks with their associations with + # the provided batches. The association is a map with the block hash and the batch number. 
+ # - `l2_transactions_to_import` contains a list of all rollup transactions with their associations + # with the provided batches. The association is a map with the transaction hash and + # the batch number. + defp get_l2_blocks_and_transactions( + batches, + %{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config + ) do + # Extracts the rollup block range for every batch, unfolds it and + # build chunks of `eth_getBlockByNumber` calls + {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size} = + batches + |> Map.keys() + |> Enum.reduce({%{}, [], [], 0}, fn batch_number, cur_batch_acc -> + batch = Map.get(batches, batch_number) + + batch.start_block..batch.end_block + |> Enum.chunk_every(chunk_size) + |> Enum.reduce(cur_batch_acc, fn blocks_range, cur_chunk_acc -> + build_blocks_map_and_chunks_of_rpc_requests(batch_number, blocks_range, cur_chunk_acc, chunk_size) + end) + end) + + # After the last iteration of the reduce loop it is a valid case + # when the calls from the last chunk are not in the chunks list, + # so it is appended + finalized_chunked_requests = + if cur_chunk_size > 0 do + [cur_chunk | chunked_requests] + else + chunked_requests + end + + # The chunks requests are sent to the RPC node and parsed to + # extract rollup block hashes and rollup transactions. 
+ {blocks_associations, l2_transactions_to_import} = + finalized_chunked_requests + |> Enum.reduce({blocks_to_batches, []}, fn requests, {blocks, l2_transactions} -> + requests + |> Rpc.fetch_blocks_details(json_rpc_named_arguments) + |> extract_block_hash_and_transactions_list(blocks, l2_transactions) + end) + + # Check that amount of received transactions for a batch is correct + batches + |> Map.keys() + |> Enum.each(fn batch_number -> + batch = Map.get(batches, batch_number) + transactions_in_batch = batch.l1_transaction_count + batch.l2_transaction_count + + ^transactions_in_batch = + Enum.count(l2_transactions_to_import, fn transaction -> + transaction.batch_number == batch_number + end) + end) + + {Map.values(blocks_associations), l2_transactions_to_import} + end + + # For a given list of rollup block numbers, this function extends: + # - a map containing the linkage between rollup block numbers and batch numbers + # - a list of chunks of `eth_getBlockByNumber` requests + # - an uncompleted chunk of `eth_getBlockByNumber` requests + # + # ## Parameters + # - `batch_number`: The number of the batch to which the list of rollup blocks is linked. + # - `blocks_numbers`: A list of rollup block numbers. + # - `cur_chunk_acc`: The current state of the accumulator containing: + # - the current state of the map containing the linkage between rollup block numbers and batch numbers + # - the current state of the list of chunks of `eth_getBlockByNumber` requests + # - the current state of the uncompleted chunk of `eth_getBlockByNumber` requests + # - the size of the uncompleted chunk + # - `chunk_size`: The maximum size of the chunk of `eth_getBlockByNumber` requests + # + # ## Returns + # - {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size}, where: + # - `blocks_to_batches`: An updated map with new blocks added. + # - `chunked_requests`: An updated list of lists of `eth_getBlockByNumber` requests. 
+ # - `cur_chunk`: An uncompleted chunk of `eth_getBlockByNumber` requests or an empty list. + # - `cur_chunk_size`: The size of the uncompleted chunk. + defp build_blocks_map_and_chunks_of_rpc_requests(batch_number, blocks_numbers, cur_chunk_acc, chunk_size) do + blocks_numbers + |> Enum.reduce(cur_chunk_acc, fn block_number, {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size} -> + blocks_to_batches = Map.put(blocks_to_batches, block_number, %{batch_number: batch_number}) + + cur_chunk = [ + ByNumber.request( + %{ + id: block_number, + number: block_number + }, + false + ) + | cur_chunk + ] + + if cur_chunk_size + 1 == chunk_size do + {blocks_to_batches, [cur_chunk | chunked_requests], [], 0} + else + {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size + 1} + end + end) + end + + # Parses responses from `eth_getBlockByNumber` calls and extracts the block hash and the + # transactions lists. The block hash and transaction hashes are used to build associations + # with the corresponding batches by utilizing their numbers. + # + # This function is not part of the `Indexer.Fetcher.Via.Utils.Rpc` module since the resulting + # lists are too specific for further import to the database. + # + # ## Parameters + # - `json_responses`: A list of responses to `eth_getBlockByNumber` calls. + # - `l2_blocks`: A map of accumulated associations between rollup blocks and batches. + # - `l2_transactions`: A list of accumulated associations between rollup transactions and batches. + # + # ## Returns + # - {l2_blocks, l2_transactions}, where + # - `l2_blocks`: Updated map of accumulated associations between rollup blocks and batches. + # - `l2_transactions`: Updated list of accumulated associations between rollup transactions and batches. 
+ defp extract_block_hash_and_transactions_list(json_responses, l2_blocks, l2_transactions) do + json_responses + |> Enum.reduce({l2_blocks, l2_transactions}, fn resp, {l2_blocks, l2_transactions} -> + {block, l2_blocks} = + Map.get_and_update(l2_blocks, resp.id, fn block -> + {block, Map.put(block, :hash, Map.get(resp.result, "hash"))} + end) + + l2_transactions = + case Map.get(resp.result, "transactions") do + nil -> + l2_transactions + + new_transactions -> + Enum.reduce(new_transactions, l2_transactions, fn l2_transaction_hash, l2_transactions -> + [ + %{ + batch_number: block.batch_number, + transaction_hash: l2_transaction_hash + } + | l2_transactions + ] + end) + end + + {l2_blocks, l2_transactions} + end) + end +end diff --git a/apps/indexer/lib/indexer/fetcher/via/discovery/workers.ex b/apps/indexer/lib/indexer/fetcher/via/discovery/workers.ex new file mode 100644 index 000000000000..d521963d9536 --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/via/discovery/workers.ex @@ -0,0 +1,174 @@ +defmodule Indexer.Fetcher.Via.Discovery.Workers do + @moduledoc """ + Provides functions to download a set of batches from RPC and import them to DB. + """ + + alias Indexer.Fetcher.Via.Utils.Db + alias Indexer.Prometheus.Instrumenter + + import Indexer.Fetcher.Via.Discovery.BatchesData, + only: [ + collect_l1_transactions: 1, + extract_data_from_batches: 2 + ] + + import Indexer.Fetcher.Via.Utils.Logging, only: [log_info: 1] + + @doc """ + Downloads minimal batches data (batch, associated rollup blocks and transactions hashes) from RPC + and imports them to the DB. Data is retrieved from the RPC endpoint in chunks of `chunk_size`. + Import of associated L1 transactions does not happen, assuming that the batch import happens regularly + enough and last downloaded batches does not contain L1 associations anyway. + Later `Indexer.Fetcher.Via.BatchesStatusTracker` will update any batch state changes and + import required L1 transactions. 
+ + ## Parameters + - `start_batch_number`: The first batch in the range to download. + - `end_batch_number`: The last batch in the range to download. + - `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint, + and `json_rpc_named_arguments` defining parameters for the RPC connection. + + ## Returns + - `:ok` + """ + @spec get_minimal_batches_info_and_import(non_neg_integer(), non_neg_integer(), %{ + :chunk_size => integer(), + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + optional(any()) => any() + }) :: :ok + def get_minimal_batches_info_and_import(start_batch_number, end_batch_number, config) + when is_integer(start_batch_number) and + is_integer(end_batch_number) and + (is_map(config) and is_map_key(config, :json_rpc_named_arguments) and + is_map_key(config, :chunk_size)) do + {batches_to_import, l2_blocks_to_import, l2_transactions_to_import} = + extract_data_from_batches({start_batch_number, end_batch_number}, config) + + batches_list_to_import = + batches_to_import + |> Map.values() + |> Enum.reduce([], fn batch, batches_list -> + [Db.prune_json_batch(batch) | batches_list] + end) + + Db.import_to_db( + batches_list_to_import, + [], + l2_transactions_to_import, + l2_blocks_to_import + ) + + last_batch = + batches_list_to_import + |> Enum.max_by(& &1.number, fn -> nil end) + + # credo:disable-for-next-line + if last_batch do + Instrumenter.set_latest_batch(last_batch.number, last_batch.timestamp) + end + + :ok + end + + @doc """ + Downloads batches, associates L1 transactions, rollup blocks and transactions with the given list of batch numbers, + and imports the results into the database. Data is retrieved from the RPC endpoint in chunks of `chunk_size`. + + ## Parameters + - `batches_numbers_list`: List of batch numbers to be retrieved. 
+ - `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint, + and `json_rpc_named_arguments` defining parameters for the RPC connection. + + ## Returns + - `:ok` + """ + @spec get_full_batches_info_and_import([integer()], %{ + :chunk_size => integer(), + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + optional(any()) => any() + }) :: :ok + def get_full_batches_info_and_import(batches_numbers_list, config) + when is_list(batches_numbers_list) and + (is_map(config) and is_map_key(config, :json_rpc_named_arguments) and + is_map_key(config, :chunk_size)) do + # Collect batches and linked L2 blocks and transaction + {batches_to_import, l2_blocks_to_import, l2_transactions_to_import} = + extract_data_from_batches(batches_numbers_list, config) + + # Collect L1 transactions associated with batches + l1_transactions = + batches_to_import + |> Map.values() + |> collect_l1_transactions() + |> Db.get_indices_for_l1_transactions() + + # Update batches with l1 transactions indices and prune unnecessary fields + batches_list_to_import = + batches_to_import + |> Map.values() + |> Enum.reduce([], fn batch, batches -> + [ + batch + |> Map.put(:commit_id, get_l1_transaction_id_by_hash(l1_transactions, batch.commit_transaction_hash)) + |> Map.put(:prove_id, get_l1_transaction_id_by_hash(l1_transactions, batch.prove_transaction_hash)) + |> Map.put(:execute_id, get_l1_transaction_id_by_hash(l1_transactions, batch.executed_transaction_hash)) + |> Db.prune_json_batch() + | batches + ] + end) + + Db.import_to_db( + batches_list_to_import, + Map.values(l1_transactions), + l2_transactions_to_import, + l2_blocks_to_import + ) + + :ok + end + + @doc """ + Retrieves the minimal batch number from the database. 
If the minimum batch number is not zero, + downloads `batches_max_range` batches older than the retrieved batch, along with associated + L1 transactions, rollup blocks, and transactions, and imports everything to the database. + + ## Parameters + - `config`: Configuration containing `chunk_size` to limit the amount of data requested from + the RPC endpoint and `json_rpc_named_arguments` defining parameters for the + RPC connection, `batches_max_range` defines how many of older batches must be downloaded. + + ## Returns + - `:ok` + """ + @spec batches_catchup(%{ + :batches_max_range => integer(), + :chunk_size => integer(), + :json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + optional(any()) => any() + }) :: :ok + def batches_catchup(config) + when is_map(config) and is_map_key(config, :json_rpc_named_arguments) and + is_map_key(config, :batches_max_range) and + is_map_key(config, :chunk_size) do + oldest_batch_number = Db.get_earliest_batch_number() + + if not is_nil(oldest_batch_number) && oldest_batch_number > 0 do + log_info("The oldest batch number is not zero. 
Historical batches will be fetched.")
+      start_batch_number = max(0, oldest_batch_number - config.batches_max_range)
+      end_batch_number = oldest_batch_number - 1
+
+      start_batch_number..end_batch_number
+      |> Enum.to_list()
+      |> get_full_batches_info_and_import(config)
+    end
+
+    :ok
+  end
+
+  # Resolves an L1 transaction hash to its database id via the `l1_transactions`
+  # lookup map; returns `nil` when the hash is absent from the map.
+  defp get_l1_transaction_id_by_hash(l1_transactions, hash) do
+    l1_transactions
+    |> Map.get(hash)
+    |> Kernel.||(%{id: nil})
+    |> Map.get(:id)
+  end
+end
diff --git a/apps/indexer/lib/indexer/fetcher/via/status_tracking/committed.ex b/apps/indexer/lib/indexer/fetcher/via/status_tracking/committed.ex
new file mode 100644
index 000000000000..eac74471e74b
--- /dev/null
+++ b/apps/indexer/lib/indexer/fetcher/via/status_tracking/committed.ex
@@ -0,0 +1,84 @@
+defmodule Indexer.Fetcher.Via.StatusTracking.Committed do
+  @moduledoc """
+  Functionality to discover committed batches
+  """
+
+  # NOTE(review): `Rpc` was aliased here but became unused once the L1 log
+  # analysis below was disabled; re-add it to the alias when that code returns.
+  alias Indexer.Fetcher.Via.Utils.Db
+
+  import Indexer.Fetcher.Via.StatusTracking.CommonUtils,
+    only: [
+      check_if_batch_status_changed: 3,
+      associate_and_import_or_prepare_for_recovery: 4
+    ]
+
+  import Indexer.Fetcher.Via.Utils.Logging, only: [log_info: 1]
+
+  @doc """
+  Checks if the oldest uncommitted batch in the database has the associated L1 commitment transaction
+  by requesting new batch details from RPC. If so, analyzes the `BlockCommit` event emitted by
+  the transaction to explore all the batches committed by it. For all discovered batches, it updates
+  the database with new associations, importing information about L1 transactions.
+  If it is found that some of the discovered batches are absent in the database, the function
+  interrupts and returns the list of batch numbers that can be attempted to be recovered.
+
+  ## Parameters
+  - `config`: Configuration containing `json_l1_rpc_named_arguments` and
+              `json_l2_rpc_named_arguments` defining parameters for the RPC connections.
+
+  ## Returns
+  - `:ok` if no new committed batches are found, or if all found batches and the corresponding L1
+    transactions are imported successfully.
+  - `{:recovery_required, batches_to_recover}` if the absence of new committed batches is
+    discovered; `batches_to_recover` contains the list of batch numbers.
+  """
+  @spec look_for_batches_and_update(%{
+          :json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
+          :json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
+          optional(any()) => any()
+        }) :: :ok | {:recovery_required, list()}
+  def look_for_batches_and_update(
+        %{
+          # NOTE(review): underscored because the L1 receipt fetch that consumed
+          # it is currently disabled (see Todo below); matching on the key keeps
+          # the config contract and avoids an unused-variable warning.
+          json_l1_rpc_named_arguments: _json_l1_rpc_named_arguments,
+          json_l2_rpc_named_arguments: json_l2_rpc_named_arguments
+        } = _config
+      ) do
+    case Db.get_earliest_sealed_batch_number() do
+      nil ->
+        :ok
+
+      expected_batch_number ->
+        log_info("Checking if the batch #{expected_batch_number} was committed")
+
+        {next_action, transaction_hash, l1_transactions} =
+          check_if_batch_status_changed(expected_batch_number, :commit_transaction, json_l2_rpc_named_arguments)
+
+        case next_action do
+          :skip ->
+            :ok
+
+          :look_for_batches ->
+            log_info("The batch #{expected_batch_number} looks like committed")
+
+            # Todo: Should we fetch the inscription from BTC chain or only L2?
+            # commit_transaction_receipt =
+            #   Rpc.fetch_transaction_receipt_by_hash(transaction_hash, json_l1_rpc_named_arguments)
+            # batches_numbers_from_rpc = get_committed_batches_from_logs(commit_transaction_receipt["logs"])
+
+            associate_and_import_or_prepare_for_recovery(
+              [expected_batch_number],
+              l1_transactions,
+              transaction_hash,
+              :commit_id
+            )
+        end
+    end
+  end
+
+  # NOTE(review): disabled together with its only (commented-out) call site above.
+  # As written it referenced `@block_commit_event`, which is never defined in this
+  # module: reading an undefined module attribute compiles to `nil` (with a
+  # compiler warning), so the topic filter could never match; the unused private
+  # function also emits a warning. Define the BlockCommit event topic and restore
+  # this helper when commitment inscriptions are analyzed from L1 again.
+  # defp get_committed_batches_from_logs(logs) do
+  #   committed_batches = Rpc.filter_logs_and_extract_topic_at(logs, @block_commit_event, 1)
+  #   log_info("Discovered #{length(committed_batches)} committed batches in the commitment transaction")
+  #
+  #   committed_batches
+  # end
+end
diff --git a/apps/indexer/lib/indexer/fetcher/via/status_tracking/common.ex b/apps/indexer/lib/indexer/fetcher/via/status_tracking/common.ex
new file mode 100644
index 000000000000..d229aea6815d
--- /dev/null
+++ b/apps/indexer/lib/indexer/fetcher/via/status_tracking/common.ex
@@ -0,0 +1,178 @@
+defmodule Indexer.Fetcher.Via.StatusTracking.CommonUtils do
+  @moduledoc """
+  Common functions for status changes trackers
+  """
+
+  alias Explorer.Chain.Via.Reader
+  alias Indexer.Fetcher.Via.Utils.{Db, Rpc}
+  import Indexer.Fetcher.Via.Utils.Logging, only: [log_warning: 1]
+
+  @doc """
+  Fetches the details of the batch with the given number and checks if the representation of
+  the same batch in the database refers to the same commitment, proving, or executing transaction
+  depending on `transaction_type`. If the transaction state changes, the new transaction is prepared for
+  import to the database.
+
+  ## Parameters
+  - `batch_number`: the number of the batch to check L1 transaction state.
+  - `transaction_type`: a type of the transaction to check, one of :commit_transaction, :execute_transaction, or :prove_transaction.
+  - `json_l2_rpc_named_arguments`: parameters for the RPC connections.
+
+  ## Returns
+  - `{:look_for_batches, l1_transaction_hash, l1_transactions}` where
+    - `l1_transaction_hash` is the hash of the L1 transaction.
+ - `l1_transactions` is a map containing the transaction hash as a key, and values are maps + with transaction hashes and transaction timestamps. + - `{:skip, "", %{}}` means the batch is not found in the database or the state of the transaction + in the batch representation is the same as the state of the transaction for the batch + received from RPC. + """ + @spec check_if_batch_status_changed( + binary() | non_neg_integer(), + :commit_transaction | :execute_transaction | :prove_transaction, + EthereumJSONRPC.json_rpc_named_arguments() + ) :: {:look_for_batches, any(), any()} | {:skip, <<>>, %{}} + def check_if_batch_status_changed(batch_number, transaction_type, json_l2_rpc_named_arguments) + when (is_binary(batch_number) or is_integer(batch_number)) and + transaction_type in [:commit_transaction, :prove_transaction, :execute_transaction] and + is_list(json_l2_rpc_named_arguments) do + batch_from_rpc = Rpc.fetch_batch_details_by_batch_number(batch_number, json_l2_rpc_named_arguments) + + status_changed_or_error = + case Reader.batch( + batch_number, + necessity_by_association: %{ + get_association(transaction_type) => :optional + } + ) do + {:ok, batch_from_db} -> transactions_of_batch_changed?(batch_from_db, batch_from_rpc, transaction_type) + {:error, :not_found} -> :error + end + + l1_transaction = get_l1_transaction_from_batch(batch_from_rpc, transaction_type) + + if l1_transaction.hash != Rpc.get_binary_zero_hash() and status_changed_or_error in [true, :error] do + l1_transactions = Db.get_indices_for_l1_transactions(%{l1_transaction.hash => l1_transaction}) + + {:look_for_batches, l1_transaction.hash, l1_transactions} + else + {:skip, "", %{}} + end + end + + defp get_association(transaction_type) do + case transaction_type do + :commit_transaction -> :commit_transaction + :prove_transaction -> :prove_transaction + :execute_transaction -> :execute_transaction + end + end + + defp transactions_of_batch_changed?(batch_db, batch_json, transaction_type) do + 
transaction_hash_json = + case transaction_type do + :commit_transaction -> batch_json.commit_transaction_hash + :prove_transaction -> batch_json.prove_transaction_hash + :execute_transaction -> batch_json.executed_transaction_hash + end + + transaction_hash_db = + case transaction_type do + :commit_transaction -> batch_db.commit_transaction + :prove_transaction -> batch_db.prove_transaction + :execute_transaction -> batch_db.execute_transaction + end + + transaction_hash_db_bytes = + if is_nil(transaction_hash_db) do + Rpc.get_binary_zero_hash() + else + transaction_hash_db.hash.bytes + end + + transaction_hash_json != transaction_hash_db_bytes + end + + defp get_l1_transaction_from_batch(batch_from_rpc, transaction_type) do + case transaction_type do + :commit_transaction -> + %{hash: batch_from_rpc.commit_transaction_hash, timestamp: batch_from_rpc.commit_timestamp} + + :prove_transaction -> + %{hash: batch_from_rpc.prove_transaction_hash, timestamp: batch_from_rpc.prove_timestamp} + + :execute_transaction -> + %{hash: batch_from_rpc.executed_transaction_hash, timestamp: batch_from_rpc.executed_timestamp} + end + end + + @doc """ + Receives batches from the database, establishes an association between each batch and + the corresponding L1 transactions, and imports batches and L1 transactions into the database. + If the number of batches returned from the database does not match the requested batches, + the initial list of batch numbers is returned, assuming that they can be + used for the missed batch recovery procedure. + + ## Parameters + - `batches_numbers`: the list of batch numbers that must be updated. + - `l1_transactions`: a map containing transaction hashes as keys, and values are maps + with transaction hashes and transaction timestamps of L1 transactions to import to the database. + - `transaction_hash`: the hash of the L1 transaction to build an association with. 
+ - `association_key`: the field in the batch description to build an association with L1 + transactions. + + ## Returns + - `:ok` if batches and the corresponding L1 transactions are imported successfully. + - `{:recovery_required, batches_to_recover}` if the absence of batches is discovered; + `batches_to_recover` contains the list of batch numbers. + """ + @spec associate_and_import_or_prepare_for_recovery([integer()], map(), binary(), :commit_id | :execute_id | :prove_id) :: + :ok | {:recovery_required, [integer()]} + def associate_and_import_or_prepare_for_recovery(batches_numbers, l1_transactions, transaction_hash, association_key) + when is_list(batches_numbers) and is_map(l1_transactions) and is_binary(transaction_hash) and + association_key in [:commit_id, :prove_id, :execute_id] do + case prepare_batches_to_import(batches_numbers, %{association_key => l1_transactions[transaction_hash][:id]}) do + {:error, batches_to_recover} -> + {:recovery_required, batches_to_recover} + + {:ok, batches_to_import} -> + Db.import_to_db(batches_to_import, Map.values(l1_transactions)) + :ok + end + end + + # Receives batches from the database and merges each batch's data with the data provided + # in `map_to_update`. If the number of batches returned from the database does not match + # with the requested batches, the initial list of batch numbers is returned, assuming that they + # can be used for the missed batch recovery procedure. + # + # ## Parameters + # - `batches`: the list of batch numbers that must be updated. + # - `map_to_update`: a map containing new data that must be applied to all requested batches. + # + # ## Returns + # - `{:ok, batches_to_import}` where `batches_to_import` is the list of batches ready to import + # with updated data. + # - `{:error, batches}` where `batches` contains the input list of batch numbers. 
+ defp prepare_batches_to_import(batches, map_to_update) do + batches_from_db = Reader.batches(batches, []) + + if length(batches_from_db) == length(batches) do + batches_to_import = + batches_from_db + |> Enum.reduce([], fn batch, batches -> + [ + batch + |> Rpc.transform_transaction_batch_to_map() + |> Map.merge(map_to_update) + | batches + ] + end) + + {:ok, batches_to_import} + else + log_warning("Lack of batches received from DB to update") + {:error, batches} + end + end +end diff --git a/apps/indexer/lib/indexer/fetcher/via/status_tracking/executed.ex b/apps/indexer/lib/indexer/fetcher/via/status_tracking/executed.ex new file mode 100644 index 000000000000..a0ae9337985d --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/via/status_tracking/executed.ex @@ -0,0 +1,86 @@ +defmodule Indexer.Fetcher.Via.StatusTracking.Executed do + @moduledoc """ + Functionality to discover executed batches + """ + + alias Indexer.Fetcher.Via.Utils.{Db, Rpc} + + import Indexer.Fetcher.Via.StatusTracking.CommonUtils, + only: [ + check_if_batch_status_changed: 3, + associate_and_import_or_prepare_for_recovery: 4 + ] + + import Indexer.Fetcher.Via.Utils.Logging, only: [log_info: 1] + + # keccak256("BlockExecution(uint256,bytes32,bytes32)") + @block_execution_event "0x2402307311a4d6604e4e7b4c8a15a7e1213edb39c16a31efa70afb06030d3165" + + @doc """ + Checks if the oldest unexecuted batch in the database has the associated L1 executing transaction + by requesting new batch details from RPC. If so, analyzes the `BlockExecution` event emitted by + the transaction to explore all the batches executed by it. For all discovered batches, it updates + the database with new associations, importing information about L1 transactions. + If it is found that some of the discovered batches are absent in the database, the function + interrupts and returns the list of batch numbers that can be attempted to be recovered. 
+ + ## Parameters + - `config`: Configuration containing `json_l1_rpc_named_arguments` and + `json_l2_rpc_named_arguments` defining parameters for the RPC connections. + + ## Returns + - `:ok` if no new executed batches are found, or if all found batches and the corresponding L1 + transactions are imported successfully. + - `{:recovery_required, batches_to_recover}` if the absence of new executed batches is + discovered; `batches_to_recover` contains the list of batch numbers. + """ + @spec look_for_batches_and_update(%{ + :json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + optional(any()) => any() + }) :: :ok | {:recovery_required, list()} + def look_for_batches_and_update( + %{ + json_l1_rpc_named_arguments: json_l1_rpc_named_arguments, + json_l2_rpc_named_arguments: json_l2_rpc_named_arguments + } = _config + ) do + case Db.get_earliest_unexecuted_batch_number() do + nil -> + :ok + + expected_batch_number -> + log_info("Checking if the batch #{expected_batch_number} was executed") + + {next_action, transaction_hash, l1_transactions} = + check_if_batch_status_changed(expected_batch_number, :execute_transaction, json_l2_rpc_named_arguments) + + case next_action do + :skip -> + :ok + + :look_for_batches -> + log_info("The batch #{expected_batch_number} looks like executed") + + # execute_transaction_receipt = + # Rpc.fetch_transaction_receipt_by_hash(transaction_hash, json_l1_rpc_named_arguments) + + # batches_numbers_from_rpc = get_executed_batches_from_logs(execute_transaction_receipt["logs"]) + + associate_and_import_or_prepare_for_recovery( + [expected_batch_number], + l1_transactions, + transaction_hash, + :execute_id + ) + end + end + end + + defp get_executed_batches_from_logs(logs) do + executed_batches = Rpc.filter_logs_and_extract_topic_at(logs, @block_execution_event, 1) + log_info("Discovered #{length(executed_batches)} executed batches in the 
executing transaction") + + executed_batches + end +end diff --git a/apps/indexer/lib/indexer/fetcher/via/status_tracking/proven.ex b/apps/indexer/lib/indexer/fetcher/via/status_tracking/proven.ex new file mode 100644 index 000000000000..f080f98111cb --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/via/status_tracking/proven.ex @@ -0,0 +1,190 @@ +defmodule Indexer.Fetcher.Via.StatusTracking.Proven do + @moduledoc """ + Functionality to discover proven batches + """ + + alias ABI.{FunctionSelector, TypeDecoder} + alias Indexer.Fetcher.Via.Utils.{Db, Rpc} + + import Indexer.Fetcher.Via.StatusTracking.CommonUtils, + only: [ + check_if_batch_status_changed: 3, + associate_and_import_or_prepare_for_recovery: 4 + ] + + import Indexer.Fetcher.Via.Utils.Logging, only: [log_error: 1, log_info: 1] + + @doc """ + Checks if the oldest unproven batch in the database has the associated L1 proving transaction + by requesting new batch details from RPC. If so, analyzes the calldata of the transaction + to explore all the batches proven by it. For all discovered batches, it updates + the database with new associations, importing information about L1 transactions. + If it is found that some of the discovered batches are absent in the database, the function + interrupts and returns the list of batch numbers that can be attempted to be recovered. + + ## Parameters + - `config`: Configuration containing `json_l1_rpc_named_arguments` and + `json_l2_rpc_named_arguments` defining parameters for the RPC connections. + + ## Returns + - `:ok` if no new proven batches are found, or if all found batches and the corresponding L1 + transactions are imported successfully. + - `{:recovery_required, batches_to_recover}` if the absence of new proven batches is + discovered; `batches_to_recover` contains the list of batch numbers. 
+ """ + @spec look_for_batches_and_update(%{ + :json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + :json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), + optional(any()) => any() + }) :: :ok | {:recovery_required, list()} + def look_for_batches_and_update( + %{ + json_l1_rpc_named_arguments: json_l1_rpc_named_arguments, + json_l2_rpc_named_arguments: json_l2_rpc_named_arguments + } = _config + ) do + case Db.get_earliest_unproven_batch_number() do + nil -> + :ok + + expected_batch_number -> + log_info("Checking if the batch #{expected_batch_number} was proven") + + {next_action, transaction_hash, l1_transactions} = + check_if_batch_status_changed(expected_batch_number, :prove_transaction, json_l2_rpc_named_arguments) + + case next_action do + :skip -> + :ok + + :look_for_batches -> + # log_info("The batch #{expected_batch_number} looks like proven") + # # Todo: fetch the data from the inscription??? + # prove_transaction = Rpc.fetch_transaction_by_hash(transaction_hash, json_l1_rpc_named_arguments) + # batches_numbers_from_rpc = get_proven_batches_from_calldata(prove_transaction["input"]) + + associate_and_import_or_prepare_for_recovery( + [expected_batch_number], + l1_transactions, + transaction_hash, + :prove_id + ) + end + end + end + + defp get_proven_batches_from_calldata(calldata) do + # /// @param batchNumber Rollup batch number + # /// @param batchHash Hash of L2 batch + # /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more + # /// @param numberOfLayer1Txs Number of priority operations to be processed + # /// @param priorityOperationsHash Hash of all priority operations from this batch + # /// @param l2LogsTreeRoot Root hash of tree that contains L2 -> L1 messages from this batch + # /// @param timestamp Rollup batch timestamp, have the same format as Ethereum batch constant + # /// @param commitment 
Verified input for the via circuit + # struct StoredBatchInfo { + # uint64 batchNumber; + # bytes32 batchHash; + # uint64 indexRepeatedStorageChanges; + # uint256 numberOfLayer1Txs; + # bytes32 priorityOperationsHash; + # bytes32 l2LogsTreeRoot; + # uint256 timestamp; + # bytes32 commitment; + # } + # /// @notice Recursive proof input data (individual commitments are constructed onchain) + # struct ProofInput { + # uint256[] recursiveAggregationInput; + # uint256[] serializedProof; + # } + proven_batches = + case calldata do + "0x7f61885c" <> encoded_params -> + # proveBatches(StoredBatchInfo calldata _prevBatch, StoredBatchInfo[] calldata _committedBatches, ProofInput calldata _proof) + # IO.inspect(FunctionSelector.decode("proveBatches((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),(uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32)[],(uint256[],uint256[]))")) + [_prev_batch, proven_batches, _proof] = + TypeDecoder.decode( + Base.decode16!(encoded_params, case: :lower), + %FunctionSelector{ + function: "proveBatches", + types: [ + tuple: [ + uint: 64, + bytes: 32, + uint: 64, + uint: 256, + bytes: 32, + bytes: 32, + uint: 256, + bytes: 32 + ], + array: + {:tuple, + [ + uint: 64, + bytes: 32, + uint: 64, + uint: 256, + bytes: 32, + bytes: 32, + uint: 256, + bytes: 32 + ]}, + tuple: [array: {:uint, 256}, array: {:uint, 256}] + ] + } + ) + + proven_batches + + "0xc37533bb" <> encoded_params -> + # proveBatchesSharedBridge(uint256 _chainId, StoredBatchInfo calldata _prevBatch, StoredBatchInfo[] calldata _committedBatches, ProofInput calldata _proof) + # IO.inspect(FunctionSelector.decode("proveBatchesSharedBridge(uint256,(uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),(uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32)[],(uint256[],uint256[]))")) + [_chainid, _prev_batch, proven_batches, _proof] = + TypeDecoder.decode( + Base.decode16!(encoded_params, case: :lower), + %FunctionSelector{ + function: 
"proveBatchesSharedBridge", + types: [ + {:uint, 256}, + tuple: [ + uint: 64, + bytes: 32, + uint: 64, + uint: 256, + bytes: 32, + bytes: 32, + uint: 256, + bytes: 32 + ], + array: + {:tuple, + [ + uint: 64, + bytes: 32, + uint: 64, + uint: 256, + bytes: 32, + bytes: 32, + uint: 256, + bytes: 32 + ]}, + tuple: [array: {:uint, 256}, array: {:uint, 256}] + ] + } + ) + + proven_batches + + _ -> + log_error("Unknown calldata format: #{calldata}") + + [] + end + + log_info("Discovered #{length(proven_batches)} proven batches in the prove transaction") + + proven_batches + |> Enum.map(fn batch_info -> elem(batch_info, 0) end) + end +end diff --git a/apps/indexer/lib/indexer/fetcher/via/transaction_batch.ex b/apps/indexer/lib/indexer/fetcher/via/transaction_batch.ex new file mode 100644 index 000000000000..06c3801c3d31 --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/via/transaction_batch.ex @@ -0,0 +1,149 @@ +defmodule Indexer.Fetcher.Via.TransactionBatch do + @moduledoc """ + Discovers new batches and populates the `via_transaction_batches` table. + + Repetitiveness is supported by sending a `:continue` message to itself every `recheck_interval` seconds. + + Each iteration compares the number of the last handled batch stored in the state with the + latest batch available on the RPC node. If the rollup progresses, all batches between the + last handled batch (exclusively) and the latest available batch (inclusively) are downloaded from RPC + in chunks of `chunk_size` and imported into the `via_transaction_batches` table. If the latest + available batch is too far from the last handled batch, only `batches_max_range` batches are downloaded. 
+ """ + + use GenServer + use Indexer.Fetcher + + require Logger + + alias Explorer.Chain.Via.Reader + alias Indexer.Fetcher.Via.Discovery.Workers + alias Indexer.Fetcher.Via.Utils.Rpc + + import Indexer.Fetcher.Via.Utils.Logging, only: [log_info: 1] + + def child_spec(start_link_arguments) do + spec = %{ + id: __MODULE__, + start: {__MODULE__, :start_link, start_link_arguments}, + restart: :transient, + type: :worker + } + + Supervisor.child_spec(spec, []) + end + + def start_link(args, gen_server_options \\ []) do + GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__)) + end + + @impl GenServer + def init(args) do + Logger.metadata(fetcher: :via_transaction_batches) + + config = Application.get_all_env(:indexer)[Indexer.Fetcher.Via.TransactionBatch] + chunk_size = config[:chunk_size] + recheck_interval = config[:recheck_interval] + batches_max_range = config[:batches_max_range] + + Process.send(self(), :init, []) + + {:ok, + %{ + config: %{ + chunk_size: chunk_size, + batches_max_range: batches_max_range, + json_rpc_named_arguments: args[:json_rpc_named_arguments], + recheck_interval: recheck_interval + }, + data: %{latest_handled_batch_number: 0} + }} + end + + @impl GenServer + def handle_info(:init, state) do + latest_handled_batch_number = + case Reader.latest_available_batch_number() do + nil -> + log_info("No batches found in DB. Will start with the latest batch available by RPC") + # The value received from RPC is decremented in order to not waste + # the first iteration of handling `:continue` message. 
+ Rpc.fetch_latest_sealed_batch_number(state.config.json_rpc_named_arguments) - 1 + + latest_handled_batch_number -> + latest_handled_batch_number + end + + Process.send_after(self(), :continue, 2000) + + log_info("All batches including #{latest_handled_batch_number} are considered as handled") + + {:noreply, %{state | data: %{latest_handled_batch_number: latest_handled_batch_number}}} + end + + # Checks if the rollup progresses by comparing the recently stored batch + # with the latest batch received from RPC. If progress is detected, it downloads + # batches, builds their associations with rollup blocks and transactions, and + # imports the received data to the database. If the latest batch received from RPC + # is too far from the most recently stored batch, only `batches_max_range` batches + # are downloaded. All RPC calls to get batch details and receive transactions + # included in batches are made in chunks of `chunk_size`. + # + # After importing batch information, it schedules the next iteration by sending + # the `:continue` message. The sending of the message is delayed, taking into account + # the time remaining after downloading and importing processes. + # + # ## Parameters + # - `:continue`: The message triggering the handler. + # - `state`: The current state of the fetcher containing both the fetcher configuration + # and the latest handled batch number. + # + # ## Returns + # - `{:noreply, new_state}` where the latest handled batch number is updated with the largest + # of the batch numbers imported in the current iteration. 
+ @impl GenServer + def handle_info( + :continue, + %{ + data: %{latest_handled_batch_number: latest_handled_batch_number}, + config: %{ + batches_max_range: batches_max_range, + json_rpc_named_arguments: json_rpc_named_arguments, + recheck_interval: recheck_interval, + chunk_size: _ + } + } = state + ) do + log_info("Checking for a new batch or batches") + + latest_sealed_batch_number = Rpc.fetch_latest_sealed_batch_number(json_rpc_named_arguments) + + {new_state, handle_duration} = + if latest_handled_batch_number < latest_sealed_batch_number do + start_batch_number = latest_handled_batch_number + 1 + end_batch_number = min(latest_sealed_batch_number, latest_handled_batch_number + batches_max_range) + + log_info("Handling the batch range #{start_batch_number}..#{end_batch_number}") + + {handle_duration, _} = + :timer.tc(&Workers.get_minimal_batches_info_and_import/3, [start_batch_number, end_batch_number, state.config]) + + { + %{state | data: %{latest_handled_batch_number: end_batch_number}}, + div(handle_duration, 1000) + } + else + {state, 0} + end + + Process.send_after(self(), :continue, max(:timer.seconds(recheck_interval) - handle_duration, 0)) + + {:noreply, new_state} + end + + @impl GenServer + def handle_info({ref, _result}, state) do + Process.demonitor(ref, [:flush]) + {:noreply, state} + end +end diff --git a/apps/indexer/lib/indexer/fetcher/via/utils/db.ex b/apps/indexer/lib/indexer/fetcher/via/utils/db.ex new file mode 100644 index 000000000000..5901f15932bc --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/via/utils/db.ex @@ -0,0 +1,206 @@ +defmodule Indexer.Fetcher.Via.Utils.Db do + @moduledoc """ + Common functions to simplify DB routines for Indexer.Fetcher.Via fetchers + """ + + alias Explorer.Chain + alias Explorer.Chain.Via.Reader + import Indexer.Fetcher.Via.Utils.Logging, only: [log_warning: 1, log_info: 1] + + @json_batch_fields_absent_in_db_batch [ + :commit_transaction_hash, + :commit_timestamp, + :prove_transaction_hash, + 
:prove_timestamp, + :executed_transaction_hash, + :executed_timestamp + ] + + @doc """ + Deletes elements in the batch description map to prepare the batch for importing to + the database. + + ## Parameters + - `batch_with_json_fields`: a map describing a batch with elements that could remain + after downloading batch details from RPC. + + ## Returns + - A map describing the batch compatible with the database import operation. + """ + @spec prune_json_batch(map()) :: map() + def prune_json_batch(batch_with_json_fields) + when is_map(batch_with_json_fields) do + Map.drop(batch_with_json_fields, @json_batch_fields_absent_in_db_batch) + end + + @doc """ + Gets the oldest imported batch number. + + ## Parameters + - none + + ## Returns + - A batch number or `nil` if there are no batches in the database. + """ + @spec get_earliest_batch_number() :: nil | non_neg_integer() + def get_earliest_batch_number do + case Reader.oldest_available_batch_number() do + nil -> + log_warning("No batches found in DB") + nil + + value -> + value + end + end + + @doc """ + Gets the oldest imported batch number without an associated commitment L1 transaction. + + ## Parameters + - none + + ## Returns + - A batch number or `nil` in cases where there are no batches in the database or + all batches in the database are marked as committed. + """ + @spec get_earliest_sealed_batch_number() :: nil | non_neg_integer() + def get_earliest_sealed_batch_number do + case Reader.earliest_sealed_batch_number() do + nil -> + log_info("No uncommitted batches found in DB") + nil + + value -> + value + end + end + + @doc """ + Gets the oldest imported batch number without an associated proving L1 transaction. + + ## Parameters + - none + + ## Returns + - A batch number or `nil` in cases where there are no batches in the database or + all batches in the database are marked as proven. 
+ """ + @spec get_earliest_unproven_batch_number() :: nil | non_neg_integer() + def get_earliest_unproven_batch_number do + case Reader.earliest_unproven_batch_number() do + nil -> + log_info("No unproven batches found in DB") + nil + + value -> + value + end + end + + @doc """ + Gets the oldest imported batch number without an associated executing L1 transaction. + + ## Parameters + - none + + ## Returns + - A batch number or `nil` in cases where there are no batches in the database or + all batches in the database are marked as executed. + """ + @spec get_earliest_unexecuted_batch_number() :: nil | non_neg_integer() + def get_earliest_unexecuted_batch_number do + case Reader.earliest_unexecuted_batch_number() do + nil -> + log_info("No not executed batches found in DB") + nil + + value -> + value + end + end + + @doc """ + Indexes L1 transactions provided in the input map. For transactions that + are already in the database, existing indices are taken. For new transactions, + the next available indices are assigned. + + ## Parameters + - `new_l1_transactions`: A map of L1 transaction descriptions. The keys of the map are + transaction hashes. + + ## Returns + - `l1_transactions`: A map of L1 transaction descriptions. Each element is extended with + the key `:id`, representing the index of the L1 transaction in the + `via_lifecycle_l1_transactions` table. 
+ """ + @spec get_indices_for_l1_transactions(map()) :: any() + # TODO: consider a way to remove duplicate with Arbitrum.Utils.Db + # credo:disable-for-next-line Credo.Check.Design.DuplicatedCode + def get_indices_for_l1_transactions(new_l1_transactions) + when is_map(new_l1_transactions) do + # Get indices for l1 transactions previously handled + l1_transactions = + new_l1_transactions + |> Map.keys() + |> Reader.lifecycle_transactions() + |> Enum.reduce(new_l1_transactions, fn {hash, id}, transactions -> + {_, transactions} = + Map.get_and_update!(transactions, hash.bytes, fn l1_transaction -> + {l1_transaction, Map.put(l1_transaction, :id, id)} + end) + + transactions + end) + + # Get the next index for the first new transaction based + # on the indices existing in DB + l1_transaction_next_id = Reader.next_id() + + # Assign new indices for the transactions which are not in + # the l1 transactions table yet + {updated_l1_transactions, _} = + l1_transactions + |> Map.keys() + |> Enum.reduce( + {l1_transactions, l1_transaction_next_id}, + fn hash, {transactions, next_id} -> + transaction = transactions[hash] + id = Map.get(transaction, :id) + + if is_nil(id) do + {Map.put(transactions, hash, Map.put(transaction, :id, next_id)), next_id + 1} + else + {transactions, next_id} + end + end + ) + + updated_l1_transactions + end + + @doc """ + Imports provided lists of batches and their associations with L1 transactions, rollup blocks, + and transactions to the database. + + ## Parameters + - `batches`: A list of maps with batch descriptions. + - `l1_transactions`: A list of maps with L1 transaction descriptions. Optional. + - `l2_transactions`: A list of maps with rollup transaction associations. Optional. + - `l2_blocks`: A list of maps with rollup block associations. Optional. 
+ + ## Returns + n/a + """ + def import_to_db(batches, l1_transactions \\ [], l2_transactions \\ [], l2_blocks \\ []) + when is_list(batches) and is_list(l1_transactions) and is_list(l2_transactions) and is_list(l2_blocks) do + {:ok, _} = + Chain.import(%{ + via_lifecycle_transactions: %{params: l1_transactions}, + via_transaction_batches: %{params: batches}, + via_batch_transactions: %{params: l2_transactions}, + via_batch_blocks: %{params: l2_blocks}, + timeout: :infinity + }) + end +end diff --git a/apps/indexer/lib/indexer/fetcher/via/utils/logging.ex b/apps/indexer/lib/indexer/fetcher/via/utils/logging.ex new file mode 100644 index 000000000000..ff15d582e21f --- /dev/null +++ b/apps/indexer/lib/indexer/fetcher/via/utils/logging.ex @@ -0,0 +1,143 @@ +defmodule Indexer.Fetcher.Via.Utils.Logging do + @moduledoc """ + Common logging functions for Indexer.Fetcher.Via fetchers + """ + require Logger + + @doc """ + A helper function to log a message with warning severity. Uses `Logger.warning` facility. + + ## Parameters + - `msg`: a message to log + + ## Returns + `:ok` + """ + @spec log_warning(any()) :: :ok + def log_warning(msg) do + Logger.warning(msg) + end + + @doc """ + A helper function to log a message with info severity. Uses `Logger.info` facility. + + ## Parameters + - `msg`: a message to log + + ## Returns + `:ok` + """ + @spec log_info(any()) :: :ok + def log_info(msg) do + Logger.info(msg) + end + + @doc """ + A helper function to log a message with error severity. Uses `Logger.error` facility. + + ## Parameters + - `msg`: a message to log + + ## Returns + `:ok` + """ + @spec log_error(any()) :: :ok + def log_error(msg) do + Logger.error(msg) + end + + @doc """ + A helper function to log progress when handling batches in chunks. + + ## Parameters + - `prefix`: A prefix for the logging message. + - `chunk`: A list of batch numbers in the current chunk. + - `current_progress`: The total number of batches handled up to this moment. 
+ - `total`: The total number of batches across all chunks.
+
+ ## Returns
+ `:ok`
+
+ ## Examples:
+ - `log_details_chunk_handling("A message", [1, 2, 3], 0, 10)` produces
+ `A message for batches 1..3. Progress: 30.00%`
+ - `log_details_chunk_handling("A message", [2], 1, 10)` produces
+ `A message for batch #2.`
+ - `log_details_chunk_handling("A message", [35], 0, 1)` produces
+ `A message for batch #35.`
+ - `log_details_chunk_handling("A message", [45, 50, 51, 52, 60], 0, 5)` produces
+ `A message for batches 45, 50..52, 60.`
+ """
+ @spec log_details_chunk_handling(binary(), list(), non_neg_integer(), non_neg_integer()) :: :ok
+ def log_details_chunk_handling(prefix, chunk, current_progress, total)
+ when is_binary(prefix) and is_list(chunk) and (is_integer(current_progress) and current_progress >= 0) and
+ (is_integer(total) and total > 0) do
+ chunk_length = length(chunk)
+
+ progress =
+ case chunk_length == total do
+ true ->
+ ""
+
+ false ->
+ percentage =
+ (current_progress + chunk_length)
+ |> Decimal.div(total)
+ |> Decimal.mult(100)
+ |> Decimal.round(2)
+ |> Decimal.to_string()
+
+ " Progress: #{percentage}%"
+ end
+
+ if chunk_length == 1 do
+ log_info("#{prefix} for batch ##{Enum.at(chunk, 0)}.")
+ else
+ log_info("#{prefix} for batches #{Enum.join(shorten_numbers_list(chunk), ", ")}.#{progress}")
+ end
+ end
+
+ # Transforms a list of numbers into a list of strings where consecutive values
+ # are combined to be displayed as a range.
+ #
+ # ## Parameters
+ # - `numbers_list`: the list of numbers to be shortened
+ #
+ # ## Returns
+ # `shorten_list` - the resulting list of strings after folding
+ #
+ # ## Examples:
+ # [1, 2, 3] => ["1..3"]
+ # [1, 3] => ["1", "3"]
+ # [1, 2] => ["1..2"]
+ # [1, 3, 4, 5] => ["1", "3..5"]
+ defp shorten_numbers_list(numbers_list) do
+ {shorten_list, _, _} =
+ numbers_list
+ |> Enum.sort()
+ |> Enum.reduce({[], nil, nil}, fn number, {shorten_list, prev_range_start, prev_number} ->
+ shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number)
+ end)
+ |> then(fn {shorten_list, prev_range_start, prev_number} ->
+ shorten_numbers_list_impl(prev_number, shorten_list, prev_range_start, prev_number)
+ end)
+
+ Enum.reverse(shorten_list)
+ end
+
+ defp shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number) do
+ cond do
+ is_nil(prev_number) ->
+ {[], number, number}
+
+ prev_number + 1 != number and prev_range_start == prev_number ->
+ {["#{prev_range_start}" | shorten_list], number, number}
+
+ prev_number + 1 != number ->
+ {["#{prev_range_start}..#{prev_number}" | shorten_list], number, number}
+
+ true ->
+ {shorten_list, prev_range_start, number}
+ end
+ end
+end
diff --git a/apps/indexer/lib/indexer/fetcher/via/utils/rpc.ex b/apps/indexer/lib/indexer/fetcher/via/utils/rpc.ex
new file mode 100644
index 000000000000..d41dffc32396
--- /dev/null
+++ b/apps/indexer/lib/indexer/fetcher/via/utils/rpc.ex
@@ -0,0 +1,380 @@
+defmodule Indexer.Fetcher.Via.Utils.Rpc do
+ @moduledoc """
+ Common functions to handle RPC calls for Indexer.Fetcher.Via fetchers
+ """
+
+ import EthereumJSONRPC, only: [json_rpc: 2, quantity_to_integer: 1]
+
+ alias Explorer.Helper, as: ExplorerHelper
+ alias Indexer.Helper, as: IndexerHelper
+ require Logger
+
+ @zero_hash "0000000000000000000000000000000000000000000000000000000000000000"
+ @zero_hash_binary <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
+
+ @rpc_resend_attempts 20
+
+ def 
get_zero_hash do + @zero_hash + end + + def get_binary_zero_hash do + @zero_hash_binary + end + + @doc """ + Filters out logs from a list of transactions logs where topic #0 is `topic_0` and + builds a list of values located at position `position` in such logs. + + ## Parameters + - `logs`: The list of transaction logs to filter logs with a specific topic. + - `topic_0`: The value of topic #0 in the required logs. + - `position`: The topic number to be extracted from the topic lists of every log + and appended to the resulting list. + + ## Returns + - A list of values extracted from the required transaction logs. + - An empty list if no logs with the specified topic are found. + """ + @spec filter_logs_and_extract_topic_at(maybe_improper_list(), binary(), integer()) :: list() + def filter_logs_and_extract_topic_at(logs, topic_0, position) + when is_list(logs) and + is_binary(topic_0) and + (is_integer(position) and position >= 0 and position <= 3) do + logs + |> Enum.reduce([], fn log_entity, result -> + topics = log_entity["topics"] + + if Enum.at(topics, 0) == topic_0 do + [quantity_to_integer(Enum.at(topics, position)) | result] + else + result + end + end) + end + + defp from_iso8601_to_datetime(time_string) do + case is_nil(time_string) do + true -> + IndexerHelper.timestamp_to_datetime(0) + + false -> + case DateTime.from_iso8601(time_string) do + {:ok, datetime, _} -> + datetime + + {:error, _} -> + IndexerHelper.timestamp_to_datetime(0) + end + end + end + + defp json_transaction_id_to_hash(hash) do + case hash do + "0x" <> transaction_hash -> transaction_hash + nil -> @zero_hash + end + end + + defp string_hash_to_bytes_hash(hash) do + hash + |> json_transaction_id_to_hash() + |> Base.decode16!(case: :mixed) + end + + @doc """ + Transforms a map with batch data received from the `zks_getL1BatchDetails` call + into a map that can be used by Indexer.Fetcher.Via fetchers for further handling. 
+ All hexadecimal hashes are converted to their decoded binary representation, + Unix and ISO8601 timestamps are converted to DateTime objects. + + ## Parameters + - `json_response`: Raw data received from the JSON RPC call. + + ## Returns + - A map containing minimal information about the batch. `start_block` and `end_block` + elements are set to `nil`. + """ + @spec transform_batch_details_to_map(map()) :: map() + def transform_batch_details_to_map(json_response) + when is_map(json_response) do + %{ + "number" => {:number, :ok}, + "timestamp" => {:timestamp, :ts_to_datetime}, + "l1TxCount" => {:l1_transaction_count, :ok}, + "l2TxCount" => {:l2_transaction_count, :ok}, + "rootHash" => {:root_hash, :str_to_byteshash}, + "commitTxHash" => {:commit_transaction_hash, :str_to_byteshash}, + "committedAt" => {:commit_timestamp, :iso8601_to_datetime}, + "proveTxHash" => {:prove_transaction_hash, :str_to_byteshash}, + "provenAt" => {:prove_timestamp, :iso8601_to_datetime}, + "executeTxHash" => {:executed_transaction_hash, :str_to_byteshash}, + "executedAt" => {:executed_timestamp, :iso8601_to_datetime}, + "l1GasPrice" => {:l1_gas_price, :ok}, + "l2FairGasPrice" => {:l2_fair_gas_price, :ok} + # :start_block added by request_block_ranges_by_rpc + # :end_block added by request_block_ranges_by_rpc + } + |> Enum.reduce(%{start_block: nil, end_block: nil}, fn {key, {key_atom, transform_type}}, batch_details_map -> + value_in_json_response = Map.get(json_response, key) + + Map.put( + batch_details_map, + key_atom, + case transform_type do + :iso8601_to_datetime -> from_iso8601_to_datetime(value_in_json_response) + :ts_to_datetime -> IndexerHelper.timestamp_to_datetime(value_in_json_response) + :str_to_txhash -> json_transaction_id_to_hash(value_in_json_response) + :str_to_byteshash -> string_hash_to_bytes_hash(value_in_json_response) + _ -> value_in_json_response + end + ) + end) + end + + @doc """ + Transforms a map with batch data received from the database into a map that + 
can be used by Indexer.Fetcher.Via fetchers for further handling. + + ## Parameters + - `batch`: A map containing a batch description received from the database. + + ## Returns + - A map containing simplified representation of the batch. Compatible with + the database import operation. + """ + def transform_transaction_batch_to_map(batch) + when is_map(batch) do + %{ + number: batch.number, + timestamp: batch.timestamp, + l1_transaction_count: batch.l1_transaction_count, + l2_transaction_count: batch.l2_transaction_count, + root_hash: batch.root_hash.bytes, + l1_gas_price: batch.l1_gas_price, + l2_fair_gas_price: batch.l2_fair_gas_price, + start_block: batch.start_block, + end_block: batch.end_block, + commit_id: batch.commit_id, + prove_id: batch.prove_id, + execute_id: batch.execute_id + } + end + + @doc """ + Retrieves batch details from the RPC endpoint using the `zks_getL1BatchDetails` call. + + ## Parameters + - `batch_number`: The batch number or identifier. + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - A map containing minimal batch details. It includes `start_block` and `end_block` + elements, both set to `nil`. + """ + @spec fetch_batch_details_by_batch_number(binary() | non_neg_integer(), EthereumJSONRPC.json_rpc_named_arguments()) :: + map() + def fetch_batch_details_by_batch_number(batch_number, json_rpc_named_arguments) + when (is_integer(batch_number) or is_binary(batch_number)) and is_list(json_rpc_named_arguments) do + req = + EthereumJSONRPC.request(%{ + id: batch_number, + method: "zks_getL1BatchDetails", + params: [batch_number] + }) + + error_message = &"Cannot call zks_getL1BatchDetails. 
Error: #{inspect(&1)}" + + {:ok, resp} = + IndexerHelper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) + + transform_batch_details_to_map(resp) + end + + @doc """ + Fetches transaction details from the RPC endpoint using the `eth_getTransactionByHash` call. + + ## Parameters + - `raw_hash`: The hash of the Ethereum transaction. It can be provided as a decoded binary + or hexadecimal string. + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - A map containing details of the transaction. + """ + @spec fetch_transaction_by_hash(binary(), EthereumJSONRPC.json_rpc_named_arguments()) :: map() + def fetch_transaction_by_hash(raw_hash, json_rpc_named_arguments) + when is_binary(raw_hash) and is_list(json_rpc_named_arguments) do + hash = ExplorerHelper.add_0x_prefix(raw_hash) + + req = + EthereumJSONRPC.request(%{ + id: 0, + method: "eth_getTransactionByHash", + params: [hash] + }) + + error_message = &"Cannot call eth_getTransactionByHash for hash #{hash}. Error: #{inspect(&1)}" + + {:ok, resp} = + IndexerHelper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) + + resp + end + + @doc """ + Fetches the transaction receipt from the RPC endpoint using the `eth_getTransactionReceipt` call. + + ## Parameters + - `raw_hash`: The hash of the Ethereum transaction. It can be provided as a decoded binary + or hexadecimal string. + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - A map containing the receipt details of the transaction. 
+ """ + @spec fetch_transaction_receipt_by_hash(binary(), EthereumJSONRPC.json_rpc_named_arguments()) :: map() + def fetch_transaction_receipt_by_hash(raw_hash, json_rpc_named_arguments) + when is_binary(raw_hash) and is_list(json_rpc_named_arguments) do + hash = ExplorerHelper.add_0x_prefix(raw_hash) + + req = + EthereumJSONRPC.request(%{ + id: 0, + method: "eth_getTransactionReceipt", + params: [hash] + }) + + error_message = &"Cannot call eth_getTransactionReceipt for hash #{hash}. Error: #{inspect(&1)}" + + {:ok, resp} = + IndexerHelper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) + + resp + end + + @doc """ + Fetches the latest sealed batch number from the RPC endpoint using the `zks_L1BatchNumber` call. + + ## Parameters + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - A non-negative integer representing the latest sealed batch number. + """ + @spec fetch_latest_sealed_batch_number(EthereumJSONRPC.json_rpc_named_arguments()) :: nil | non_neg_integer() + def fetch_latest_sealed_batch_number(json_rpc_named_arguments) + when is_list(json_rpc_named_arguments) do + req = EthereumJSONRPC.request(%{id: 0, method: "zks_L1BatchNumber", params: []}) + + error_message = &"Cannot call zks_L1BatchNumber. Error: #{inspect(&1)}" + + {:ok, resp} = + IndexerHelper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) + + quantity_to_integer(resp) + end + + @doc """ + Fetches block details using multiple `eth_getBlockByNumber` RPC calls. + + ## Parameters + - `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple + `eth_getBlockByNumber` RPC calls for different block numbers. + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - A list of responses containing details of the requested blocks. 
+ """ + @spec fetch_blocks_details([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) :: + list() + def fetch_blocks_details(requests_list, json_rpc_named_arguments) + + def fetch_blocks_details([], _) do + [] + end + + def fetch_blocks_details(requests_list, json_rpc_named_arguments) + when is_list(requests_list) and is_list(json_rpc_named_arguments) do + error_message = &"Cannot call eth_getBlockByNumber. Error: #{inspect(&1)}" + + {:ok, responses} = + IndexerHelper.repeated_call( + &json_rpc/2, + [requests_list, json_rpc_named_arguments], + error_message, + @rpc_resend_attempts + ) + + responses + end + + @doc """ + Fetches batches details using multiple `zks_getL1BatchDetails` RPC calls. + + ## Parameters + - `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple + `zks_getL1BatchDetails` RPC calls for different block numbers. + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - A list of responses containing details of the requested batches. + """ + @spec fetch_batches_details([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) :: + list() + def fetch_batches_details(requests_list, json_rpc_named_arguments) + + def fetch_batches_details([], _) do + [] + end + + def fetch_batches_details(requests_list, json_rpc_named_arguments) + when is_list(requests_list) and is_list(json_rpc_named_arguments) do + error_message = &"Cannot call zks_getL1BatchDetails. Error: #{inspect(&1)}" + + {:ok, responses} = + IndexerHelper.repeated_call( + &json_rpc/2, + [requests_list, json_rpc_named_arguments], + error_message, + @rpc_resend_attempts + ) + + responses + end + + @doc """ + Fetches block ranges included in the specified batches by using multiple + `zks_getL1BatchBlockRange` RPC calls. 
+ + ## Parameters + - `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple + `zks_getL1BatchBlockRange` RPC calls for different batch numbers. + - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. + + ## Returns + - A list of responses containing block ranges associated with the requested batches. + """ + @spec fetch_blocks_ranges([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) :: + list() + def fetch_blocks_ranges(requests_list, json_rpc_named_arguments) + + def fetch_blocks_ranges([], _) do + [] + end + + def fetch_blocks_ranges(requests_list, json_rpc_named_arguments) + when is_list(requests_list) and is_list(json_rpc_named_arguments) do + error_message = &"Cannot call zks_getL1BatchBlockRange. Error: #{inspect(&1)}" + + {:ok, responses} = + IndexerHelper.repeated_call( + &json_rpc/2, + [requests_list, json_rpc_named_arguments], + error_message, + @rpc_resend_attempts + ) + + responses + end +end diff --git a/apps/indexer/lib/indexer/prometheus/instrumenter.ex b/apps/indexer/lib/indexer/prometheus/instrumenter.ex index 47a71417fc6f..aff762356a4b 100644 --- a/apps/indexer/lib/indexer/prometheus/instrumenter.ex +++ b/apps/indexer/lib/indexer/prometheus/instrumenter.ex @@ -6,7 +6,7 @@ defmodule Indexer.Prometheus.Instrumenter do use Prometheus.Metric use Utils.CompileTimeEnvHelper, chain_type: [:explorer, :chain_type] - @rollups [:arbitrum, :zksync, :optimism, :polygon_zkevm, :scroll] + @rollups [:arbitrum, :zksync, :via, :optimism, :polygon_zkevm, :scroll] @histogram [ name: :block_full_processing_duration_microseconds, diff --git a/apps/indexer/lib/indexer/supervisor.ex b/apps/indexer/lib/indexer/supervisor.ex index ab96cb012731..a04f18f24bc5 100644 --- a/apps/indexer/lib/indexer/supervisor.ex +++ b/apps/indexer/lib/indexer/supervisor.ex @@ -55,6 +55,8 @@ defmodule Indexer.Supervisor do alias Indexer.Fetcher.Arbitrum.TrackingMessagesOnL1, as: 
ArbitrumTrackingMessagesOnL1 alias Indexer.Fetcher.ZkSync.BatchesStatusTracker, as: ZkSyncBatchesStatusTracker alias Indexer.Fetcher.ZkSync.TransactionBatch, as: ZkSyncTransactionBatch + alias Indexer.Fetcher.Via.BatchesStatusTracker, as: ViaBatchesStatusTracker + alias Indexer.Fetcher.Via.TransactionBatch, as: ViaTransactionBatch alias Indexer.Migrator.RecoveryWETHTokenTransfers @@ -214,6 +216,12 @@ defmodule Indexer.Supervisor do configure(ZkSyncBatchesStatusTracker.Supervisor, [ [json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor] ]), + configure(ViaTransactionBatch.Supervisor, [ + [json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor] + ]), + configure(ViaBatchesStatusTracker.Supervisor, [ + [json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor] + ]), configure(Indexer.Fetcher.PolygonZkevm.TransactionBatch.Supervisor, [ [json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor] ]), @@ -282,7 +290,7 @@ defmodule Indexer.Supervisor do |> maybe_add_block_reward_fetcher( {BlockReward.Supervisor, [[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]]} ) - |> maybe_add_nft_media_handler_processes() + # |> maybe_add_nft_media_handler_processes() Supervisor.init( all_fetchers, diff --git a/apps/indexer/lib/indexer/transform/addresses.ex b/apps/indexer/lib/indexer/transform/addresses.ex index ca4e4939ebff..8726ea588c39 100644 --- a/apps/indexer/lib/indexer/transform/addresses.ex +++ b/apps/indexer/lib/indexer/transform/addresses.ex @@ -74,7 +74,7 @@ defmodule Indexer.Transform.Addresses do %{from: :block_number, to: :fetched_coin_balance_block_number}, %{from: :to_address_hash, to: :hash} ], - if @chain_type == :zksync do + if @chain_type == :zksync || @chain_type == :via do [ %{from: :block_number, to: :fetched_coin_balance_block_number}, %{from: :created_contract_address_hash, to: :hash} diff --git 
a/config/config_helper.exs b/config/config_helper.exs index dc6524b6a441..954b80a4e149 100644 --- a/config/config_helper.exs +++ b/config/config_helper.exs @@ -25,6 +25,7 @@ defmodule ConfigHelper do suave: Explorer.Repo.Suave, zilliqa: Explorer.Repo.Zilliqa, zksync: Explorer.Repo.ZkSync, + via: Explorer.Repo.Via, neon: Explorer.Repo.Neon } |> Map.get(chain_type()) @@ -322,6 +323,7 @@ defmodule ConfigHelper do "zetachain", "zilliqa", "zksync", + "via", "neon" ] diff --git a/config/runtime.exs b/config/runtime.exs index a0c4744fd248..0d9f11409aea 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -35,7 +35,8 @@ config :block_scout_web, hide_scam_addresses: ConfigHelper.parse_bool_env_var("HIDE_SCAM_ADDRESSES"), show_tenderly_link: ConfigHelper.parse_bool_env_var("SHOW_TENDERLY_LINK"), sensitive_endpoints_api_key: System.get_env("API_SENSITIVE_ENDPOINTS_KEY"), - disable_api?: disable_api? + disable_api?: disable_api?, + bitcoin_explorer_url: System.get_env("BITCOIN_EXPLORER_URL", "https://mempool.space/tx") config :block_scout_web, :recaptcha, v2_client_key: System.get_env("RE_CAPTCHA_CLIENT_KEY"), @@ -1161,6 +1162,24 @@ config :indexer, Indexer.Fetcher.ZkSync.BatchesStatusTracker, config :indexer, Indexer.Fetcher.ZkSync.BatchesStatusTracker.Supervisor, enabled: ConfigHelper.parse_bool_env_var("INDEXER_ZKSYNC_BATCHES_ENABLED") +config :indexer, Indexer.Fetcher.Via.TransactionBatch, + chunk_size: ConfigHelper.parse_integer_env_var("INDEXER_VIA_BATCHES_CHUNK_SIZE", 50), + batches_max_range: ConfigHelper.parse_integer_env_var("INDEXER_VIA_NEW_BATCHES_MAX_RANGE", 50), + recheck_interval: ConfigHelper.parse_integer_env_var("INDEXER_VIA_NEW_BATCHES_RECHECK_INTERVAL", 60) + +config :indexer, Indexer.Fetcher.Via.TransactionBatch.Supervisor, + enabled: ConfigHelper.parse_bool_env_var("INDEXER_VIA_BATCHES_ENABLED") + +config :indexer, Indexer.Fetcher.Via.BatchesStatusTracker, + via_l1_rpc: System.get_env("INDEXER_VIA_L1_RPC"), + recheck_interval: 
ConfigHelper.parse_integer_env_var("INDEXER_VIA_BATCHES_STATUS_RECHECK_INTERVAL", 60) + +config :indexer, Indexer.Fetcher.Via.BatchesStatusTracker.Supervisor, + enabled: ConfigHelper.parse_bool_env_var("INDEXER_VIA_BATCHES_ENABLED") + +config :block_scout_web, + bitcoin_explorer_url: System.get_env("BITCOIN_EXPLORER_URL", "https://mempool.space/tx") + config :indexer, Indexer.Fetcher.Arbitrum.Messaging, arbsys_contract: ConfigHelper.safe_get_env("INDEXER_ARBITRUM_ARBSYS_CONTRACT", "0x0000000000000000000000000000000000000064") diff --git a/config/runtime/dev.exs b/config/runtime/dev.exs index 28591379ebd7..b0f2260fb5ff 100644 --- a/config/runtime/dev.exs +++ b/config/runtime/dev.exs @@ -128,6 +128,7 @@ for repo <- [ Explorer.Repo.Stability, Explorer.Repo.Zilliqa, Explorer.Repo.ZkSync, + Explorer.Repo.Via, # Feature dependent repos Explorer.Repo.BridgedTokens, Explorer.Repo.ShrunkInternalTransactions, diff --git a/config/runtime/prod.exs b/config/runtime/prod.exs index a0a161af6c74..d5299209f874 100644 --- a/config/runtime/prod.exs +++ b/config/runtime/prod.exs @@ -99,6 +99,7 @@ for repo <- [ Explorer.Repo.Stability, Explorer.Repo.Zilliqa, Explorer.Repo.ZkSync, + Explorer.Repo.Via, Explorer.Repo.Neon ] do config :explorer, repo, diff --git a/cspell.json b/cspell.json index 12c474e39da4..9355e6895415 100644 --- a/cspell.json +++ b/cspell.json @@ -741,6 +741,7 @@ "zkevm", "zksolc", "zksync", + "via", "zstd" ], "enableFiletypes": [