Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
d99368c
[pallet-revive] Backport all pallet-revive changes
pkhry Aug 14, 2025
eb3adaa
Update Cargo.lock
TorstenStueber Dec 5, 2025
7f515cb
Update from github-actions[bot] running command 'prdoc --audience run…
github-actions[bot] Dec 5, 2025
ffe4818
Update CI docker image
TorstenStueber Dec 5, 2025
b7ff503
Merge branch 'torsten/backport-revive-unstable2507' of github.com:par…
TorstenStueber Dec 5, 2025
cf15b71
Update PR doc
TorstenStueber Dec 5, 2025
5887cbc
Update PR doc (2)
TorstenStueber Dec 5, 2025
dfc2950
Fix clippy
TorstenStueber Dec 5, 2025
d6637c3
Fix problems in Cargo.toml files
TorstenStueber Dec 5, 2025
8085fdd
Fix umbrella definition
TorstenStueber Dec 5, 2025
37e7bfb
Fix more dependency errors
TorstenStueber Dec 5, 2025
f14fda7
Update Cargo.lock
TorstenStueber Dec 5, 2025
eb33290
bump zombienet-sdk and subxt versions (#9587)
pepoviola Sep 2, 2025
1602173
fix build errors
pgherveou Dec 5, 2025
a0ad1be
fix cargo check
pgherveou Dec 5, 2025
d20aa9b
fix sp-timestamp warn
pgherveou Dec 5, 2025
ebe2cca
fix eth-rpc test target build
pgherveou Dec 5, 2025
8d27883
Adding Retester to CI (#10071)
0xOmarA Nov 3, 2025
eb47bac
pin solc version to 0.8.30 in tests-misc.yml (#10558)
0xRVE Dec 5, 2025
b76eac2
[pallet-revive] add EVM gas call syscalls (#10554)
xermicus Dec 5, 2025
d166975
ci: ci-unified with resolc 0.5.0 (#10325)
alvicsam Nov 17, 2025
e0905c7
Mark revive-dev-runtime as no publish
TorstenStueber Dec 6, 2025
4ee1f00
Add license to revive-dev-runtime
TorstenStueber Dec 6, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
7 changes: 0 additions & 7 deletions .config/nextest.toml
Original file line number Diff line number Diff line change
Expand Up @@ -123,10 +123,3 @@ serial-integration = { max-threads = 1 }
[[profile.default.overrides]]
filter = 'test(/(^ui$|_ui|ui_)/)'
test-group = 'serial-integration'

# Running eth-rpc tests sequentially
# These tests rely on a shared resource (the RPC and Node)
# and would cause race conditions due to transaction nonces if run in parallel.
[[profile.default.overrides]]
filter = 'package(pallet-revive-eth-rpc) and test(/^tests::/)'
test-group = 'serial-integration'
37 changes: 37 additions & 0 deletions .github/actions/get-solc/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
name: "Install Solidity Compiler"
description: "Installs the Ethereum solc Solidity compiler frontend executable"

runs:
using: "composite"
steps:
- name: Figure out Solc Download URL
shell: bash
run: |
if [[ "${{ runner.os }}" == "Linux" ]]; then
echo "SOLC_NAME=solc-static-linux" >> $GITHUB_ENV
elif [[ "${{ runner.os }}" == "Windows" ]]; then
echo "SOLC_NAME=solc-windows.exe" >> $GITHUB_ENV
else
echo "SOLC_NAME=solc-macos" >> $GITHUB_ENV
fi

- name: Download Solc
shell: bash
run: |
mkdir -p solc
curl -sSL --output solc/solc https://github.com/ethereum/solidity/releases/download/v0.8.30/${SOLC_NAME}

- name: Make Solc Executable
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
echo "$(pwd -W)\\solc" >> $GITHUB_PATH
mv solc/solc solc/solc.exe

- name: Make Solc Executable
if: ${{ runner.os != 'Windows' }}
shell: bash
run: |
echo "$(pwd)/solc" >> $GITHUB_PATH
chmod +x solc/solc
xattr -x solc/solc
2 changes: 1 addition & 1 deletion .github/env
Original file line number Diff line number Diff line change
@@ -1 +1 @@
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202506301118"
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202511141243"
241 changes: 241 additions & 0 deletions .github/scripts/process-differential-tests-report.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,241 @@
"""
This script is used to turn the JSON report produced by the revive differential tests tool into an
easy to consume markdown document for the purpose of reporting this information in the Polkadot SDK
CI. The full models used in the JSON report can be found in the revive differential tests repo and
the models used in this script are just a partial reproduction of the full report models.
"""

import json, typing, io, sys


class Report(typing.TypedDict):
    """Top-level (partial) model of the revive differential tests JSON report."""

    context: "Context"
    execution_information: dict["MetadataFilePathString", "MetadataFileReport"]


class MetadataFileReport(typing.TypedDict):
    """Report for a single metadata file, keyed by the case index (as a string)."""

    case_reports: dict["CaseIdxString", "CaseReport"]


class CaseReport(typing.TypedDict):
    """Report for a single test case, keyed by the compilation mode string."""

    mode_execution_reports: dict["ModeString", "ExecutionReport"]


class ExecutionReport(typing.TypedDict):
    """Outcome of executing one test case under one compilation mode."""

    status: "TestCaseStatus"


class Context(typing.TypedDict):
    """Execution context of the report.

    The capitalized ``Test`` key mirrors the key emitted in the tool's JSON.
    """

    Test: "TestContext"


class TestContext(typing.TypedDict):
    """Context describing how the test corpus was configured."""

    corpus_configuration: "CorpusConfiguration"


class CorpusConfiguration(typing.TypedDict):
    """Corpus configuration: the list of test specifiers that were requested."""

    test_specifiers: list["TestSpecifier"]


class CaseStatusSuccess(typing.TypedDict):
    """Status of a case that succeeded; ``steps_executed`` counts the steps run."""

    status: typing.Literal["Succeeded"]
    steps_executed: int


class CaseStatusFailure(typing.TypedDict):
    """Status of a case that failed; ``reason`` is a human-readable explanation."""

    status: typing.Literal["Failed"]
    reason: str


class CaseStatusIgnored(typing.TypedDict):
    """Status of a case that was ignored; ``reason`` explains why it was skipped."""

    status: typing.Literal["Ignored"]
    reason: str


TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union type of all of the possible statuses that could be reported for a case."""

TestSpecifier = str
"""A test specifier string. For example resolc-compiler-tests/fixtures/solidity/test.json::0::Y+"""

ModeString = str
"""The mode string. For example Y+ >=0.8.13"""

MetadataFilePathString = str
"""The path to a metadata file. For example resolc-compiler-tests/fixtures/solidity/test.json"""

CaseIdxString = str
"""The index of a case as a string. For example '0'"""


def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
    """
    Given a path, this function returns the path relative to the resolc-compiler-test directory.
    The following is an example of an input and an output:

    Input: ~/polkadot-sdk/revive-differential-tests/resolc-compiler-tests/fixtures/solidity/test.json
    Output: test.json

    If the marker directory does not occur in ``path``, ``str.split`` returns the
    whole path as its only element, so the input is returned unchanged (minus any
    leading/trailing slashes).
    """

    # The original wrapped this expression in a redundant f-string; the split
    # already yields a plain str.
    return path.split("resolc-compiler-tests/fixtures/solidity")[-1].strip("/")


def main() -> None:
    """Convert the JSON report named by ``sys.argv[1]`` into ``report.md``.

    The markdown document contains the list of specified tests, aggregate
    counts per test-case status, and a collapsible table with every failure.
    Raises ``Exception`` when the report contains a status string unknown to
    this script.
    """
    with open(sys.argv[1], "r") as file:
        report: Report = json.load(file)

    # `with` guarantees the document is flushed and closed even if a malformed
    # report makes one of the section writers raise (the original left the
    # file handle open on error and admitted as much in a comment).
    with open("report.md", "w") as markdown_document:
        print("# Differential Tests Results", file=markdown_document)
        _write_specified_tests(report, markdown_document)
        _write_counts(report, markdown_document)
        _write_failures(report, _collect_successful_cases(report), markdown_document)


def _iter_case_executions(
    report: "Report",
) -> "typing.Iterator[tuple[str, str, str, ExecutionReport]]":
    """Flatten the report into (metadata file path, case idx, mode, execution report) tuples."""
    for metadata_file_path, file_report in report["execution_information"].items():
        for case_idx_string, case_report in file_report["case_reports"].items():
            for mode_string, execution_report in case_report[
                "mode_execution_reports"
            ].items():
                yield metadata_file_path, case_idx_string, mode_string, execution_report


def _normalize_mode_string(mode_string: str) -> str:
    """Rewrite the optimizer suffixes (" M3" / " M0") into the "+" / "-" shorthand."""
    return mode_string.replace(" M3", "+").replace(" M0", "-")


def _write_specified_tests(report: "Report", out: typing.TextIO) -> None:
    """Write the '## Specified Tests' section: one bullet per configured specifier."""
    print("## Specified Tests", file=out)
    for test_specifier in report["context"]["Test"]["corpus_configuration"][
        "test_specifiers"
    ]:
        relative = path_relative_to_resolc_compiler_test_directory(test_specifier)
        print(f"* `{relative}`", file=out)


def _write_counts(report: "Report", out: typing.TextIO) -> None:
    """Write the '## Counts' section with the totals per test-case status."""
    counts: dict[str, int] = {"Succeeded": 0, "Failed": 0, "Ignored": 0}
    total_number_of_cases: int = 0
    for _, _, _, execution_report in _iter_case_executions(report):
        status = execution_report["status"]
        if status["status"] not in counts:
            raise Exception(
                f"Encountered a status that's unknown to the script: {status}"
            )
        counts[status["status"]] += 1
        total_number_of_cases += 1

    print("## Counts", file=out)
    print(f"* **Total Number of Test Cases:** {total_number_of_cases}", file=out)
    print(f"* **Total Number of Successes:** {counts['Succeeded']}", file=out)
    print(f"* **Total Number of Failures:** {counts['Failed']}", file=out)
    print(f"* **Total Number of Ignores:** {counts['Ignored']}", file=out)


def _collect_successful_cases(
    report: "Report",
) -> "dict[MetadataFilePathString, dict[CaseIdxString, set[ModeString]]]":
    """Group the successful executions as relative path -> case idx -> set of modes."""
    successful_cases: "dict[MetadataFilePathString, dict[CaseIdxString, set[ModeString]]]" = {}
    for raw_path, case_idx_string, raw_mode, execution_report in _iter_case_executions(
        report
    ):
        if execution_report["status"]["status"] != "Succeeded":
            continue
        # Key by the *relative* path and normalized mode so the failure writer
        # can look entries up with the same keys it prints.
        relative_path = path_relative_to_resolc_compiler_test_directory(raw_path)
        successful_cases.setdefault(relative_path, {}).setdefault(
            case_idx_string, set()
        ).add(_normalize_mode_string(raw_mode))
    return successful_cases


def _write_failures(
    report: "Report",
    successful_cases: "dict[MetadataFilePathString, dict[CaseIdxString, set[ModeString]]]",
    out: typing.TextIO,
) -> None:
    """Write the '## Failures' section: a collapsible table of every failed execution."""
    print("## Failures", file=out)
    # NOTE: the original backslash-continuation literals embedded long runs of
    # source indentation into the output text; the prose is normalized to
    # single spaces here (markdown renders both identically).
    print(
        "The test specifiers seen in this section have the format "
        "'path::case_idx::compilation_mode' and they're compatible with the revive "
        "differential tests framework and can be specified to it directly in the "
        "same way that they're provided through the `--test` argument of the "
        "framework.\n",
        file=out,
    )
    print(
        "The failures are provided in an expandable section to ensure that the PR "
        "does not get polluted with information. Please click on the section below "
        "for more information",
        file=out,
    )
    print(
        "<details><summary>Detailed Differential Tests Failure Information</summary>\n\n",
        file=out,
    )
    print("| Test Specifier | Failure Reason | Note |", file=out)
    print("| -- | -- | -- |", file=out)

    # Bind distinct names for the converted path/mode: the original rebound the
    # outer loop variables inside the innermost loop, which only worked because
    # the conversion happens to be idempotent.
    for raw_path, case_idx_string, raw_mode, execution_report in _iter_case_executions(
        report
    ):
        status = execution_report["status"]
        if status["status"] != "Failed":
            continue

        relative_path = path_relative_to_resolc_compiler_test_directory(raw_path)
        mode_string = _normalize_mode_string(raw_mode)
        # Newlines would break the markdown table row.
        failure_reason = status["reason"].replace("\n", " ")

        # Read with `.get` instead of the original's `setdefault` so the lookup
        # does not insert empty entries into `successful_cases` as a side effect.
        succeeded_modes = successful_cases.get(relative_path, {}).get(
            case_idx_string, set()
        )
        note = (
            f"This test case succeeded with other compilation modes: {succeeded_modes}"
            if succeeded_modes
            else ""
        )

        test_specifier = f"{relative_path}::{case_idx_string}::{mode_string}"
        print(f"| `{test_specifier}` | `{failure_reason}` | {note} |", file=out)

    print("\n\n</details>", file=out)


# Entry point: expects the path of the JSON report as the first CLI argument
# and writes the markdown summary to ./report.md.
if __name__ == "__main__":
    main()
Loading
Loading