58 changes: 40 additions & 18 deletions tests/integration/test.py
@@ -9,22 +9,44 @@


def get_args():
parser = argparse.ArgumentParser(description='C2Rust testsuite.')
parser.add_argument('--verbose', dest='verbose', action='store_true',
default=False,
help='Enable verbose output')
parser.add_argument('--stages', dest='stages', action='store',
nargs='*', type=str, default=None, choices=tests.Test.STAGES,
help='Only test specified stage(s)')
parser.add_argument('--print-requirements', metavar='PLATFORM',
dest='requirements', choices=['ubuntu'],
action='store', type=str, default=None,
help='Print requirements for platform and exit')
parser.add_argument('--ignore-requirements',
action='store_true',
help='Ignore test requirements')
parser.add_argument('projects', metavar='project', type=str, nargs='*',
help='Project to test (defaults to all projects if none specified)')
parser = argparse.ArgumentParser(description="C2Rust testsuite.")
parser.add_argument(
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Enable verbose output",
)
parser.add_argument(
"--stages",
dest="stages",
action="store",
nargs="*",
type=str,
default=None,
choices=tests.Test.STAGES,
help="Only test specified stage(s)",
)
parser.add_argument(
"--print-requirements",
metavar="PLATFORM",
dest="requirements",
choices=["ubuntu"],
action="store",
type=str,
default=None,
help="Print requirements for platform and exit",
)
parser.add_argument(
"--ignore-requirements", action="store_true", help="Ignore test requirements"
)
parser.add_argument(
"projects",
metavar="project",
type=str,
nargs="*",
help="Project to test (defaults to all projects if none specified)",
)
return parser.parse_args()


@@ -42,5 +64,5 @@ def print_requirements(args):
elif not conf.project_dirs and len(args.projects) > 0:
util.die(f"no such project: {args.project}")
else:
templates.autogen(conf)
tests.run_tests(conf)
generated_scripts = set(templates.autogen(conf))
tests.run_tests(conf, generated_scripts)
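To make the new wiring in test.py concrete: templates.autogen is now a generator of Paths, and the driver snapshots it into a set before handing it to tests.run_tests, since a generator can only be walked once while every Test needs to consult the same collection. A minimal, self-contained sketch of that call pattern (fake_autogen and the paths below are hypothetical placeholders, not part of this PR):

from pathlib import Path
from typing import Generator


def fake_autogen() -> Generator[Path, None, None]:
    # Hypothetical stand-in for templates.autogen: yields each script it writes.
    yield Path("project_a/transpile.gen.sh")
    yield Path("project_a/cargo.transpile.gen.sh")


# Materialize once; the resulting set can be re-checked by every Test instance,
# whereas iterating the generator a second time would produce nothing.
generated_scripts = set(fake_autogen())
print(Path("project_a/transpile.gen.sh") in generated_scripts)  # True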
70 changes: 42 additions & 28 deletions tests/integration/tests/__init__.py
@@ -1,8 +1,8 @@

from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from datetime import timedelta
import os
from pathlib import Path
import sys
import subprocess
from time import perf_counter
@@ -13,7 +13,6 @@


class Test(object):

STAGES: dict[str, list[str]] = {
"autogen": ["autogen.sh"],
"configure": ["configure.sh"],
@@ -22,15 +21,25 @@ class Test(object):
"cargo.transpile": ["cargo.transpile.gen.sh", "cargo.transpile.sh"],
"refactor": ["refactor.gen.sh", "refactor.sh"],
"cargo.refactor": ["cargo.refactor.gen.sh", "cargo.refactor.sh"],
"check": ["check.sh", "test.sh"]
"check": ["check.sh", "test.sh"],
}

def __init__(self, directory: str):
ff = next(os.walk(directory))[2]
self.scripts = set(filter(lambda f: f.endswith(".sh"), ff))
self.dir = directory
self.conf_file = os.path.join(directory, CONF_YML)
self.name = os.path.basename(directory)
def __init__(self, directory: Path, generated_scripts: set[Path]):
# Only trust `*.gen*` scripts that were generated during this run;
# hand-written (non-gen) scripts can still be discovered on disk.
non_gen_script_paths = {
f
for f in directory.iterdir()
if f.suffix == ".sh" and ".gen" not in f.suffixes
}
gen_script_paths = {
path for path in generated_scripts if path.is_relative_to(directory)
}
script_paths = non_gen_script_paths | gen_script_paths
self.scripts = {str(script.relative_to(directory)) for script in script_paths}
self.dir = str(directory)
self.conf_file = str(directory / CONF_YML)
self.name = directory.name

def run_script(self, stage, script, verbose=False, xfail=False) -> bool:
"""
@@ -40,41 +49,43 @@ def run_script(self, stage, script, verbose=False, xfail=False) -> bool:
def print_log_tail_on_fail(script_path):
logfile = f"{script_path}.log"
if os.path.isfile(logfile):
grep_cmd = ['grep', '-i', '-A', '20', '-E', 'panicked|error', logfile]
grep_cmd = ["grep", "-i", "-A", "20", "-E", "panicked|error", logfile]
grep = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE)
assert grep.stdout is not None
for line in grep.stdout:
print(line.decode().rstrip())

# fall back to tail if grep didn't find anything
if grep.returncode != 0:
tail = subprocess.Popen(['tail', '-n', '20', logfile], stdout=subprocess.PIPE)
tail = subprocess.Popen(
["tail", "-n", "20", logfile], stdout=subprocess.PIPE
)
assert tail.stdout is not None
for line in tail.stdout:
print(line.decode().rstrip())
else:
print("{color}Missing log file: {logf}{nocolor}".format(
color=Colors.WARNING,
logf=logfile,
nocolor=Colors.NO_COLOR)
print(
"{color}Missing log file: {logf}{nocolor}".format(
color=Colors.WARNING, logf=logfile, nocolor=Colors.NO_COLOR
)
)

script_path = os.path.join(self.dir, script)

if not os.path.isfile(script_path):
print("{color}Missing script: {script}{nocolor}".format(
color=Colors.FAIL,
script=script_path,
nocolor=Colors.NO_COLOR)
print(
"{color}Missing script: {script}{nocolor}".format(
color=Colors.FAIL, script=script_path, nocolor=Colors.NO_COLOR
)
)
return False

if not os.access(script_path, os.X_OK):
print("{color}Script is not executable: {script}{nocolor}".format(
color=Colors.FAIL,
script=script_path,
nocolor=Colors.NO_COLOR)
print(
"{color}Script is not executable: {script}{nocolor}".format(
color=Colors.FAIL, script=script_path, nocolor=Colors.NO_COLOR
)
)
return False

if not verbose:
@@ -84,7 +95,8 @@ def print_log_tail_on_fail(script_path):
name=self.name,
nc=Colors.NO_COLOR,
stage=stage,
script=relpath)
script=relpath,
)
else:
line = ""

@@ -187,7 +199,9 @@ def run(self, conf: Config) -> bool:
requested_stages = ", ".join(conf.stages)
stages = ", ".join(Test.STAGES.keys())
y, nc = Colors.WARNING, Colors.NO_COLOR
die(f"invalid stages: {y}{requested_stages}{nc}. valid stages: {stages}")
die(
f"invalid stages: {y}{requested_stages}{nc}. valid stages: {stages}"
)

stages = conf.stages

@@ -210,11 +224,11 @@ class TestResult:
time: timedelta


def run_tests(conf: Config):
def run_tests(conf: Config, generated_scripts: set[Path]):
if not conf.ignore_requirements:
check(conf)

tests = [Test(td) for td in conf.project_dirs]
tests = [Test(Path(td), generated_scripts) for td in conf.project_dirs]

def run(test: Test) -> TestResult:
start = perf_counter()
@@ -229,5 +243,5 @@ def run(test: Test) -> TestResult:
for result in results:
print(f"{result.test.name} took {result.time}")
if not all(result.passed for result in results):
print(f"projects failed: {" ".join(result.test.name for result in results)}")
print(f"projects failed: {' '.join(result.test.name for result in results)}")
exit(1)
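The reworked Test.__init__ above trusts only the .gen scripts produced during this run, while hand-written .sh scripts are still discovered on disk. A minimal sketch of the two filters it combines, using a throwaway directory (the file names below are hypothetical, not taken from the test suite):

import tempfile
from pathlib import Path

project = Path(tempfile.mkdtemp())
for name in ("configure.sh", "build.sh", "transpile.gen.sh", "stale.gen.sh"):
    (project / name).touch()

# Pretend only transpile.gen.sh was generated in this run
# (normally this is the set collected from templates.autogen).
generated_scripts = {project / "transpile.gen.sh"}

# Hand-written scripts: ".sh" suffix, but no ".gen" anywhere in the suffix chain.
non_gen = {f for f in project.iterdir()
           if f.suffix == ".sh" and ".gen" not in f.suffixes}

# Generated scripts come from the trusted set, scoped to this project directory.
gen = {p for p in generated_scripts if p.is_relative_to(project)}

print(sorted(s.name for s in non_gen | gen))
# ['build.sh', 'configure.sh', 'transpile.gen.sh'] -- stale.gen.sh is ignored

The net effect is that leftover generated scripts from earlier runs are no longer picked up and executed.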
1 change: 1 addition & 0 deletions tests/integration/tests/hostenv.py
@@ -20,6 +20,7 @@ def is_ubuntu_1804() -> bool:
# def is_ubuntu_1404():
# return 'Ubuntu-14.04-trusty' in platform()


def is_centos() -> bool:
return distro.name() == "CentOS"

10 changes: 6 additions & 4 deletions tests/integration/tests/requirements.py
@@ -21,8 +21,10 @@ def check_apt_package(yaml: List[str]):
last: str = output.splitlines()[-1]
expected: str = f"ii {p}"
if not last.startswith(expected):
errors.append(f"package not (properly) installed: {p} (dpkg output: {output}) ")

errors.append(
f"package not (properly) installed: {p} (dpkg output: {output}) "
)

if errors:
errors = "\n".join(errors)
die(errors)
@@ -65,7 +67,7 @@ def check_host(host: str, yaml: Dict):
return
# print(f"{host} -> {reqs}")

for (key, val) in reqs.items():
for key, val in reqs.items():
if key == "apt":
check_apt(val)
elif key == "programs":
@@ -92,7 +94,7 @@ def check_file(file: str, yaml):


def check(conf):
for (cf, yaml) in conf.project_conf.items():
for cf, yaml in conf.project_conf.items():
check_file(cf, yaml)


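For context on the reformatted error message in check_apt_package: the suite inspects the last line of dpkg output and expects it to begin with "ii <package>", dpkg's marker for a package that is installed and configured. A minimal sketch of that pattern, under the assumption that the check shells out to dpkg -l (the exact invocation is not visible in this diff); it uses a slightly more whitespace-tolerant variant of the prefix check shown above:

import subprocess


def apt_package_installed(package: str) -> bool:
    # Assumption: "dpkg -l <pkg>" is how package status is queried here.
    proc = subprocess.run(["dpkg", "-l", package], capture_output=True, text=True)
    output = proc.stdout.strip()
    if not output:
        return False
    # The last line of dpkg's table describes the package itself; a leading
    # "ii" means desired=install, status=installed.
    fields = output.splitlines()[-1].split()
    return len(fields) >= 2 and fields[0] == "ii" and fields[1] == package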
77 changes: 47 additions & 30 deletions tests/integration/tests/templates.py
@@ -1,7 +1,8 @@
import os
from pathlib import Path
import stat
from collections.abc import Mapping
from typing import Any, Dict, List
from typing import Any, Dict, Generator, List

from tests.util import *
from jinja2 import Template
@@ -96,42 +97,53 @@
def render_script(template: str, out_path: str, params: Dict):
out = Template(template).render(**params)

with open(out_path, 'w') as fh:
with open(out_path, "w") as fh:
fh.writelines(out)
os.chmod(out_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)


def autogen_cargo(conf_file, yaml: Dict):
def render_stage(stage_conf: Mapping[str, Any] | None, filename: str) -> bool:
def autogen_cargo(conf_file, yaml: Dict) -> Generator[Path]:
"""
Yield generated paths.
"""

def render_stage(
stage_conf: Mapping[str, Any] | None, filename: str
) -> Generator[Path]:
"""
Yield generated paths.
"""

if not isinstance(stage_conf, Mapping):
return False
return
if not stage_conf:
return False
return

ag = stage_conf.get("autogen")
if not (ag and isinstance(ag, bool)):
return False
return

params: Dict[str, str] = {}
rustflags = stage_conf.get("rustflags")
if rustflags and isinstance(rustflags, str):
params["extra_rustflags"] = rustflags

out_path = os.path.join(
os.path.dirname(conf_file),
filename
)
out_path = os.path.join(os.path.dirname(conf_file), filename)
render_script(CARGO_SH, out_path, params)
return True
yield Path(out_path)

for key, fname in (
("cargo.transpile", "cargo.transpile.gen.sh"),
("cargo.refactor", "cargo.refactor.gen.sh"),
):
render_stage(yaml.get(key), fname)
yield from render_stage(yaml.get(key), fname)


def autogen_refactor(conf_file, yaml: Dict) -> Generator[Path]:
"""
Yield generated paths.
"""

def autogen_refactor(conf_file, yaml: Dict):
refactor = yaml.get("refactor")
if refactor and isinstance(refactor, Dict):
ag = refactor.get("autogen")
@@ -141,7 +153,9 @@ def autogen_refactor(conf_file, yaml: Dict):
# Get list of transformations from config
transforms = refactor.get("transforms")
if transforms and isinstance(transforms, list):
lines = [t.strip() for t in transforms if isinstance(t, str) and t.strip()]
lines = [
t.strip() for t in transforms if isinstance(t, str) and t.strip()
]
if lines:
params["transform_lines"] = "\n".join(lines)
elif transforms and isinstance(transforms, str):
Expand All @@ -151,14 +165,16 @@ def autogen_refactor(conf_file, yaml: Dict):

# Only generate script if we have transformations
if params["transform_lines"]:
out_path = os.path.join(
os.path.dirname(conf_file),
"refactor.gen.sh"
)
out_path = os.path.join(os.path.dirname(conf_file), "refactor.gen.sh")
render_script(REFACTOR_SH, out_path, params)
yield Path(out_path)


def autogen_transpile(conf_file, yaml: Dict):
def autogen_transpile(conf_file, yaml: Dict) -> Generator[Path]:
"""
Yield generated paths.
"""

transpile = yaml.get("transpile")
if transpile and isinstance(transpile, Dict):
ag = transpile.get("autogen")
@@ -181,16 +197,17 @@ def autogen_transpile(conf_file, yaml: Dict):
tflags = " ".join(tflags)
params["tflags"] = tflags


out_path = os.path.join(
os.path.dirname(conf_file),
"transpile.gen.sh"
)
out_path = os.path.join(os.path.dirname(conf_file), "transpile.gen.sh")
render_script(TRANSPILE_SH, out_path, params)
yield Path(out_path)


def autogen(conf: Config) -> Generator[Path]:
"""
Yield generated paths.
"""

def autogen(conf: Config):
for (cf, yaml) in conf.project_conf.items():
autogen_transpile(cf, yaml)
autogen_refactor(cf, yaml)
autogen_cargo(cf, yaml)
for cf, yaml in conf.project_conf.items():
yield from autogen_transpile(cf, yaml)
yield from autogen_refactor(cf, yaml)
yield from autogen_cargo(cf, yaml)
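The rewritten templates.py turns each autogen_* helper into a generator of the paths it writes, and the top-level autogen simply chains them with yield from. A minimal sketch of that pattern with a toy Jinja2 template (TOY_TEMPLATE, render_toy_script, autogen_all, and the file names are hypothetical placeholders, not the real CARGO_SH / TRANSPILE_SH templates):

import os
import stat
from pathlib import Path
from typing import Generator

from jinja2 import Template

TOY_TEMPLATE = "#!/bin/sh\necho {{ message }}\n"


def render_toy_script(out_path: Path, message: str) -> Generator[Path, None, None]:
    # Render the template, mark the result executable, and yield its path.
    out_path.write_text(Template(TOY_TEMPLATE).render(message=message))
    os.chmod(out_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
    yield out_path


def autogen_all(project_dir: Path) -> Generator[Path, None, None]:
    # Each per-stage helper is itself a generator, so the aggregator just
    # chains them, mirroring how autogen() above uses "yield from".
    yield from render_toy_script(project_dir / "transpile.gen.sh", "transpile")
    yield from render_toy_script(project_dir / "cargo.transpile.gen.sh", "cargo")


# The caller materializes everything into the set that test.py now passes around:
# generated_scripts = set(autogen_all(Path("some/project")))

A bare return inside one of these generators (as render_stage does when a stage is not configured) simply ends that generator without yielding anything, so the collected set only ever contains scripts that were actually written.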