diff --git a/Dockerfile b/Dockerfile index 1025b65..bc6a6f0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,15 +1,15 @@ -FROM python:3.11.2-slim +FROM python:3.11.5-alpine3.18 COPY requirements.txt /requirements.txt RUN pip install -r /requirements.txt -RUN apt-get update \ - && apt-get autoremove -y \ - && rm -rf /var/lib/apt/lists/* +RUN apk update && apk upgrade\ + && apk --no-cache add curl bash\ + && apk cache clean COPY . /opt/test-runner WORKDIR /opt/test-runner -ENTRYPOINT [ "/opt/test-runner/bin/run.sh" ] +ENTRYPOINT ["sh", "/opt/test-runner/bin/run.sh" ] \ No newline at end of file diff --git a/bin/run-in-docker.sh b/bin/run-in-docker.sh index 9f8c5da..34ca76b 100755 --- a/bin/run-in-docker.sh +++ b/bin/run-in-docker.sh @@ -1,5 +1,4 @@ -#!/usr/bin/env bash -set -e +#!/usr/bin/env sh # Synopsis: # Test runner for run.sh in a docker container @@ -19,6 +18,9 @@ set -e # Example: # ./run-in-docker.sh two-fer ./relative/path/to/two-fer/solution/folder/ ./relative/path/to/output/directory/ +# Stop executing when a command returns a non-zero return code +set -e + # If arguments not provided, print usage and exit if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then echo "usage: run-in-docker.sh exercise-slug ./relative/path/to/solution/folder/ ./relative/path/to/output/directory/" diff --git a/bin/run-tests-in-docker.sh b/bin/run-tests-in-docker.sh index f296c5b..791c69f 100755 --- a/bin/run-tests-in-docker.sh +++ b/bin/run-tests-in-docker.sh @@ -24,7 +24,7 @@ docker run \ --network none \ --read-only \ --mount type=bind,src="${PWD}/test",dst=/opt/test-runner/test \ - --mount type=volume,dst=/tmp \ + --mount type=tmpfs,dst=/tmp \ --workdir /opt/test-runner \ --entrypoint pytest \ exercism/python-test-runner -vv diff --git a/bin/run.sh b/bin/run.sh index 6e3dc08..ad46f2e 100755 --- a/bin/run.sh +++ b/bin/run.sh @@ -1,4 +1,5 @@ -#! /bin/sh +#! /usr/bin/env sh + root="$( dirname "$( cd "$( dirname "$0" )" >/dev/null 2>&1 && pwd )" )" export PYTHONPATH="$root:$PYTHONPATH" -python3 bin/run.py "$@" +/usr/bin/env python3 bin/run.py "$@" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 38486b4..e748954 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ black<=22.3.0 pytest~=7.2.2 -pytest-subtests~=0.10.0 +pytest-subtests~=0.11.0 tomli>=1.1.0; python_full_version < '3.11.2' diff --git a/runner/__init__.py b/runner/__init__.py index 7ec9375..80c07b6 100644 --- a/runner/__init__.py +++ b/runner/__init__.py @@ -15,6 +15,7 @@ from .sort import TestOrder + class ResultsReporter: def __init__(self): self.results = Results() @@ -37,7 +38,6 @@ def pytest_collection_modifyitems(self, session, config, items): for mark in item.iter_markers(name='task'): self.tests[name] = Test(name=name, task_id=mark.kwargs['taskno']) - def _sort_by_lineno(item): test_id = Hierarchy(item.nodeid) source = Path(item.fspath) @@ -50,28 +50,60 @@ def pytest_runtest_logreport(self, report): Process a test setup / call / teardown report. """ - name = report.head_line if report.head_line else ".".join(report.nodeid.split("::")[1:]) + name = ".".join(report.nodeid.split("::")[1:]) + if report.head_line: + name = report.head_line.split(" (")[0] + #Add variation name to test output. if name not in self.tests: self.tests[name] = Test(name) - state = self.tests[name] # ignore successful setup and teardown stages if report.passed and report.when != "call": return - # Update tests that have already failed with capstdout and return. 
+ #Update tests that have already failed with capstdout and return. if not state.is_passing(): - if report.capstdout.rstrip('FFFFFFFF ').rstrip('uuuuu'): - state.output = report.capstdout.rstrip('FFFFFFFF ').rstrip('uuuuu') + + #Check if a report is a concept exercise subtest parent. + if report.capstdout: + + #split up the captured stdout by subtest result. + captures = [item for item in report.capstdout.split('\nu')] + if captures[0].startswith('u'): + captures[0] = captures[0][1:] + + parsed_captures = [] + + # Insert spacers for subtests and stdout entries in correct position. + for item in captures: + empties = len(item) - len(item.lstrip('u')) + if empties > 0: + for number in range(1, empties+1): + parsed_captures.append(' ') + parsed_captures.append(item.lstrip('u')) + else: parsed_captures.append(item) + + # Generate variation numbers for each subtest output section. + variants = (f'[variation #{number}]: {item}' for + item, number in zip(parsed_captures, range(1, len(parsed_captures)+1))) + + # Go through the variations and match them to self.tests. + # Insert matched variation output into test output field. + for item in variants: + for name in self.tests: + if item.split(":")[0] in name and report.nodeid.split("::")[2] in name: + self.tests[name].output = item.split("]: ")[1] + else: + state.output = report.capstdout return - # Record captured relevant stdout content for passed tests. - if report.capstdout: - state.output = report.capstdout + else: + if report.capstdout: + state.output = report.capstdout # Handle details of test failure if report.failed: @@ -108,7 +140,6 @@ def pytest_runtest_logreport(self, report): ) self.tests[parent_test_name].test_code = state.test_code - def pytest_sessionfinish(self, session, exitstatus): """ Processes the results into a report. @@ -209,6 +240,7 @@ def run(slug: Slug, indir: Directory, outdir: Directory, args: List[str]) -> Non # dump the report out_file.write_text(reporter.results.as_json()) + # remove cache directories for cache_dir in ['.pytest_cache', '__pycache__']: dirpath = indir / cache_dir diff --git a/runner/data.py b/runner/data.py index 3597fda..24a1cc9 100644 --- a/runner/data.py +++ b/runner/data.py @@ -79,6 +79,7 @@ def output(self, captured: Output) -> None: return captured = captured.strip() + truncate_msg = " [Output was truncated. Please limit to 500 chars]" if len(captured) > 500: captured = captured[: 500 - len(truncate_msg)] + truncate_msg @@ -141,9 +142,7 @@ def error(self, message: Message = None) -> None: def _factory(items): result = {} for key, value in items: - if key == "_output" or key in {"message", "output", "subtest"} and value is None: - continue - elif key == "_output" or key in {"message", "output", "subtest"} and "\u001b[31mF\u001b[0m" in value: + if key == "_output" or key in {"message", "output", "subtest"} and value in (None, "", " "): continue if isinstance(value, Status): diff --git a/test/example-all-fail-tasks-and-subtests/results.json b/test/example-all-fail-tasks-and-subtests/results.json index b4afc25..795ba7e 100644 --- a/test/example-all-fail-tasks-and-subtests/results.json +++ b/test/example-all-fail-tasks-and-subtests/results.json @@ -17,84 +17,84 @@ "task_id": 1 }, { - "name": "ExampleAllFail > hello [variation #1] (param=15, result=('Hello, World!', 15))", + "name": "ExampleAllFail > hello [variation #1]", "status": "fail", "message": "AssertionError: 'Goodbye!' 
!= ('Hello, World!', 15) : Expected: ('Hello, World!', 15) but got something else instead.", "test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > hello [variation #2] (param=23, result=('Hello, World!', 23))", + "name": "ExampleAllFail > hello [variation #2]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 23) : Expected: ('Hello, World!', 23) but got something else instead.", "test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > hello [variation #3] (param=33, result=('Hello, World!', 33))", + "name": "ExampleAllFail > hello [variation #3]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 33) : Expected: ('Hello, World!', 33) but got something else instead.", "test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > hello [variation #4] (param=39, result=('Hello, World!', 39))", + "name": "ExampleAllFail > hello [variation #4]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 39) : Expected: ('Hello, World!', 39) but got something else instead.", "test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #1] (param='frog', result=('Hello, World!', 'frog'))", + "name": "ExampleAllFail > abc [variation #1]", "status": "fail", "message": "AssertionError: 'Goodbye!' 
!= ('Hello, World!', 'frog') : Expected: ('Hello, World!', 'frog') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #2] (param='fish', result=('Hello, World!', 'fish'))", + "name": "ExampleAllFail > abc [variation #2]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'fish') : Expected: ('Hello, World!', 'fish') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #3] (param='coconut', result=('Hello, World!', 'coconut'))", + "name": "ExampleAllFail > abc [variation #3]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'coconut') : Expected: ('Hello, World!', 'coconut') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #4] (param='pineapple', result=('Hello, World!', 'pineapple'))", + "name": "ExampleAllFail > abc [variation #4]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'pineapple') : Expected: ('Hello, World!', 'pineapple') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #5] (param='carrot', result=('Hello, World!', 'carrot'))", + "name": "ExampleAllFail > abc [variation #5]", "status": "fail", "message": "AssertionError: 'Goodbye!' 
!= ('Hello, World!', 'carrot') : Expected: ('Hello, World!', 'carrot') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #6] (param='cucumber', result=('Hello, World!', 'cucumber'))", + "name": "ExampleAllFail > abc [variation #6]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'cucumber') : Expected: ('Hello, World!', 'cucumber') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #7] (param='grass', result=('Hello, World!', 'grass'))", + "name": "ExampleAllFail > abc [variation #7]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'grass') : Expected: ('Hello, World!', 'grass') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 1 }, { - "name": "ExampleAllFail > abc [variation #8] (param='tree', result=('Hello, World!', 'tree'))", + "name": "ExampleAllFail > abc [variation #8]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'tree') : Expected: ('Hello, World!', 'tree') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", @@ -115,84 +115,84 @@ "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #1] (param='frog', result=('Hello, World!', 'frog'))", + "name": "ExampleAllFailOther > dummy [variation #1]", "status": "fail", "message": "AssertionError: 'Goodbye!' 
!= ('Hello, World!', 'frog') : Expected: ('Hello, World!', 'frog') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #2] (param='fish', result=('Hello, World!', 'fish'))", + "name": "ExampleAllFailOther > dummy [variation #2]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'fish') : Expected: ('Hello, World!', 'fish') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #3] (param='coconut', result=('Hello, World!', 'coconut'))", + "name": "ExampleAllFailOther > dummy [variation #3]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'coconut') : Expected: ('Hello, World!', 'coconut') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #4] (param='pineapple', result=('Hello, World!', 'pineapple'))", + "name": "ExampleAllFailOther > dummy [variation #4]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'pineapple') : Expected: ('Hello, World!', 'pineapple') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #5] (param='carrot', result=('Hello, World!', 'carrot'))", + "name": "ExampleAllFailOther > dummy [variation #5]", "status": "fail", "message": "AssertionError: 'Goodbye!' 
!= ('Hello, World!', 'carrot') : Expected: ('Hello, World!', 'carrot') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #6] (param='cucumber', result=('Hello, World!', 'cucumber'))", + "name": "ExampleAllFailOther > dummy [variation #6]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'cucumber') : Expected: ('Hello, World!', 'cucumber') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #7] (param='grass', result=('Hello, World!', 'grass'))", + "name": "ExampleAllFailOther > dummy [variation #7]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'grass') : Expected: ('Hello, World!', 'grass') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > dummy [variation #8] (param='tree', result=('Hello, World!', 'tree'))", + "name": "ExampleAllFailOther > dummy [variation #8]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 'tree') : Expected: ('Hello, World!', 'tree') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > hello [variation #1] (param=1, result=('Hello, World!', 1))", + "name": "ExampleAllFailOther > hello [variation #1]", "status": "fail", "message": "AssertionError: 'Goodbye!' 
!= ('Hello, World!', 1) : Expected: ('Hello, World!', 1) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > hello [variation #2] (param=2, result=('Hello, World!', 2))", + "name": "ExampleAllFailOther > hello [variation #2]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 2) : Expected: ('Hello, World!', 2) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > hello [variation #3] (param=5, result=('Hello, World!', 5))", + "name": "ExampleAllFailOther > hello [variation #3]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 5) : Expected: ('Hello, World!', 5) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", "task_id": 2 }, { - "name": "ExampleAllFailOther > hello [variation #4] (param=10, result=('Hello, World!', 10))", + "name": "ExampleAllFailOther > hello [variation #4]", "status": "fail", "message": "AssertionError: 'Goodbye!' != ('Hello, World!', 10) : Expected: ('Hello, World!', 10) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n failure_msg=f'Expected: {result} but got something else instead.'\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result, msg=failure_msg)", diff --git a/test/example-has-stdout-and-tasks/example_has_stdout_and_tasks_test.py b/test/example-has-stdout-and-tasks/example_has_stdout_and_tasks_test.py index dc808f3..7693b12 100644 --- a/test/example-has-stdout-and-tasks/example_has_stdout_and_tasks_test.py +++ b/test/example-has-stdout-and-tasks/example_has_stdout_and_tasks_test.py @@ -16,7 +16,7 @@ def test_abc(self): self.assertEqual(hello(), "Hello, World!") @pytest.mark.task(taskno=3) - def test_trancation(self): + def test_truncation(self): self.assertEqual(must_truncate(), "Hello, World!") diff --git a/test/example-has-stdout-and-tasks/results.json b/test/example-has-stdout-and-tasks/results.json index 338f6c9..31f9c7b 100644 --- a/test/example-has-stdout-and-tasks/results.json +++ b/test/example-has-stdout-and-tasks/results.json @@ -19,7 +19,7 @@ "output": "Hello, World!" 
}, { - "name": "ExampleHasStdoutAndTasks > trancation", + "name": "ExampleHasStdoutAndTasks > truncation", "status": "fail", "message": "AssertionError: 'Goodbye!' != 'Hello, World!'\n- Goodbye!\n+ Hello, World!", "test_code": "self.assertEqual(must_truncate(), \"Hello, World!\")", diff --git a/test/example-has-stdout-tasks-and-subtests/results.json b/test/example-has-stdout-tasks-and-subtests/results.json index 9f63c2e..6cc4651 100644 --- a/test/example-has-stdout-tasks-and-subtests/results.json +++ b/test/example-has-stdout-tasks-and-subtests/results.json @@ -7,128 +7,142 @@ "status": "fail", "message": "One or more variations of this test failed. Details can be found under each [variant#].", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1, - "output": "Hello, World! 1\nuHello, World! 2\nuHello, World! 5\nuHello, World! 10\nuHello, World! 15\nuHello, World! 23\nuHello, World! 33\nuHello, World! 39" + "task_id": 1 }, { - "name": "ExampleHasStdout > hello [variation #1] (param=1, result=('Hello, World!', 1))", + "name": "ExampleHasStdout > hello [variation #1]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 1) : Expected: ('Hello, World!', 1) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 1" }, { - "name": "ExampleHasStdout > hello [variation #2] (param=2, result=('Hello, World!', 2))", + "name": "ExampleHasStdout > hello [variation #2]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 2) : Expected: ('Hello, World!', 2) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 
2" }, { - "name": "ExampleHasStdout > hello [variation #3] (param=5, result=('Hello, World!', 5))", + "name": "ExampleHasStdout > hello [variation #3]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 5) : Expected: ('Hello, World!', 5) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 5" }, { - "name": "ExampleHasStdout > hello [variation #4] (param=10, result=('Hello, World!', 10))", + "name": "ExampleHasStdout > hello [variation #4]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 10) : Expected: ('Hello, World!', 10) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 10" }, { - "name": "ExampleHasStdout > hello [variation #5] (param=15, result=('Hello, World!', 15))", + "name": "ExampleHasStdout > hello [variation #5]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 15) : Expected: ('Hello, World!', 15) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 15" }, { - "name": "ExampleHasStdout > hello [variation #6] (param=23, result=('Hello, World!', 23))", + "name": "ExampleHasStdout > hello [variation #6]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 23) : Expected: ('Hello, World!', 23) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 
23" }, { - "name": "ExampleHasStdout > hello [variation #7] (param=33, result=('Hello, World!', 33))", + "name": "ExampleHasStdout > hello [variation #7]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 33) : Expected: ('Hello, World!', 33) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 33" }, { - "name": "ExampleHasStdout > hello [variation #8] (param=39, result=('Hello, World!', 39))", + "name": "ExampleHasStdout > hello [variation #8]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 39) : Expected: ('Hello, World!', 39) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 1 + "task_id": 1, + "output": "Hello, World! 39" }, { "name": "ExampleHasStdout > abc", "status": "fail", "message": "One or more variations of this test failed. Details can be found under each [variant#].", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2, - "output": "Hello, World! frog\nuHello, World! fish\nuHello, World! coconut\nuHello, World! pineapple\nuHello, World! carrot\nuHello, World! cucumber\nuHello, World! grass\nuHello, World! tree" + "task_id": 2 }, { - "name": "ExampleHasStdout > abc [variation #1] (param='frog', result=('Hello, World!', 'frog'))", + "name": "ExampleHasStdout > abc [variation #1]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'frog') : Expected: ('Hello, World!', 'frog') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! 
frog" }, { - "name": "ExampleHasStdout > abc [variation #2] (param='fish', result=('Hello, World!', 'fish'))", + "name": "ExampleHasStdout > abc [variation #2]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'fish') : Expected: ('Hello, World!', 'fish') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! fish" }, { - "name": "ExampleHasStdout > abc [variation #3] (param='coconut', result=('Hello, World!', 'coconut'))", + "name": "ExampleHasStdout > abc [variation #3]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'coconut') : Expected: ('Hello, World!', 'coconut') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! coconut" }, { - "name": "ExampleHasStdout > abc [variation #4] (param='pineapple', result=('Hello, World!', 'pineapple'))", + "name": "ExampleHasStdout > abc [variation #4]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'pineapple') : Expected: ('Hello, World!', 'pineapple') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! pineapple" }, { - "name": "ExampleHasStdout > abc [variation #5] (param='carrot', result=('Hello, World!', 'carrot'))", + "name": "ExampleHasStdout > abc [variation #5]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'carrot') : Expected: ('Hello, World!', 'carrot') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! 
carrot" }, { - "name": "ExampleHasStdout > abc [variation #6] (param='cucumber', result=('Hello, World!', 'cucumber'))", + "name": "ExampleHasStdout > abc [variation #6]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'cucumber') : Expected: ('Hello, World!', 'cucumber') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! cucumber" }, { - "name": "ExampleHasStdout > abc [variation #7] (param='grass', result=('Hello, World!', 'grass'))", + "name": "ExampleHasStdout > abc [variation #7]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'grass') : Expected: ('Hello, World!', 'grass') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! grass" }, { - "name": "ExampleHasStdout > abc [variation #8] (param='tree', result=('Hello, World!', 'tree'))", + "name": "ExampleHasStdout > abc [variation #8]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'tree') : Expected: ('Hello, World!', 'tree') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 2 + "task_id": 2, + "output": "Hello, World! tree" }, { "name": "ExampleHasStdout > truncation", @@ -143,128 +157,142 @@ "status": "fail", "message": "One or more variations of this test failed. Details can be found under each [variant#].", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4, - "output": "Hello, World! frog\nuHello, World! fish\nuHello, World! coconut\nuHello, World! pineapple\nuHello, World! carrot\nuHello, World! cucumber\nuHello, World! grass\nuHello, World! 
tree" + "task_id": 4 }, { - "name": "ExampleHasStdoutOther > dummy [variation #1] (param='frog', result=('Hello, World!', 'frog'))", + "name": "ExampleHasStdoutOther > dummy [variation #1]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'frog') : Expected: ('Hello, World!', 'frog') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! frog" }, { - "name": "ExampleHasStdoutOther > dummy [variation #2] (param='fish', result=('Hello, World!', 'fish'))", + "name": "ExampleHasStdoutOther > dummy [variation #2]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'fish') : Expected: ('Hello, World!', 'fish') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! fish" }, { - "name": "ExampleHasStdoutOther > dummy [variation #3] (param='coconut', result=('Hello, World!', 'coconut'))", + "name": "ExampleHasStdoutOther > dummy [variation #3]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'coconut') : Expected: ('Hello, World!', 'coconut') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! coconut" }, { - "name": "ExampleHasStdoutOther > dummy [variation #4] (param='pineapple', result=('Hello, World!', 'pineapple'))", + "name": "ExampleHasStdoutOther > dummy [variation #4]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'pineapple') : Expected: ('Hello, World!', 'pineapple') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! 
pineapple" }, { - "name": "ExampleHasStdoutOther > dummy [variation #5] (param='carrot', result=('Hello, World!', 'carrot'))", + "name": "ExampleHasStdoutOther > dummy [variation #5]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'carrot') : Expected: ('Hello, World!', 'carrot') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! carrot" }, { - "name": "ExampleHasStdoutOther > dummy [variation #6] (param='cucumber', result=('Hello, World!', 'cucumber'))", + "name": "ExampleHasStdoutOther > dummy [variation #6]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'cucumber') : Expected: ('Hello, World!', 'cucumber') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! cucumber" }, { - "name": "ExampleHasStdoutOther > dummy [variation #7] (param='grass', result=('Hello, World!', 'grass'))", + "name": "ExampleHasStdoutOther > dummy [variation #7]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'grass') : Expected: ('Hello, World!', 'grass') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! grass" }, { - "name": "ExampleHasStdoutOther > dummy [variation #8] (param='tree', result=('Hello, World!', 'tree'))", + "name": "ExampleHasStdoutOther > dummy [variation #8]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 'tree') : Expected: ('Hello, World!', 'tree') but got something else instead.", "test_code": "input_data = ['frog', 'fish', 'coconut', 'pineapple', 'carrot', 'cucumber', 'grass', 'tree']\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 4 + "task_id": 4, + "output": "Hello, World! 
tree" }, { "name": "ExampleHasStdoutOther > hello", "status": "fail", "message": "One or more variations of this test failed. Details can be found under each [variant#].", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5, - "output": "Hello, World! 1\nuHello, World! 2\nuHello, World! 5\nuHello, World! 10\nuHello, World! 15\nuHello, World! 23\nuHello, World! 33\nuHello, World! 39" + "task_id": 5 }, { - "name": "ExampleHasStdoutOther > hello [variation #1] (param=1, result=('Hello, World!', 1))", + "name": "ExampleHasStdoutOther > hello [variation #1]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 1) : Expected: ('Hello, World!', 1) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 1" }, { - "name": "ExampleHasStdoutOther > hello [variation #2] (param=2, result=('Hello, World!', 2))", + "name": "ExampleHasStdoutOther > hello [variation #2]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 2) : Expected: ('Hello, World!', 2) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 2" }, { - "name": "ExampleHasStdoutOther > hello [variation #3] (param=5, result=('Hello, World!', 5))", + "name": "ExampleHasStdoutOther > hello [variation #3]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 5) : Expected: ('Hello, World!', 5) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 
5" }, { - "name": "ExampleHasStdoutOther > hello [variation #4] (param=10, result=('Hello, World!', 10))", + "name": "ExampleHasStdoutOther > hello [variation #4]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 10) : Expected: ('Hello, World!', 10) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 10" }, { - "name": "ExampleHasStdoutOther > hello [variation #5] (param=15, result=('Hello, World!', 15))", + "name": "ExampleHasStdoutOther > hello [variation #5]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 15) : Expected: ('Hello, World!', 15) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 15" }, { - "name": "ExampleHasStdoutOther > hello [variation #6] (param=23, result=('Hello, World!', 23))", + "name": "ExampleHasStdoutOther > hello [variation #6]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 23) : Expected: ('Hello, World!', 23) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 23" }, { - "name": "ExampleHasStdoutOther > hello [variation #7] (param=33, result=('Hello, World!', 33))", + "name": "ExampleHasStdoutOther > hello [variation #7]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 33) : Expected: ('Hello, World!', 33) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 
33" }, { - "name": "ExampleHasStdoutOther > hello [variation #8] (param=39, result=('Hello, World!', 39))", + "name": "ExampleHasStdoutOther > hello [variation #8]", "status": "fail", "message": "AssertionError: None != ('Hello, World!', 39) : Expected: ('Hello, World!', 39) but got something else instead.", "test_code": "input_data = [1, 2, 5, 10, 15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\nnumber_of_variants = range(1, len(input_data) + 1)\n\nfor variant, param, result in zip(number_of_variants, input_data, result_data):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')", - "task_id": 5 + "task_id": 5, + "output": "Hello, World! 39" } ] } \ No newline at end of file diff --git a/test/example-has-stdout/example_has_stdout.py b/test/example-has-stdout/example_has_stdout.py index c11070f..0266584 100644 --- a/test/example-has-stdout/example_has_stdout.py +++ b/test/example-has-stdout/example_has_stdout.py @@ -19,3 +19,9 @@ def must_truncate(): Porta non pulvinar neque laoreet suspendisse interdum consectetur libero. Id faucibus nisl tincidunt eget nullam. Ultricies lacus sed turpis tincidunt id. Hendrerit dolor magna eget est lorem ipsum. Enim ut sem viverra aliquet. Eget nulla facilisi etiam dignissim diam quis enim lobortis scelerisque. Ac tortor dignissim convallis aenean et tortor at. Non tellus orci ac auctor augue. Nec dui nunc mattis enim ut tellus. Eget nunc lobortis mattis aliquam faucibus purus in massa tempor. Elementum nibh tellus molestie nunc. Ornare lectus sit amet est placerat in. Nec feugiat in fermentum posuere urna nec tincidunt praesent. Vestibulum rhoncus est pellentesque elit. Mollis nunc sed id semper risus in. Vitae elementum curabitur vitae nunc sed velit. Duis tristique sollicitudin nibh sit amet commodo nulla facilisi.""" ) return "Goodbye!" + + +def other(): + print("Hello, World!") + print(5, 6, 7) + return "hello, world." diff --git a/test/example-has-stdout/example_has_stdout_test.py b/test/example-has-stdout/example_has_stdout_test.py index 2beb12c..75b464e 100644 --- a/test/example-has-stdout/example_has_stdout_test.py +++ b/test/example-has-stdout/example_has_stdout_test.py @@ -1,7 +1,7 @@ import unittest -from example_has_stdout import hello, must_truncate +from example_has_stdout import hello, must_truncate, other class ExampleHasStdoutTest(unittest.TestCase): @@ -11,16 +11,16 @@ def test_hello(self): def test_abc(self): self.assertEqual(hello(), "Hello, World!") - def test_trancation(self): + def test_truncation(self): self.assertEqual(must_truncate(), "Hello, World!") class ExampleHasStdoutOtherTest(unittest.TestCase): def test_dummy(self): - self.assertEqual(hello(), "Hello, World!") + self.assertEqual(other(), "Hello, World!") def test_hello(self): - self.assertEqual(hello(), "Hello, World!") + self.assertEqual(other(), "Hello, World!") if __name__ == "__main__": diff --git a/test/example-has-stdout/results.json b/test/example-has-stdout/results.json index f4d4e91..9a22c7b 100644 --- a/test/example-has-stdout/results.json +++ b/test/example-has-stdout/results.json @@ -19,7 +19,7 @@ "output": "Hello, World!" }, { - "name": "ExampleHasStdout > trancation", + "name": "ExampleHasStdout > truncation", "status": "fail", "message": "AssertionError: 'Goodbye!' 
!= 'Hello, World!'\n- Goodbye!\n+ Hello, World!", "test_code": "self.assertEqual(must_truncate(), \"Hello, World!\")", @@ -29,18 +29,18 @@ { "name": "ExampleHasStdoutOther > dummy", "status": "fail", - "message": "AssertionError: None != 'Hello, World!'", - "test_code": "self.assertEqual(hello(), \"Hello, World!\")", + "message": "AssertionError: 'hello, world.' != 'Hello, World!'\n- hello, world.\n? ^ ^ ^\n+ Hello, World!\n? ^ ^ ^", + "test_code": "self.assertEqual(other(), \"Hello, World!\")", "task_id": 0, - "output": "Hello, World!" + "output": "Hello, World!\n5 6 7" }, { "name": "ExampleHasStdoutOther > hello", "status": "fail", - "message": "AssertionError: None != 'Hello, World!'", - "test_code": "self.assertEqual(hello(), \"Hello, World!\")", + "message": "AssertionError: 'hello, world.' != 'Hello, World!'\n- hello, world.\n? ^ ^ ^\n+ Hello, World!\n? ^ ^ ^", + "test_code": "self.assertEqual(other(), \"Hello, World!\")", "task_id": 0, - "output": "Hello, World!" + "output": "Hello, World!\n5 6 7" } ] } \ No newline at end of file diff --git a/test/example-partial-failure-with-subtests-and-stdout/example_partial_failure_with_subtests_and_stdout.py b/test/example-partial-failure-with-subtests-and-stdout/example_partial_failure_with_subtests_and_stdout.py new file mode 100644 index 0000000..967a508 --- /dev/null +++ b/test/example-partial-failure-with-subtests-and-stdout/example_partial_failure_with_subtests_and_stdout.py @@ -0,0 +1,65 @@ +"""This example code is adapted from a student example that can be +found here on our Discord server: +https://discord.com/channels/854117591135027261/1145433700054601759 + +It is pulled from the "Meltdown Mitigation" concept exercise for Python. + +Two issues came out of the example. +1. The subtest "u" representing failed subtests. + (see these lines of code in the pytest-subtests plugin for why that is being inserted: + https://github.com/pytest-dev/pytest-subtests/blob/main/src/pytest_subtests.py#L306-L308) + +2. The dumping of the stdout of subtests into the parent test. + See issue https://github.com/exercism/python-test-runner/issues/67 for more details. + +The fix is a reverse-engineer that parses the +parent output and places it in each subtest report output field. + +This was to avoid having to patch the pytest-subtests plugin or +import it into our test runner plugin to override it. + +This test case ensures that the reverse-engineering works for cases where not all +failing subtests have output. +""" + +def is_criticality_balanced(temperature, neutrons_emitted): + + if temperature < 800 and neutrons_emitted > 500 and temperature * neutrons_emitted < 500000: + return True + elif temperature > 800 and neutrons_emitted < 500 and temperature * neutrons_emitted > 500000: + print(temperature) + return False + else: + return False + + +def reactor_efficiency(voltage, current, theoretical_max_power): + generated_power = voltage * current + percentage_value = (generated_power / theoretical_max_power) * 100 + + if percentage_value < percentage_value: + print('green') + elif 30 < percentage_value <= 60: + print('orange') + elif percentage_value > 60 and percentage_value <= 30: + print('red') + elif percentage_value < 30: + print('black') + + +def fail_safe(temperature, neutrons_produced_per_second, threshold): + """Assess and return status code for the reactor. + + :param temperature: int or float - value of the temperature in kelvin. + :param neutrons_produced_per_second: int or float - neutron flux. 
+ :param threshold: int or float - threshold for category. + :return: str - one of ('LOW', 'NORMAL', 'DANGER'). + + 1. 'LOW' -> `temperature * neutrons per second` < 90% of `threshold` + 2. 'NORMAL' -> `temperature * neutrons per second` +/- 10% of `threshold` + 3. 'DANGER' -> `temperature * neutrons per second` is not in the above-stated ranges + """ + + print('Ouptut Captured!!') + print("""Id donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum lacinia quis. Enim lobortis scelerisque fermentum dui faucibus in ornare quam. Dolor sit amet consectetur adipiscing elit duis tristique sollicitudin. Orci dapibus ultrices in iaculis nunc. Magna etiam tempor orci eu. Gravida in fermentum et sollicitudin ac orci phasellus egestas tellus. Amet nisl purus in mollis nunc sed. Odio ut sem nulla pharetra diam sit amet. Mi tempus imperdiet nulla malesuada pellentesque elit. Vulputate mi sit amet mauris. Feugiat vivamus at augue eget. Et leo duis ut diam quam nulla porttitor massa. Tincidunt lobortis feugiat vivamus at.""") + diff --git a/test/example-partial-failure-with-subtests-and-stdout/example_partial_failure_with_subtests_and_stdout_test.py b/test/example-partial-failure-with-subtests-and-stdout/example_partial_failure_with_subtests_and_stdout_test.py new file mode 100644 index 0000000..6f8ef1d --- /dev/null +++ b/test/example-partial-failure-with-subtests-and-stdout/example_partial_failure_with_subtests_and_stdout_test.py @@ -0,0 +1,97 @@ +import unittest +import pytest +from example_partial_failure_with_subtests_and_stdout import ( + is_criticality_balanced, + reactor_efficiency, + fail_safe +) + + +class ExamplePartialFailureWithSubtestsAndStdoutTest(unittest.TestCase): + """Test cases for Meltdown mitigation exercise. + """ + + @pytest.mark.task(taskno=1) + def test_is_criticality_balanced_with_passes(self): + """Testing border cases around typical points. + + T, n == (800, 500), (625, 800), (500, 1000), etc. + + No output should be generated in the test report here, since + passing subtests are not reported on. + + """ + + test_data = ((750, 650, True), (799, 501, True), (500, 600, True), + (1000, 800, False), (800, 500, False), (800, 500.01, False), + (799.99, 500, False), (500.01, 999.99, False), (625, 800, False), + (625.99, 800, False), (625.01, 799.99, False), (799.99, 500.01, True), + (624.99, 799.99, True), (500, 1000, False), (500.01, 1000, False), + (499.99, 1000, True)) + + for variant, data in enumerate(test_data, start=1): + temp, neutrons_emitted, expected = data + with self.subTest(f'variation #{variant}', temp=temp, neutrons_emitted=neutrons_emitted, expected=expected): + + # pylint: disable=assignment-from-no-return + actual_result = is_criticality_balanced(temp, neutrons_emitted) + failure_message = (f'Expected {expected} but calling is_criticality_balanced(temp={temp}, ' + f'neutrons_emitted={neutrons_emitted}) returned {actual_result}.') + # f'with T={temp} and neutrons={neutrons_emitted}') + self.assertEqual(actual_result, expected, failure_message) + + @pytest.mark.task(taskno=2) + def test_reactor_efficiency_with_some_subtest_output(self): + """Partial failure and output in the test report. 
+ This should happen for: + - variants 5, 6, 7, 8, 10, 11, and 12. + + No output fields should be present for: + - the parent or for variations 1, 2, 3, 4, 9 or 13. + """ + + voltage = 10 + theoretical_max_power = 10000 + + # The numbers are chosen so that current == 10 x percentage + test_data = ((1000, 'green'), (999, 'green'), (800, 'green'), + (799, 'orange'), (700, 'orange'), (600, 'orange'), + (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'), + (299, 'black'), (200, 'black'), (0, 'black')) + + for variant, data in enumerate(test_data, start=1): + current, expected = data + with self.subTest(f'variation #{variant}', voltage=voltage, current=current, + theoretical_max_power=theoretical_max_power, expected=expected): + + # pylint: disable=assignment-from-no-return + actual_result = reactor_efficiency(voltage, current, theoretical_max_power) + failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, ' + f'current={current}, theoretical_max_power={theoretical_max_power}) ' + f'returned {actual_result} ') + # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}') + self.assertEqual(actual_result, expected, failure_message) + + @pytest.mark.task(taskno=3) + def test_fail_safe_with_output_truncation(self): + """All variations of this should fail and appear in the test report. + - All variations should have output. + - All output should have truncation warnings. + """ + + temp = 10 + threshold = 10000 + test_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'), + (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'), + (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'), + (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER')) + + for variant, (neutrons_per_second, expected) in enumerate(test_data, start=1): + with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second, + threshold=threshold, expected=expected): + + # pylint: disable=assignment-from-no-return + actual_result = fail_safe(temp, neutrons_per_second, threshold) + failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, ' + f'neutrons={neutrons_per_second}, threshold={threshold}') + self.assertEqual(actual_result, expected, failure_message) diff --git a/test/example-partial-failure-with-subtests-and-stdout/results.json b/test/example-partial-failure-with-subtests-and-stdout/results.json new file mode 100644 index 0000000..1150e8f --- /dev/null +++ b/test/example-partial-failure-with-subtests-and-stdout/results.json @@ -0,0 +1,220 @@ +{ + "version": 3, + "status": "fail", + "tests": [ + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > is criticality balanced with passes", + "status": "pass", + "test_code": "\"\"\"Testing border cases around typical points.\n\nT, n == (800, 500), (625, 800), (500, 1000), etc.\n\nNo output should be generated in the test report here, since\npassing subtests are not reported on.\n\n\"\"\"\n\ntest_data = ((750, 650, True), (799, 501, True), (500, 600, True),\n (1000, 800, False), (800, 500, False), (800, 500.01, False),\n (799.99, 500, False), (500.01, 999.99, False), (625, 800, False),\n (625.99, 800, False), (625.01, 799.99, False), (799.99, 500.01, True),\n (624.99, 799.99, True), (500, 1000, False), (500.01, 1000, False),\n (499.99, 1000, True))\n\nfor variant, data in enumerate(test_data, start=1):\n temp, neutrons_emitted, expected = data\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_emitted=neutrons_emitted, expected=expected):\n\n # pylint: 
disable=assignment-from-no-return\n actual_result = is_criticality_balanced(temp, neutrons_emitted)\n failure_message = (f'Expected {expected} but calling is_criticality_balanced(temp={temp}, '\n f'neutrons_emitted={neutrons_emitted}) returned {actual_result}.')\n # f'with T={temp} and neutrons={neutrons_emitted}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 1 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output", + "status": "fail", + "message": "One or more variations of this test failed. Details can be found under each [variant#].", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #1]", + "status": "fail", + "message": "AssertionError: None != 'green' : Expected green as a result, but calling reactor_efficiency(voltage=10, current=1000, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2 + 
}, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #2]", + "status": "fail", + "message": "AssertionError: None != 'green' : Expected green as a result, but calling reactor_efficiency(voltage=10, current=999, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #3]", + "status": "fail", + "message": "AssertionError: None != 'green' : Expected green as a result, but calling reactor_efficiency(voltage=10, current=800, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #4]", + "status": "fail", + "message": "AssertionError: None != 'orange' : Expected orange as a result, but calling reactor_efficiency(voltage=10, current=799, theoretical_max_power=10000) 
returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #5]", + "status": "fail", + "message": "AssertionError: None != 'orange' : Expected orange as a result, but calling reactor_efficiency(voltage=10, current=700, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2, + "output": "orange" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #6]", + "status": "fail", + "message": "AssertionError: None != 'orange' : Expected orange as a result, but calling reactor_efficiency(voltage=10, current=600, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 
10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2, + "output": "orange" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #7]", + "status": "fail", + "message": "AssertionError: None != 'red' : Expected red as a result, but calling reactor_efficiency(voltage=10, current=599, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2, + "output": "orange" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #8]", + "status": "fail", + "message": "AssertionError: None != 'red' : Expected red as a result, but calling reactor_efficiency(voltage=10, current=560, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n 
(299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2, + "output": "orange" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #9]", + "status": "fail", + "message": "AssertionError: None != 'red' : Expected red as a result, but calling reactor_efficiency(voltage=10, current=400, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #10]", + "status": "fail", + "message": "AssertionError: None != 'red' : Expected red as a result, but calling reactor_efficiency(voltage=10, current=300, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: 
disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2, + "output": "black" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #11]", + "status": "fail", + "message": "AssertionError: None != 'black' : Expected black as a result, but calling reactor_efficiency(voltage=10, current=299, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2, + "output": "black" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #12]", + "status": "fail", + "message": "AssertionError: None != 'black' : Expected black as a result, but calling reactor_efficiency(voltage=10, current=200, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, 
theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2, + "output": "black" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > reactor efficiency with some subtest output [variation #13]", + "status": "fail", + "message": "AssertionError: None != 'black' : Expected black as a result, but calling reactor_efficiency(voltage=10, current=0, theoretical_max_power=10000) returned None", + "test_code": "\"\"\"Partial failure and output in the test report.\nThis should happen for:\n - variants 5, 6, 7, 8, 10, 11, and 12.\n\nNo output fields should be present for:\n - the parent or for variations 1, 2, 3, 4, 9 or 13.\n\"\"\"\n\nvoltage = 10\ntheoretical_max_power = 10000\n\n# The numbers are chosen so that current == 10 x percentage\ntest_data = ((1000, 'green'), (999, 'green'), (800, 'green'),\n (799, 'orange'), (700, 'orange'), (600, 'orange'),\n (599, 'red'), (560, 'red'), (400, 'red'), (300, 'red'),\n (299, 'black'), (200, 'black'), (0, 'black'))\n\nfor variant, data in enumerate(test_data, start=1):\n current, expected = data\n with self.subTest(f'variation #{variant}', voltage=voltage, current=current,\n theoretical_max_power=theoretical_max_power, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = reactor_efficiency(voltage, current, theoretical_max_power)\n failure_message = (f'Expected {expected} as a result, but calling reactor_efficiency(voltage={voltage}, '\n f'current={current}, theoretical_max_power={theoretical_max_power}) '\n f'returned {actual_result} ')\n # f'with voltage={voltage}, current={current}, max_pow={theoretical_max_power}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 2 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation", + "status": "fail", + "message": "One or more variations of this test failed. 
Details can be found under each [variant#].", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3 + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #1]", + "status": "fail", + "message": "AssertionError: None != 'LOW' : Expected LOW but returned None with T=10, neutrons=399, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. 
Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #2]", + "status": "fail", + "message": "AssertionError: None != 'LOW' : Expected LOW but returned None with T=10, neutrons=300, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #3]", + "status": "fail", + "message": "AssertionError: None != 'LOW' : Expected LOW but returned None with T=10, neutrons=1, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. 
Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #4]", + "status": "fail", + "message": "AssertionError: None != 'LOW' : Expected LOW but returned None with T=10, neutrons=0, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #5]", + "status": "fail", + "message": "AssertionError: None != 'NORMAL' : Expected NORMAL but returned None with T=10, neutrons=901, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. 
Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #6]", + "status": "fail", + "message": "AssertionError: None != 'NORMAL' : Expected NORMAL but returned None with T=10, neutrons=1000, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #7]", + "status": "fail", + "message": "AssertionError: None != 'NORMAL' : Expected NORMAL but returned None with T=10, neutrons=1099, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. 
Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #8]", + "status": "fail", + "message": "AssertionError: None != 'LOW' : Expected LOW but returned None with T=10, neutrons=899, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. Please limit to 500 chars]" + }, + { + "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #9]", + "status": "fail", + "message": "AssertionError: None != 'LOW' : Expected LOW but returned None with T=10, neutrons=700, threshold=10000", + "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)", + "task_id": 3, + "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. 
Please limit to 500 chars]"
+ },
+ {
+ "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #10]",
+ "status": "fail",
+ "message": "AssertionError: None != 'LOW' : Expected LOW but returned None with T=10, neutrons=400, threshold=10000",
+ "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)",
+ "task_id": 3,
+ "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. Please limit to 500 chars]"
+ },
+ {
+ "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #11]",
+ "status": "fail",
+ "message": "AssertionError: None != 'DANGER' : Expected DANGER but returned None with T=10, neutrons=1101, threshold=10000",
+ "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)",
+ "task_id": 3,
+ "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. Please limit to 500 chars]"
+ },
+ {
+ "name": "ExamplePartialFailureWithSubtestsAndStdout > fail safe with output truncation [variation #12]",
+ "status": "fail",
+ "message": "AssertionError: None != 'DANGER' : Expected DANGER but returned None with T=10, neutrons=1200, threshold=10000",
+ "test_code": "\"\"\"All variations of this should fail and appear in the test report.\n - All variations should have output.\n - All output should have truncation warnings.\n\"\"\"\n\ntemp = 10\nthreshold = 10000\ntest_data = ((399, 'LOW'), (300, 'LOW'), (1, 'LOW'),\n (0, 'LOW'), (901, 'NORMAL'), (1000, 'NORMAL'),\n (1099, 'NORMAL'), (899, 'LOW'), (700, 'LOW'),\n (400, 'LOW'), (1101, 'DANGER'), (1200, 'DANGER'))\n\nfor variant, (neutrons_per_second, expected) in enumerate(test_data, start=1):\n with self.subTest(f'variation #{variant}', temp=temp, neutrons_per_second=neutrons_per_second,\n threshold=threshold, expected=expected):\n\n # pylint: disable=assignment-from-no-return\n actual_result = fail_safe(temp, neutrons_per_second, threshold)\n failure_message = (f'Expected {expected} but returned {actual_result} with T={temp}, '\n f'neutrons={neutrons_per_second}, threshold={threshold}')\n self.assertEqual(actual_result, expected, failure_message)",
+ "task_id": 3,
+ "output": "Ouptut Captured!!\nId donec ultrices tincidunt arcu non. Semper feugiat nibh sed pulvinar proin gravida hendrerit. Odio ut sem nulla pharetra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Suscipit adipiscing bibendum est ultricies integer quis. Vel elit scelerisque mauris pellentesque pulvinar. Quam nulla porttitor massa id neque aliquam vestibulum morbi blandit. Ac felis donec et odio pellentesque diam. Vitae tortor condimentum [Output was truncated. Please limit to 500 chars]"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/test/example-partial-failure-with-subtests/results.json b/test/example-partial-failure-with-subtests/results.json
index ad62744..50baef5 100644
--- a/test/example-partial-failure-with-subtests/results.json
+++ b/test/example-partial-failure-with-subtests/results.json
@@ -16,28 +16,28 @@
"task_id": 1
},
{
- "name": "ExamplePartialFailureWithSubtests > hello [variation #1] (param=1, result=('Hello, World!', 1))",
+ "name": "ExamplePartialFailureWithSubtests > hello [variation #1]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 1) : Expected: ('Hello, World!', 1) but got something else instead.",
"test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
"task_id": 1
},
{
- "name": "ExamplePartialFailureWithSubtests > hello [variation #2] (param=2, result=('Hello, World!', 2))",
+ "name": "ExamplePartialFailureWithSubtests > hello [variation #2]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 2) : Expected: ('Hello, World!', 2) but got something else instead.",
"test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
"task_id": 1
},
{
- "name": "ExamplePartialFailureWithSubtests > hello [variation #3] (param=5, result=('Hello, World!', 5))",
+ "name": "ExamplePartialFailureWithSubtests > hello [variation #3]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 5) : Expected: ('Hello, World!', 5) but got something else instead.",
"test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
"task_id": 1
},
{
- "name": "ExamplePartialFailureWithSubtests > hello [variation #4] (param=10, result=('Hello, World!', 10))",
+ "name": "ExamplePartialFailureWithSubtests > hello [variation #4]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 10) : Expected: ('Hello, World!', 10) but got something else instead.",
"test_code": "input_data = [1, 2, 5, 10]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
@@ -57,28 +57,28 @@
"task_id": 2
},
{
- "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #1] (param=15, result=('Hello, World!', 15))",
+ "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #1]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 15) : Expected: ('Hello, World!', 15) but got something else instead.",
"test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
"task_id": 2
},
{
- "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #2] (param=23, result=('Hello, World!', 23))",
+ "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #2]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 23) : Expected: ('Hello, World!', 23) but got something else instead.",
"test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
"task_id": 2
},
{
- "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #3] (param=33, result=('Hello, World!', 33))",
+ "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #3]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 33) : Expected: ('Hello, World!', 33) but got something else instead.",
"test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
"task_id": 2
},
{
- "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #4] (param=39, result=('Hello, World!', 39))",
+ "name": "ExamplePartialFailureWithSubtestsOther > hello [variation #4]",
"status": "fail",
"message": "AssertionError: 'Hello, World!' != ('Hello, World!', 39) : Expected: ('Hello, World!', 39) but got something else instead.",
"test_code": "input_data = [15, 23, 33, 39]\nresult_data = [(\"Hello, World!\", param) for param in input_data]\n\nfor variant, (param, result) in enumerate(zip(input_data, result_data), start=1):\n with self.subTest(f\"variation #{variant}\", param=param, result=result):\n self.assertEqual(hello(param), result,\n msg=f'Expected: {result} but got something else instead.')",
diff --git a/test/example-syntax-error/results.json b/test/example-syntax-error/results.json
index 9c4117c..8c8c0fc 100644
--- a/test/example-syntax-error/results.json
+++ b/test/example-syntax-error/results.json
@@ -1,6 +1,6 @@
{
"version": 3,
"status": "error",
- "message": " /usr/local/lib/python3.11/site-packages/_pytest/python.py:618: in _importtestmodule\n mod = import_path(self.path, mode=importmode, root=self.config.rootpath)\n/usr/local/lib/python3.11/site-packages/_pytest/pathlib.py:533: in import_path\n importlib.import_module(module_name)\n/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n:1206: in _gcd_import\n ???\n:1178: in _find_and_load\n ???\n:1149: in _find_and_load_unlocked\n ???\n:690: in _load_unlocked\n ???\n/usr/local/lib/python3.11/site-packages/_pytest/assertion/rewrite.py:168: in exec_module\n exec(co, module.__dict__)\ntest/example-syntax-error/example_syntax_error_test.py:4: in \n from example_syntax_error import hello\nE File \"./test/example-syntax-error/example_syntax_error.py\", line 3\nE def hello();\nE ^\nE SyntaxError: expected ':'",
+ "message": " /usr/local/lib/python3.11/site-packages/_pytest/python.py:618: in _importtestmodule\n mod = import_path(self.path, mode=importmode, root=self.config.rootpath)\n/usr/local/lib/python3.11/site-packages/_pytest/pathlib.py:533: in import_path\n importlib.import_module(module_name)\n/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n:1204: in _gcd_import\n ???\n:1176: in _find_and_load\n ???\n:1147: in _find_and_load_unlocked\n ???\n:690: in _load_unlocked\n ???\n/usr/local/lib/python3.11/site-packages/_pytest/assertion/rewrite.py:168: in exec_module\n exec(co, module.__dict__)\ntest/example-syntax-error/example_syntax_error_test.py:4: in \n from example_syntax_error import hello\nE File \"./test/example-syntax-error/example_syntax_error.py\", line 3\nE def hello();\nE ^\nE SyntaxError: expected ':'",
"tests": []
-}
\ No newline at end of file
+}