Skip to content

Commit b380f21

Browse files
chore(ci_visibility): separate pytest outcome processing into its own function (#10960)
This separates the outcome processing for `pytest` into a separate function to allow for easier refactoring of code for Early Flake Detection retries. It also makes the status an optional argument for various `finish()` calls, to allow for finishing without overriding the test status.

## Checklist

- [x] PR author has checked that all the criteria below are met
- The PR description includes an overview of the change
- The PR description articulates the motivation for the change
- The change includes tests OR the PR description describes a testing strategy
- The PR description notes risks associated with the change, if any
- Newly-added code is easy to change
- The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)
- The change includes or references documentation updates if necessary
- Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))

## Reviewer Checklist

- [x] Reviewer has checked that all the criteria below are met
- Title is accurate
- All changes are related to the pull request's stated goal
- Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes
- Testing strategy adequately addresses listed risks
- Newly-added code is easy to change
- Release note makes sense to a user of the library
- If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment
- Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)

---------

Co-authored-by: Alberto Vara <[email protected]>
1 parent d8c8fa9 commit b380f21

File tree

4 files changed

+44
-26
lines changed

4 files changed

+44
-26
lines changed

ddtrace/contrib/pytest/_plugin_v2.py

+34-20
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
from ddtrace.ext import test
3030
from ddtrace.ext.test_visibility import ITR_SKIPPING_LEVEL
3131
from ddtrace.ext.test_visibility.api import TestExcInfo
32+
from ddtrace.ext.test_visibility.api import TestStatus
3233
from ddtrace.ext.test_visibility.api import disable_test_visibility
3334
from ddtrace.ext.test_visibility.api import enable_test_visibility
3435
from ddtrace.ext.test_visibility.api import is_test_visibility_enabled
@@ -54,6 +55,12 @@
5455
_NODEID_REGEX = re.compile("^((?P<module>.*)/(?P<suite>[^/]*?))::(?P<name>.*?)$")
5556

5657

58+
class _TestOutcome(t.NamedTuple):
59+
status: t.Optional[TestStatus] = None
60+
skip_reason: t.Optional[str] = None
61+
exc_info: t.Optional[TestExcInfo] = None
62+
63+
5764
def _handle_itr_should_skip(item, test_id) -> bool:
5865
"""Checks whether a test should be skipped
5966
@@ -71,11 +78,11 @@ def _handle_itr_should_skip(item, test_id) -> bool:
7178
# Marking the test as forced run also applies to its hierarchy
7279
InternalTest.mark_itr_forced_run(test_id)
7380
return False
74-
else:
75-
InternalTest.mark_itr_skipped(test_id)
76-
# Marking the test as skipped by ITR so that it appears in pytest's output
77-
item.add_marker(pytest.mark.skip(reason=SKIPPED_BY_ITR_REASON)) # TODO don't rely on internal for reason
78-
return True
81+
82+
InternalTest.mark_itr_skipped(test_id)
83+
# Marking the test as skipped by ITR so that it appears in pytest's output
84+
item.add_marker(pytest.mark.skip(reason=SKIPPED_BY_ITR_REASON)) # TODO don't rely on internal for reason
85+
return True
7986

8087
return False
8188

@@ -311,7 +318,7 @@ def pytest_runtest_protocol(item, nextitem) -> None:
311318
return
312319

313320

314-
def _pytest_runtest_makereport(item, call, outcome):
321+
def _process_outcome(item, call, outcome) -> _TestOutcome:
315322
result = outcome.get_result()
316323

317324
test_id = _get_test_id_from_item(item)
@@ -322,7 +329,7 @@ def _pytest_runtest_makereport(item, call, outcome):
322329
# - it was skipped by ITR
323330
# - it was marked with skipif
324331
if InternalTest.is_finished(test_id):
325-
return
332+
return _TestOutcome()
326333

327334
# In cases where a test was marked as XFAIL, the reason is only available during when call.when == "call", so we
328335
# add it as a tag immediately:
@@ -339,7 +346,7 @@ def _pytest_runtest_makereport(item, call, outcome):
339346
# DEV NOTE: some skip scenarios (eg: skipif) have an exception during setup
340347

341348
if call.when != "teardown" and not (has_exception or result.failed):
342-
return
349+
return _TestOutcome()
343350

344351
xfail = hasattr(result, "wasxfail") or "xfail" in result.keywords
345352
xfail_reason_tag = InternalTest.get_tag(test_id, XFAIL_REASON) if xfail else None
@@ -349,20 +356,18 @@ def _pytest_runtest_makereport(item, call, outcome):
349356
# that's why no XFAIL_REASON or test.RESULT tags will be added.
350357
if result.skipped:
351358
if InternalTest.was_skipped_by_itr(test_id):
352-
# Items that were skipped by ITR already have their status set
353-
return
359+
# Items that were skipped by ITR already have their status and reason set
360+
return _TestOutcome()
354361

355362
if xfail and not has_skip_keyword:
356363
# XFail tests that fail are recorded skipped by pytest, should be passed instead
357364
if not item.config.option.runxfail:
358365
InternalTest.set_tag(test_id, test.RESULT, test.Status.XFAIL.value)
359366
if xfail_reason_tag is None:
360367
InternalTest.set_tag(test_id, XFAIL_REASON, getattr(result, "wasxfail", "XFail"))
361-
InternalTest.mark_pass(test_id)
362-
return
368+
return _TestOutcome(TestStatus.PASS)
363369

364-
InternalTest.mark_skip(test_id, _extract_reason(call))
365-
return
370+
return _TestOutcome(TestStatus.SKIP, _extract_reason(call))
366371

367372
if result.passed:
368373
if xfail and not has_skip_keyword and not item.config.option.runxfail:
@@ -371,24 +376,33 @@ def _pytest_runtest_makereport(item, call, outcome):
371376
InternalTest.set_tag(test_id, XFAIL_REASON, "XFail")
372377
InternalTest.set_tag(test_id, test.RESULT, test.Status.XPASS.value)
373378

374-
InternalTest.mark_pass(test_id)
375-
return
379+
return _TestOutcome(TestStatus.PASS)
376380

377381
if xfail and not has_skip_keyword and not item.config.option.runxfail:
378382
# XPass (strict=True) are recorded failed by pytest, longrepr contains reason
379383
if xfail_reason_tag is None:
380384
InternalTest.set_tag(test_id, XFAIL_REASON, getattr(result, "longrepr", "XFail"))
381385
InternalTest.set_tag(test_id, test.RESULT, test.Status.XPASS.value)
382-
InternalTest.mark_fail(test_id)
383-
return
386+
return _TestOutcome(TestStatus.FAIL)
384387

385388
exc_info = TestExcInfo(call.excinfo.type, call.excinfo.value, call.excinfo.tb) if call.excinfo else None
386389

387-
InternalTest.mark_fail(test_id, exc_info)
390+
return _TestOutcome(status=TestStatus.FAIL, exc_info=exc_info)
391+
392+
393+
def _pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo, outcome: pytest.TestReport) -> None:
394+
test_id = _get_test_id_from_item(item)
395+
396+
test_outcome = _process_outcome(item, call, outcome)
397+
398+
if test_outcome.status is None and call.when != "teardown":
399+
return
400+
401+
InternalTest.finish(test_id, test_outcome.status, test_outcome.skip_reason, test_outcome.exc_info)
388402

389403

390404
@pytest.hookimpl(hookwrapper=True)
391-
def pytest_runtest_makereport(item, call) -> None:
405+
def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo) -> None:
392406
"""Store outcome for tracing."""
393407
outcome: pytest.TestReport
394408
outcome = yield

ddtrace/internal/ci_visibility/api/_test.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -126,13 +126,14 @@ def _telemetry_record_event_finished(self):
126126

127127
def finish_test(
128128
self,
129-
status: TestStatus,
129+
status: Optional[TestStatus] = None,
130130
reason: Optional[str] = None,
131131
exc_info: Optional[TestExcInfo] = None,
132132
override_finish_time: Optional[float] = None,
133133
) -> None:
134134
log.debug("Test Visibility: finishing %s, with status: %s, reason: %s", self, status, reason)
135-
self.set_status(status)
135+
if status is not None:
136+
self.set_status(status)
136137
if reason is not None:
137138
self.set_tag(test.SKIP_REASON, reason)
138139
if exc_info is not None:

ddtrace/internal/test_visibility/api.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ class FinishArgs(NamedTuple):
115115
"""InternalTest allows finishing with an overridden finish time (for EFD and other retry purposes)"""
116116

117117
test_id: InternalTestId
118-
status: TestStatus
118+
status: t.Optional[TestStatus] = None
119119
skip_reason: t.Optional[str] = None
120120
exc_info: t.Optional[TestExcInfo] = None
121121
override_finish_time: t.Optional[float] = None
@@ -124,7 +124,7 @@ class FinishArgs(NamedTuple):
124124
@_catch_and_log_exceptions
125125
def finish(
126126
item_id: InternalTestId,
127-
status: ext_api.TestStatus,
127+
status: t.Optional[ext_api.TestStatus] = None,
128128
reason: t.Optional[str] = None,
129129
exc_info: t.Optional[ext_api.TestExcInfo] = None,
130130
override_finish_time: t.Optional[float] = None,

static-analysis.datadog.yml

+5-2
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,12 @@
11
rulesets:
22
- python-best-practices:
33
rules:
4-
nested-blocks:
4+
comment-fixme-todo-ownership:
55
ignore:
66
- "**"
77
max-function-lines:
8-
ignore:
8+
ignore:
99
- "tests/ci_visibility/api/fake_runner_*.py"
10+
nested-blocks:
11+
ignore:
12+
- "**"

0 commit comments

Comments (0)