Generating tests dynamically for each model #32

Merged · 1 commit · Jul 18, 2024
41 changes: 40 additions & 1 deletion dbt_bouncer/runner.py
@@ -1,3 +1,4 @@
+import inspect
 from typing import Dict, List
 
 import pytest
@@ -22,6 +23,41 @@ def tests(self):
         return self.tests_
 
 
+# Inspiration: https://github.com/pytest-dev/pytest-xdist/discussions/957#discussioncomment-7335007
+class MyFunctionItem(pytest.Function):
+    def __init__(self, model, *args, **kwargs):
+        self.model: Dict[str, str] = model
+        super().__init__(*args, **kwargs)
+
+
+class GenerateTestsPlugin:
+    """
+    For fixtures that are lists (e.g. `models`) this plugin generates a test for each item in the list.
+    Alternative approaches like parametrize or fixture_params do not work; generating tests using
+    `pytest_pycollect_makeitem` is one way to get this to work.
+    """
+
+    def __init__(self, models):
+        self.models = models
+
+    def pytest_pycollect_makeitem(self, collector, name, obj):
+        items = []
+        if (inspect.isfunction(obj) or inspect.ismethod(obj)) and (name.startswith("test_")):
+            fixture_info = pytest.Function.from_parent(
+                collector, name=name, callobj=obj
+            )._fixtureinfo
+            for model in self.models:
+                item = MyFunctionItem.from_parent(
+                    parent=collector,
+                    name=name,
+                    fixtureinfo=fixture_info,
+                    model=model,
+                )
+                items.append(item)
+
+        return items
+
+
 def runner(
     models: List[Dict[str, str]],
     sources: List[Dict[str, str]],
@@ -37,4 +73,7 @@ def runner(
         setattr(fixtures, att + "_", locals()[att])
 
     # Run the tests; pytest.main returns a non-zero exit code if any test fails
-    pytest.main(["dbt_bouncer/tests"], plugins=[fixtures])
+    pytest.main(
+        ["dbt_bouncer/tests"],
+        plugins=[fixtures, GenerateTestsPlugin(models)],
+    )
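The plugin attaches each entry of `models` to its own collected test item, so a single check function reports one pass/fail result per model. As a rough sketch of how `runner` might be driven (not part of this PR: the manifest path, the key selection, and the assumption that `models` and `sources` are the only required arguments are all illustrative), the fixture inputs could be built from a dbt `manifest.json`:

import json

from dbt_bouncer.runner import runner

# Assumed location of the dbt artifacts; adjust to your project.
with open("dbt_project/target/manifest.json") as f:
    manifest = json.load(f)

# Keep only the keys the checks rely on ("unique_id" and "description" are the
# ones used by test_populated_model_description).
models = [
    {"unique_id": node["unique_id"], "description": node.get("description") or ""}
    for node in manifest["nodes"].values()
    if node["resource_type"] == "model"
]
sources = [
    {"unique_id": src["unique_id"], "description": src.get("description") or ""}
    for src in manifest["sources"].values()
]

# One collected test per model: a failure message names the offending unique_id.
runner(models=models, sources=sources)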
14 changes: 5 additions & 9 deletions dbt_bouncer/tests/test_models.py
@@ -1,13 +1,9 @@
-def populated_model_description(models):
+def test_populated_model_description(request, model=None):
     """
     Models must have a populated description.
     """
 
-    for model in models:
-        assert (
-            len(model["description"].strip()) > 4
-        ), f"{model['unique_id']} does not have a populated description."
-
-
-def test_populated_model_description(models):
-    populated_model_description(models=models)
+    model = request.node.model if model is None else model
+    assert (
+        len(model["description"].strip()) > 4
+    ), f"{model['unique_id']} does not have a populated description."
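The `model=None` default plus the `request.node.model` lookup lets the same function serve two callers: the dynamically generated items (where the plugin has attached `model` to the node) and a direct call with an explicit `model`. A new check can follow the same pattern; the sketch below is hypothetical (the check name and the `tags` key are assumptions, not part of this PR):

def test_model_has_tags(request, model=None):
    """
    Models must have at least one tag (hypothetical example).
    """
    # Fall back to the model attached to the collected item by GenerateTestsPlugin.
    model = request.node.model if model is None else model
    assert len(model.get("tags", [])) > 0, f"{model['unique_id']} has no tags."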
2 changes: 1 addition & 1 deletion dbt_project/target/manifest.json

Large diffs are not rendered by default.

Binary file modified dist/dbt-bouncer.pex
Binary file not shown.
21 changes: 21 additions & 0 deletions tests/conftest.py
@@ -0,0 +1,21 @@
+import importlib
+from pathlib import Path
+
+
+def pytest_collection_modifyitems(session, config, items):
+    """
+    Pytest runs all functions that start with "test_". This is an issue as our functions in
+    `./dbt_bouncer/tests` also have this prefix and therefore pytest tries to test those.
+    This hook is used to dynamically retrieve the names of these functions and remove them
+    from the list of functions that pytest will run.
+    """
+
+    dbt_bouncer_tests: list[str] = []
+    for f in Path("./dbt_bouncer/tests").glob("**/*"):
+        if f.is_file() and f.name.startswith("test_") and f.name.endswith(".py"):
+            test_file = importlib.import_module(f"dbt_bouncer.tests.{f.stem}")
+            dbt_bouncer_tests.extend(
+                i for i in dir(test_file) if not i.startswith("_") and i != "logger"
+            )
+
+    items[:] = [item for item in items if item.name not in dbt_bouncer_tests]
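The hook filters by function name only, so anything defined in `dbt_bouncer/tests` (for example `test_populated_model_description`) is dropped from this repository's own suite while wrappers such as `test_test_populated_model_description` still run. One way to verify the effect (an illustrative sketch, not part of this PR) is to print what survives collection:

import pytest


class CollectionSpy:
    """Prints the names of the items that remain after pytest_collection_modifyitems."""

    def pytest_collection_finish(self, session):
        print(sorted({item.name for item in session.items}))


# "--collect-only" stops before running any tests; "-q" keeps the output short.
pytest.main(["tests", "--collect-only", "-q"], plugins=[CollectionSpy()])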
78 changes: 32 additions & 46 deletions tests/tests/test_models.py
@@ -2,81 +2,67 @@
 
 import pytest
 
-from dbt_bouncer.tests.test_models import populated_model_description
+from dbt_bouncer.tests.test_models import test_populated_model_description
 
 
 @pytest.mark.parametrize(
-    "models,expectation",
+    "model, expectation",
     [
         (
-            [
-                {
-                    "description": "Description that is more than 4 characters.",
-                    "unique_id": "model.package_name.model_1",
-                }
-            ],
+            {
+                "description": "Description that is more than 4 characters.",
+                "unique_id": "model.package_name.model_1",
+            },
             does_not_raise(),
         ),
         (
-            [
-                {
-                    "description": """A
+            {
+                "description": """A
 multiline
 description
 """,
-                    "unique_id": "model.package_name.model_2",
-                }
-            ],
+                "unique_id": "model.package_name.model_2",
+            },
             does_not_raise(),
         ),
         (
-            [
-                {
-                    "description": "",
-                    "unique_id": "model.package_name.model_3",
-                }
-            ],
+            {
+                "description": "",
+                "unique_id": "model.package_name.model_3",
+            },
             pytest.raises(AssertionError),
         ),
         (
-            [
-                {
-                    "description": " ",
-                    "unique_id": "model.package_name.model_4",
-                }
-            ],
+            {
+                "description": " ",
+                "unique_id": "model.package_name.model_4",
+            },
             pytest.raises(AssertionError),
         ),
         (
-            [
-                {
-                    "description": """
+            {
+                "description": """
 """,
-                    "unique_id": "model.package_name.model_5",
-                }
-            ],
+                "unique_id": "model.package_name.model_5",
+            },
             pytest.raises(AssertionError),
         ),
         (
-            [
-                {
-                    "description": "-",
-                    "unique_id": "model.package_name.model_6",
-                }
-            ],
+            {
+                "description": "-",
+                "unique_id": "model.package_name.model_6",
+            },
            pytest.raises(AssertionError),
        ),
         (
-            [
-                {
-                    "description": "null",
-                    "unique_id": "model.package_name.model_7",
-                }
-            ],
+            {
+                "description": "null",
+                "unique_id": "model.package_name.model_7",
+            },
             pytest.raises(AssertionError),
         ),
     ],
 )
-def test_populated_model_description(models, expectation):
+def test_test_populated_model_description(model, expectation):
     with expectation:
-        populated_model_description(models=models)
+        test_populated_model_description(model=model, request=None)