From c4580bb94c63021a93b6e7c01953898a2239c3d9 Mon Sep 17 00:00:00 2001 From: Sylvain Marie Date: Fri, 27 Jul 2018 10:55:10 +0200 Subject: [PATCH] Initial version, forked from pytest-cases. --- .gitignore | 10 ++ README.md | 76 ++++++++- pytest_steps/__init__.py | 8 + pytest_steps/steps.py | 150 ++++++++++++++++ pytest_steps/tests/__init__.py | 0 .../tests/test_pytest_capabilities.py | 83 +++++++++ pytest_steps/tests/test_steps_no_results.py | 27 +++ pytest_steps/tests/test_steps_with_results.py | 54 ++++++ setup.cfg | 22 +++ setup.py | 161 ++++++++++++++++++ 10 files changed, 589 insertions(+), 2 deletions(-) create mode 100644 pytest_steps/__init__.py create mode 100644 pytest_steps/steps.py create mode 100644 pytest_steps/tests/__init__.py create mode 100644 pytest_steps/tests/test_pytest_capabilities.py create mode 100644 pytest_steps/tests/test_steps_no_results.py create mode 100644 pytest_steps/tests/test_steps_with_results.py create mode 100644 setup.cfg create mode 100644 setup.py diff --git a/.gitignore b/.gitignore index 894a44c..0fbba57 100644 --- a/.gitignore +++ b/.gitignore @@ -102,3 +102,13 @@ venv.bak/ # mypy .mypy_cache/ + +# Pycharm +.idea/ + +# Mkdocs +site/ + +# travis CI +github_travis_rsa* +reports diff --git a/README.md b/README.md index 741e5d0..5d1ef64 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,74 @@ -# python-pytest-steps -A tiny package to ease the creation of test steps with shared intermediate results/state +# pytest-steps + +Create step-wise / incremental tests in `pytest`. 
+ +[![Build Status](https://travis-ci.org/smarie/python-pytest-steps.svg?branch=master)](https://travis-ci.org/smarie/python-pytest-steps) [![Tests Status](https://smarie.github.io/python-pytest-steps/junit/junit-badge.svg?dummy=8484744)](https://smarie.github.io/python-pytest-steps/junit/report.html) [![codecov](https://codecov.io/gh/smarie/python-pytest-steps/branch/master/graph/badge.svg)](https://codecov.io/gh/smarie/python-pytest-steps) [![Documentation](https://img.shields.io/badge/docs-latest-blue.svg)](https://smarie.github.io/python-pytest-steps/) [![PyPI](https://img.shields.io/badge/PyPI-pytest_steps-blue.svg)](https://pypi.python.org/pypi/pytest_steps/) + +**This is the readme for developers.** The documentation for users is available here: [https://smarie.github.io/python-pytest-steps/](https://smarie.github.io/python-pytest-steps/) + +## Want to contribute ? + +Contributions are welcome ! Simply fork this project on github, commit your contributions, and create pull requests. + +Here is a non-exhaustive list of interesting open topics: [https://github.com/smarie/python-pytest-steps/issues](https://github.com/smarie/python-pytest-steps/issues) + +## Running the tests + +This project uses `pytest`. + +```bash +pytest -v pytest_steps/tests/ +``` + +You may need to install requirements for setup beforehand, using + +```bash +pip install -r ci_tools/requirements-test.txt +``` + +## Packaging + +This project uses `setuptools_scm` to synchronise the version number. Therefore the following command should be used for development snapshots as well as official releases: + +```bash +python setup.py egg_info bdist_wheel rotate -m.whl -k3 +``` + +You may need to install requirements for setup beforehand, using + +```bash +pip install -r ci_tools/requirements-setup.txt +``` + +## Generating the documentation page + +This project uses `mkdocs` to generate its documentation page. 
Therefore building a local copy of the doc page may be done using:
+
+```bash
+mkdocs build
+```
+
+You may need to install requirements for doc beforehand, using
+
+```bash
+pip install -r ci_tools/requirements-doc.txt
+```
+
+## Generating the test reports
+
+The following commands generate the html test report and the associated badge.
+
+```bash
+pytest --junitxml=junit.xml -v pytest_steps/tests/
+ant -f ci_tools/generate-junit-html.xml
+python ci_tools/generate-junit-badge.py
+```
+
+### PyPI Releasing memo
+
+This project is now automatically deployed to PyPI when a tag is created. Anyway, for manual deployment we can use:
+
+```bash
+twine upload dist/* -r pypitest
+twine upload dist/*
+```
diff --git a/pytest_steps/__init__.py b/pytest_steps/__init__.py
new file mode 100644
index 0000000..05718c0
--- /dev/null
+++ b/pytest_steps/__init__.py
@@ -0,0 +1,8 @@
+from pytest_steps.steps import test_steps, ResultsHolder
+
+__all__ = [
+    # the submodule
+    'steps',
+    # all symbols imported above
+    'test_steps', 'ResultsHolder'
+]
diff --git a/pytest_steps/steps.py b/pytest_steps/steps.py
new file mode 100644
index 0000000..d669355
--- /dev/null
+++ b/pytest_steps/steps.py
@@ -0,0 +1,150 @@
+from functools import lru_cache
+from inspect import signature, getmodule
+
+import pytest
+
+
+class ResultsHolder:
+    """
+    An object that is passed along the various steps of your tests.
+    You can put intermediate results in here, and find them in the following steps.
+
+    Note: you can use `vars(results)` to see the available results.
+    """
+    pass
+
+
+def test_steps(*steps, test_step_argname: str = 'test_step', test_results_argname: str = 'results'):
+    """
+    Decorates a test function so as to automatically parametrize it with all steps listed as arguments.
+
+    When the steps are functions, this is equivalent to
+    `@pytest.mark.parametrize(test_step_argname, steps, ids=lambda x: x.__name__)`
+
+    ```python
+    from pytest_steps import test_steps
+
+    def step_a():
+        # perform this step
+        print("step a")
+        assert not False
+
+    def step_b():
+        # perform this step
+        print("step b")
+        assert not False
+
+    @test_steps(step_a, step_b)
+    def test_suite_no_results(test_step):
+        # Execute the step
+        test_step()
+    ```
+
+    You can add a 'results' parameter to your test function if you wish to share a `ResultsHolder` object between
+    your steps.
+
+    ```python
+    def step_a(results: ResultsHolder):
+        # perform this step
+        print("step a")
+        assert not False
+
+        # intermediate results can be stored in results
+        results.intermediate_a = 'some intermediate result created in step a'
+
+    def step_b(results: ResultsHolder):
+        # perform this step, leveraging the previous step's results
+        print("step b")
+        new_text = results.intermediate_a + " ... augmented"
+        print(new_text)
+        assert len(new_text) == 56
+
+    @test_steps(step_a, step_b)
+    def test_suite_with_results(test_step, results: ResultsHolder):
+        # Execute the step with access to the results holder
+        test_step(results)
+    ```
+
+    :param steps: a list of test steps. They can be anything, but typically they are non-test (not prefixed with
+        'test') functions.
+    :param test_step_argname: the optional name of the function argument that will receive the test step object.
+        Default is 'test_step'.
+    :param test_results_argname: the optional name of the function argument that will receive the shared
+        `ResultsHolder` object if present. Default is 'results'.
+    :return: the decorator to apply to the test function
+    """
+    def steps_decorator(test_func):
+        """
+        The generated test function decorator.
+
+        It is equivalent to @mark.parametrize(test_step_argname, steps, ids=step_ids), plus the dynamic creation
+        of a module-level results fixture when the test function declares a `test_results_argname` parameter.
+
+        :param test_func: the test function to parametrize
+        :return: the parametrized test function
+        """
+        def get_id(f):
+            # use the step function's name as the parameter id when available, its repr otherwise
+            if callable(f) and hasattr(f, '__name__'):
+                return f.__name__
+            else:
+                return str(f)
+
+        step_ids = [get_id(f) for f in steps]
+
+        # Finally create the pytest decorator and apply it
+        # depending on the presence of test_results_argname in signature
+        s = signature(test_func)
+        if test_results_argname in s.parameters:
+            # the user wishes to share results across test steps. Create a cached fixture
+            @lru_cache(maxsize=None)
+            def get_results_holder(**kwargs):
+                """
+                A factory for the ResultsHolder objects. Since it uses @lru_cache, the same ResultsHolder will be
+                returned when the keyword arguments are the same.
+
+                :param kwargs: the values of all non-step test parameters (they must be hashable for the cache)
+                :return: the ResultsHolder cached for this combination of parameters
+                """
+                return ResultsHolder()  # TODO use Munch or MaxiMunch from `mixture` project, when publicly available ?
+
+            @pytest.fixture(name=test_results_argname)
+            def results(request):
+                """
+                The fixture for the ResultsHolder. It implements an intelligent cache so that the same ResultsHolder
+                object is used across test steps.
+
+                :param request: the pytest request object for the current test invocation
+                :return: the shared ResultsHolder for the current combination of (non-step) parameters
+                """
+                # The object should be different everytime anything changes, except when the test step changes
+                dont_change_when_these_change = {test_step_argname}
+
+                # We also do not want the 'results' itself nor the pytest 'request' to be taken into account, since
+                # the first is not yet defined and the second is an internal pytest variable
+                dont_change_when_these_change.update({test_results_argname, 'request'})
+
+                # List the values of all the test function parameters that matter.
+                # NOTE: `getfixturevalue`/`fixturenames` replace the `getfuncargvalue`/`funcargnames`
+                # aliases, deprecated since pytest 3.0.
+                kwargs = {argname: request.getfixturevalue(argname)
+                          for argname in request.fixturenames
+                          if argname not in dont_change_when_these_change}
+
+                # Get or create the cached Result holder for this combination of parameters
+                return get_results_holder(**kwargs)
+
+            # Add the fixture dynamically: we have to add it to the function holder module as explained in
+            # https://github.com/pytest-dev/pytest/issues/2424
+            module = getmodule(test_func)
+            if test_results_argname not in dir(module):
+                setattr(module, test_results_argname, results)
+            else:
+                raise ValueError("The {} fixture already exists in module {}: please specify a different "
+                                 "`test_results_argname` in `@test_steps`".format(test_results_argname, module))
+
+        # Finally parametrize the function with the test steps
+        parametrizer = pytest.mark.parametrize(test_step_argname, steps, ids=step_ids)
+        return parametrizer(test_func)
+
+    return steps_decorator
+
+
+test_steps.__test__ = False  # prevent pytest from collecting this decorator factory as a test
diff --git a/pytest_steps/tests/__init__.py b/pytest_steps/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pytest_steps/tests/test_pytest_capabilities.py b/pytest_steps/tests/test_pytest_capabilities.py
new file mode 100644
index 0000000..e727c24
--- /dev/null
+++ b/pytest_steps/tests/test_pytest_capabilities.py
@@ -0,0 +1,83 @@
+from functools import lru_cache
+import pytest
+
+
+# ------------------- The same code as what is generated dynamically by @test_steps
+class ResultsHolder:
+    pass
+
+
+@lru_cache(maxsize=None)
+def get_results_holder(**kwargs):
+    """
+    A factory for the ResultsHolder objects. Since it uses @lru_cache, the same ResultsHolder will be returned when
+    the keyword arguments are the same.
+
+    :param kwargs: the values of all non-step test parameters (must be hashable)
+    :return: the ResultsHolder cached for this combination of parameters
+    """
+    return ResultsHolder()
+
+
+@pytest.fixture
+def results(request):
+    """
+    The fixture for the ResultsHolder.
+
+    :param request: the pytest request object for the current test invocation
+    :return: the shared ResultsHolder for the current combination of (non-step) parameters
+    """
+    # NOTE: `getfixturevalue`/`fixturenames` replace the `getfuncargvalue`/`funcargnames`
+    # aliases, deprecated since pytest 3.0.
+    kwargs = {argname: request.getfixturevalue(argname)
+              for argname in request.fixturenames
+              if argname not in {'test_step', 'results', 'request'}}
+    return get_results_holder(**kwargs)
+# -------------------------------------
+
+
+def step_a(results: ResultsHolder, stupid_param):
+    """ Step a of the test """
+
+    # perform this step
+    print("step a - " + stupid_param)
+    assert not False
+
+    # Assert that the ResultsHolder object is a brand new one every time we start this step
+    assert not hasattr(results, 'intermediate_a')
+
+    # intermediate results can be stored in results
+    results.intermediate_a = 'some intermediate result created in step a for test ' + stupid_param
+
+    assert not hasattr(results, 'p')
+    results.p = stupid_param
+
+
+def step_b(results: ResultsHolder, stupid_param):
+    """ Step b of the test """
+
+    # perform this step
+    print("step b - " + stupid_param)
+
+    # assert that step a has been done
+    assert hasattr(results, 'intermediate_a')
+    # ... and that the results holder object that we get is the one for our test suite (same parameters)
+    assert results.intermediate_a == 'some intermediate result created in step a for test ' + stupid_param
+
+    new_text = results.intermediate_a + " ... augmented"
+    print(new_text)
+
+    assert results.p == stupid_param
+
+
+@pytest.fixture(params=['F', 'G'])
+def fix(request):
+    return request.param
+
+
+@pytest.mark.parametrize('really_stupid_param2', ["2A", "2B"])
+@pytest.mark.parametrize('test_step', [step_a, step_b])
+@pytest.mark.parametrize('stupid_param1', ["1a", "1b"])
+def test_manual_pytest_equivalent(test_step, stupid_param1, really_stupid_param2, fix, results: ResultsHolder):
+    """This test performs the same thing as @test_steps but manually.
+    See test_steps_with_results.py for details"""
+
+    # Execute the step
+    test_step(results, stupid_param1 + really_stupid_param2 + fix)
diff --git a/pytest_steps/tests/test_steps_no_results.py b/pytest_steps/tests/test_steps_no_results.py
new file mode 100644
index 0000000..c6a56f7
--- /dev/null
+++ b/pytest_steps/tests/test_steps_no_results.py
@@ -0,0 +1,27 @@
+from pytest_steps import test_steps
+
+
+def step_a():
+    """ Step a of the test """
+
+    # perform this step
+    print("step a")
+    assert not False
+
+
+def step_b():
+    """ Step b of the test """
+
+    # perform this step
+    print("step b")
+    assert not False
+
+
+# equivalent to
+# @pytest.mark.parametrize('test_step', (step_a, step_b), ids=lambda x: x.__name__)
+@test_steps(step_a, step_b)
+def test_suite_no_results(test_step):
+    """ Executes each step in order, without any shared results object. """
+
+    # Execute the step
+    test_step()
diff --git a/pytest_steps/tests/test_steps_with_results.py b/pytest_steps/tests/test_steps_with_results.py
new file mode 100644
index 0000000..c6b9e35
--- /dev/null
+++ b/pytest_steps/tests/test_steps_with_results.py
@@ -0,0 +1,54 @@
+import pytest
+
+from pytest_steps import test_steps, ResultsHolder
+
+
+def step_a(results: ResultsHolder, stupid_param):
+    """ Step a of the test """
+
+    # perform this step
+    print("step a - " + stupid_param)
+    assert not False
+
+    # Assert that the ResultsHolder object is a brand new one every time we start this step
+    assert not hasattr(results, 'intermediate_a')
+
+    # intermediate results can be stored in results
+    results.intermediate_a = 'some intermediate result created in step a for test ' + stupid_param
+
+    assert not hasattr(results, 'p')
+    results.p = stupid_param
+
+
+def step_b(results: ResultsHolder, stupid_param):
+    """ Step b of the test """
+
+    # perform this step
+    print("step b - " + stupid_param)
+
+    # assert that step a has been done
+    assert hasattr(results, 'intermediate_a')
+    # ... and that the results holder object that we get is the one for our test suite (same parameters)
+    assert results.intermediate_a == 'some intermediate result created in step a for test ' + stupid_param
+
+    new_text = results.intermediate_a + " ... augmented"
+    print(new_text)
+
+    assert results.p == stupid_param
+
+
+@pytest.fixture(params=['F', 'G'])
+def fix(request):
+    return request.param
+
+
+@pytest.mark.parametrize('really_stupid_param2', ["2A", "2B"])
+@test_steps(step_a, step_b)
+@pytest.mark.parametrize('stupid_param1', ["1a", "1b"])
+def test_suite_with_results(test_step, stupid_param1, really_stupid_param2, fix, results: ResultsHolder):
+    """This test is extremely stupid but shows the extreme case where there are parameters and fixtures all over the
+    place. It asserts that a new results holder is created for all tests but that the same object is reused across
+    steps.
+    """
+
+    # Execute the step
+    test_step(results, stupid_param1 + really_stupid_param2 + fix)
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..4bf57cc
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,22 @@
+[egg_info]
+#tag_date = 1 already covered by setuptools_scm
+#tag_build = .dev --this adds ".dev" at the end of the release name. we already use setuptools_scm so already covered.
#tag_svn_revision = 1 --this adds "_r0" at the end of the release name. we already use setuptools_scm so already covered.
+
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=0
+
+[metadata]
+description-file = README.md
+
+# In order to be able to execute 'python setup.py test'
+# from https://docs.pytest.org/en/latest/goodpractices.html#integrating-with-setuptools-python-setup-py-test-pytest-runner
+[aliases]
+test=pytest
+
+[tool:pytest]
+addopts = --verbose
+testpaths = pytest_steps/tests
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..c85ad34
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,161 @@
+"""A setuptools based setup module.
+See:
+https://packaging.python.org/en/latest/distributing.html
+https://github.com/pypa/sampleproject
+"""
+from six import raise_from
+from os import path
+
+from setuptools import setup, find_packages
+
+here = path.abspath(path.dirname(__file__))
+
+# *************** Dependencies *********
+INSTALL_REQUIRES = []
+DEPENDENCY_LINKS = []
+SETUP_REQUIRES = ['pytest-runner', 'setuptools_scm', 'pypandoc', 'pandoc']
+TESTS_REQUIRE = ['pytest', 'pytest-logging', 'pytest-cov']
+EXTRAS_REQUIRE = {}
+
+# simple check
+try:
+    from setuptools_scm import get_version
+except Exception as e:
+    raise_from(Exception('Required packages for setup not found. You may wish you execute '
+                         '"pip install -r ci_tools/requirements-setup.txt" to install them or alternatively install '
+                         'them manually using conda or other system. The list is : ' + str(SETUP_REQUIRES)), e)
+
+# ************** ID card *****************
+DISTNAME = 'pytest-steps'
+DESCRIPTION = 'Create step-wise / incremental tests in pytest.'
+MAINTAINER = 'Sylvain Marié'
+MAINTAINER_EMAIL = 'sylvain.marie@schneider-electric.com'
+URL = 'https://github.com/smarie/python-pytest-steps'
+LICENSE = 'BSD 3-Clause'
+LICENSE_LONG = 'License :: OSI Approved :: BSD License'
+
+version_for_download_url = get_version()
+DOWNLOAD_URL = URL + '/tarball/' + version_for_download_url
+
+KEYWORDS = 'pytest test step incremental decorator parametrize parameter state share result modular'
+# --Get the long description from the README file
+# with open(path.join(here, 'README.md'), encoding='utf-8') as f:
+#     LONG_DESCRIPTION = f.read()
+try:
+    import pypandoc
+    LONG_DESCRIPTION = pypandoc.convert(path.join(here, 'docs', 'long_description.md'), 'rst').replace('\r', '')
+except (ImportError, OSError):
+    # ImportError: pypandoc itself is not installed.
+    # OSError: pypandoc is installed but the underlying pandoc binary is missing.
+    # In both cases fall back to the raw markdown README.
+    from warnings import warn
+    warn('WARNING pypandoc could not be imported - we recommend that you install it in order to package the '
+         'documentation correctly')
+    LONG_DESCRIPTION = open('README.md', encoding='utf-8').read()
+
+# ************* VERSION A **************
+# --Get the Version number from VERSION file, see https://packaging.python.org/single_source_version/ option 4.
+# THIS IS DEPRECATED AS WE NOW USE GIT TO MANAGE VERSION
+# with open(path.join(here, 'VERSION')) as version_file:
+#     VERSION = version_file.read().strip()
+# OBSOLETES = []
+
+setup(
+    name=DISTNAME,
+    description=DESCRIPTION,
+    long_description=LONG_DESCRIPTION,
+
+    # Versions should comply with PEP440. For a discussion on single-sourcing
+    # the version across setup.py and the project code, see
+    # https://packaging.python.org/en/latest/single_source_version.html
+    # version=VERSION, NOW HANDLED BY GIT
+
+    maintainer=MAINTAINER,
+    maintainer_email=MAINTAINER_EMAIL,
+
+    license=LICENSE,
+    url=URL,
+    download_url=DOWNLOAD_URL,
+
+    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
+    classifiers=[
+        # How mature is this project? Common values are
+        #   3 - Alpha
+        #   4 - Beta
+        #   5 - Production/Stable
+        'Development Status :: 5 - Production/Stable',
+
+        # Indicate who your project is intended for
+        'Intended Audience :: Developers',
+        'Topic :: Software Development :: Testing',
+
+        # Pick your license as you wish (should match "license" above)
+        LICENSE_LONG,
+
+        # Specify the Python versions you support here. In particular, ensure
+        # that you indicate whether you support Python 2, Python 3 or both.
+        # 'Programming Language :: Python :: 2',
+        # 'Programming Language :: Python :: 2.6',
+        # 'Programming Language :: Python :: 2.7',
+        # 'Programming Language :: Python :: 3',
+        # 'Programming Language :: Python :: 3.3',
+        # 'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
+    ],
+
+    # What does your project relate to?
+    keywords=KEYWORDS,
+
+    # You can just specify the packages manually here if your project is
+    # simple. Or you can use find_packages().
+    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
+
+    # Alternatively, if you want to distribute just a my_module.py, uncomment
+    # this:
+    # py_modules=["my_module"],
+
+    # List run-time dependencies here. These will be installed by pip when
+    # your project is installed. For an analysis of "install_requires" vs pip's
+    # requirements files see:
+    # https://packaging.python.org/en/latest/requirements.html
+    install_requires=INSTALL_REQUIRES,
+    dependency_links=DEPENDENCY_LINKS,
+
+    # we're using git
+    use_scm_version=True,  # this provides the version + adds the date if local non-commited changes.
+    # use_scm_version={'local_scheme':'dirty-tag'},  # this provides the version + adds '+dirty' if local non-commited changes.
+    setup_requires=SETUP_REQUIRES,
+
+    # test
+    # test_suite='nose.collector',
+    tests_require=TESTS_REQUIRE,
+
+    # List additional groups of dependencies here (e.g. development
+    # dependencies). You can install these using the following syntax,
+    # for example:
+    # $ pip install -e .[dev,test]
+    extras_require=EXTRAS_REQUIRE,
+
+    # obsoletes=OBSOLETES
+
+    # If there are data files included in your packages that need to be
+    # installed, specify them here. If using Python 2.6 or less, then these
+    # have to be included in MANIFEST.in as well.
+    # package_data={
+    #     'sample': ['package_data.dat'],
+    # },
+
+    # Although 'package_data' is the preferred approach, in some case you may
+    # need to place data files outside of your packages. See:
+    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
+    # In this case, 'data_file' will be installed into '/my_data'
+    # data_files=[('my_data', ['data/data_file'])],
+
+    # To provide executable scripts, use entry points in preference to the
+    # "scripts" keyword. Entry points provide cross-platform support and allow
+    # pip to create the appropriate form of executable for the target platform.
+    # entry_points={
+    #     'console_scripts': [
+    #         'sample=sample:main',
+    #     ],
+    # },
+)