diff --git a/Jenkinsfile b/Jenkinsfile
index b558ebc80..7855f6f0f 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -38,7 +38,7 @@ pipeline {
                     pip3 install -e .
                     export MPLBACKEND="agg"
                     export OPENBLAS_NUM_THREADS=1
-                    pytest --cov-report term-missing:skip-covered
+                    pytest --cov-report term-missing:skip-covered --mocks all
                 '''
             }
             discoverGitReferenceBuild()
diff --git a/scilpy/denoise/tests/fixtures/mocks.py b/scilpy/denoise/tests/fixtures/mocks.py
new file mode 100644
index 000000000..811560598
--- /dev/null
+++ b/scilpy/denoise/tests/fixtures/mocks.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+import nibabel as nib
+import numpy as np
+import pytest
+
+
+@pytest.fixture(scope='function')
+def bilateral_filtering(mock_creator, expected_results):
+    """
+    Mock to patch the angle-aware bilateral filtering function.
+    Needs to be namespace patched by scripts.
+    """
+    def _mock_side_effect(*args, **kwargs):
+        if expected_results is None or len(expected_results) == 0:
+            return None
+
+        return nib.load(expected_results).get_fdata(dtype=np.float32)
+
+    return mock_creator("scilpy.denoise.bilateral_filtering",
+                        "angle_aware_bilateral_filtering",
+                        side_effect=_mock_side_effect)
diff --git a/scilpy/reconst/noddi.py b/scilpy/reconst/noddi.py
new file mode 100644
index 000000000..1c909262a
--- /dev/null
+++ b/scilpy/reconst/noddi.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+import amico
+from os.path import join
+import tempfile
+
+
+def get_evaluator(dwi, scheme_filename, mask, para_diff, iso_diff,
+                  lambda1, lambda2, intra_vol_fraction,
+                  intra_orientation_dist, kernels_dir=None,
+                  regenerate_kernels=True):
+
+    # Setup AMICO
+    amico.core.setup()
+    ae = amico.Evaluation('.', '.')
+    ae.load_data(dwi, scheme_filename, mask_filename=mask)
+
+    # Compute the response functions
+    ae.set_model("NODDI")
+    ae.model.set(para_diff, iso_diff, intra_vol_fraction,
+                 intra_orientation_dist, False)
+
+    ae.set_solver(lambda1=lambda1, lambda2=lambda2)
+
+    # With no kernels directory given, generate the kernels in a temporary
+    # directory. mkdtemp is used instead of a TemporaryDirectory context
+    # manager, which would delete the kernels on return, before they could
+    # be loaded for fitting.
+    if kernels_dir is None:
+        kernels_dir = join(tempfile.mkdtemp(), 'kernels', ae.model.id)
+
+    ae.set_config('ATOMS_path', kernels_dir)
+    ae.generate_kernels(regenerate=regenerate_kernels)
+
+    return ae
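A minimal sketch of how this helper is meant to be driven end to end. The file names below are hypothetical and the parameter values only mirror those used by scil_NODDI_maps.py; the only AMICO calls assumed are the ones appearing in get_evaluator above.

import numpy as np

from scilpy.reconst.noddi import get_evaluator

# Hypothetical inputs; any DWI/scheme/mask triplet accepted by AMICO works.
ae = get_evaluator('dwi.nii.gz', 'dwi.scheme', 'mask.nii.gz',
                   para_diff=0.0017, iso_diff=0.003,
                   lambda1=0.5, lambda2=0.001,
                   intra_vol_fraction=np.linspace(0.1, 0.99, 12),
                   intra_orientation_dist=np.linspace(0.03, 0.99, 12),
                   kernels_dir='kernels')

ae.load_kernels()   # load the generated (or pre-computed) kernels
ae.fit()            # fit the NODDI model voxel-wise
ae.save_results()   # write the maps to AMICO's configured output path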
diff --git a/scilpy/reconst/tests/fixtures/mocks.py b/scilpy/reconst/tests/fixtures/mocks.py
new file mode 100644
index 000000000..a8b2ec790
--- /dev/null
+++ b/scilpy/reconst/tests/fixtures/mocks.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+
+@pytest.fixture(scope='function')
+def amico_evaluator(mock_creator):
+    """
+    Mock to patch amico's kernel generation and fitting.
+    Does not need to be namespace patched by scripts.
+    """
+    return mock_creator("amico", "Evaluation",
+                        mock_attributes=["fit", "generate_kernels",
+                                         "load_kernels", "save_results"])
diff --git a/scilpy/tests/__init__.py b/scilpy/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/scilpy/tests/arrays.py b/scilpy/tests/arrays.py
index 45021ce41..54be7c9d6 100644
--- a/scilpy/tests/arrays.py
+++ b/scilpy/tests/arrays.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
 import copy
 
 import numpy as np
diff --git a/scilpy/tests/checks.py b/scilpy/tests/checks.py
new file mode 100644
index 000000000..6c9e07d3a
--- /dev/null
+++ b/scilpy/tests/checks.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+
+def _nd_array_match(_arr1, _arr2, _rtol=1E-05, _atol=1E-8):
+    return np.allclose(_arr1, _arr2, rtol=_rtol, atol=_atol)
+
+
+def _mse_metrics(_arr1, _arr2):
+    _se = (_arr1 - _arr2) ** 2.
+    return np.mean(_se), np.max(_se)
+
+
+def assert_images_close(img1, img2):
+    dtype = img1.header.get_data_dtype()
+
+    assert np.allclose(img1.affine, img2.affine), "Image affines don't match"
+
+    assert _nd_array_match(img1.get_fdata(dtype=dtype),
+                           img2.get_fdata(dtype=dtype)), \
+        "Image data don't match. MSE : {} | max SE : {}".format(
+            *_mse_metrics(img1.get_fdata(dtype=dtype),
+                          img2.get_fdata(dtype=dtype)))
+
+
+def assert_images_not_close(img1, img2, affine_must_match=True):
+    dtype = img1.header.get_data_dtype()
+
+    if affine_must_match:
+        assert np.allclose(img1.affine, img2.affine), \
+            "Image affines don't match"
+
+    assert not _nd_array_match(img1.get_fdata(dtype=dtype),
+                               img2.get_fdata(dtype=dtype)), \
+        "Image data should not match. MSE : {} | max SE : {}".format(
+            *_mse_metrics(img1.get_fdata(dtype=dtype),
+                          img2.get_fdata(dtype=dtype)))
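For reference, a sketch of how the two assertion helpers are meant to be used in a test; the image file names are hypothetical.

import nibabel as nib

from scilpy.tests.checks import assert_images_close, assert_images_not_close

result = nib.load("out.nii.gz")          # hypothetical test output
expected = nib.load("expected.nii.gz")   # hypothetical reference image

# Passes when affines match and voxel data agree within np.allclose bounds.
assert_images_close(expected, result)

# Passes when the data differ; set affine_must_match=False to skip the
# affine comparison altogether.
other = nib.load("out_other_method.nii.gz")  # hypothetical, different method
assert_images_not_close(expected, other, affine_must_match=True)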
diff --git a/scilpy/tests/dict.py b/scilpy/tests/dict.py
index fa8a533e2..af66fdcd3 100644
--- a/scilpy/tests/dict.py
+++ b/scilpy/tests/dict.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 
 dict_to_average = {
     "sub-01": {
diff --git a/scilpy/tests/plugin.py b/scilpy/tests/plugin.py
new file mode 100644
index 000000000..6ed7dff50
--- /dev/null
+++ b/scilpy/tests/plugin.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+
+"""
+Scilpy Pytest plugin. As of now, this plugin is only used to handle mocking,
+but it can be extended to hook into other parts of the testing process.
+
+Mocking interface
+-----------------
+
+Writing mocking fixtures is long and tedious. The interface provided by
+unittest.mock, with the pytest-mock overhead on top, is cumbersome.
+Moreover, with the default pytest framework, it is impossible to share
+mocks between tests that are not located within the same module.
+
+This plugin registers early into pytest and can thus investigate the
+modules' structure and load mocks into the tests' namespaces before they
+are executed.
+
+- It first hooks into pytest_addoption to add a new command line option,
+  --mocks, which loads mocks for any or all modules in the scilpy package.
+
+- It then hooks into pytest_configure to load the mocks activated by the
+  user into the test session. Note that this way, all mocks associated
+  with a module get injected into pytest's namespace, which might not be
+  granular enough for some use cases (see below).
+
+To ease mock creation and collection in test instances, and to allow for a
+more granular selection of mocks from mocking modules, this plugin provides
+two fixtures:
+
+- mock_creator : the mock_creator fixture exposes the base interface of
+                 unittest.mock patchers, but with a more convenient syntax.
+                 It is able to patch multiple attributes at once, and can
+                 be configured to create the patched object if it does not
+                 exist.
+
+- mock_collector : the mock_collector fixture is a helper function used to
+                   collect specific mocks from mocking modules. It is also
+                   used to modify the namespace into which the mocked
+                   objects get patched. This is required for some mocks to
+                   be used with scripts, when their import is relative
+                   (e.g. from . import).
+
+Creating a new mock is done using the mock_creator fixture. All mocks must
+be placed inside the scilpy library, in the tests directories of their
+respective modules, in fixtures/mocks.py. This is how they get discovered
+by the plugin.
+
+- A mock fixture must have a relevant name (e.g. amico_evaluator patches
+  several parts of the amico.Evaluation class). Its return value is the
+  result of calling the mock_creator fixture.
+
+- The mock_creator fixture does not need to be imported; it is provided
+  automatically by the pytest framework. Simply add mock_creator as a
+  parameter to the mock fixture function.
+
+Using mocks in tests is done using the mock_collector fixture. Like
+mock_creator, it is provided automatically by the pytest framework. Simply
+add mock_collector as a parameter to the test function that requires
+mocking. To use the mocks, call the mock_collector fixture with the list of
+mock names to use. Additionally, the mock_collector fixture can be used to
+modify the namespace into which the mocks are injected, by providing a
+patch_path argument as a second parameter. The returned dictionary indexes
+loaded mocks by their name and can be used to assert their usage throughout
+the test case.
+"""
+
+from glob import glob
+from os.path import realpath
+from unittest.mock import DEFAULT
+import pytest
+import warnings
+
+
+AUTOMOCK = DEFAULT
+
+# Load mock modules from all library tests
+
+MOCK_MODULES = list(_module.replace("/", ".").replace(".py", "")
+                    for _module in glob("**/tests/fixtures/mocks.py",
+                                        root_dir=realpath('.'),
+                                        recursive=True))
+
+MOCK_PACKAGES = list(m.split(".")[-4] for m in MOCK_MODULES)
+
+
+# Helper function to select mocks
+
+def _get_active_mocks(include_list=None, exclude_list=None):
+    """
+    Returns a list of all packages with active mocks in the current session
+
+    Parameters
+    ----------
+    include_list: iterable or None
+        List of scilpy packages to consider
+    exclude_list: iterable or None
+        List of scilpy packages to exclude from consideration
+
+    Returns
+    -------
+    list: list of packages with active mocks
+    """
+
+    def _active_names():
+        def _exclude(_l):
+            if exclude_list is None:
+                return _l
+            return filter(lambda _i: _i not in exclude_list, _l)
+
+        if include_list is None or len(include_list) == 0:
+            return []
+
+        if "all" in include_list:
+            return _exclude(MOCK_PACKAGES)
+
+        return _exclude([_m for _m in include_list if _m in MOCK_PACKAGES])
+
+    return list(map(lambda _m: _m[1],
+                    filter(lambda _m: _m[0] in _active_names(),
+                           zip(MOCK_PACKAGES, MOCK_MODULES))))
+
+
+# Create hooks and fixtures to handle mocking from pytest command line
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--mocks",
+        nargs='+',
+        choices=["all"] + MOCK_PACKAGES,
+        help="Load mocks for scilpy packages to accelerate "
+             "tests and prevent testing external dependencies")
+
+
+def pytest_configure(config):
+    _toggle_mocks = config.getoption("--mocks")
+    for _mock_mod in _get_active_mocks(_toggle_mocks):
+        config.pluginmanager.import_plugin(_mock_mod)
+
+
+@pytest.fixture
+def mock_collector(request):
+    """
+    Pytest fixture to collect a specific set of mocks for a test case
+    """
+    def _collector(mock_names, patch_path=None):
+        try:
+            return {_name: request.getfixturevalue(_name)(patch_path)
+                    for _name in mock_names}
+        except pytest.FixtureLookupError:
+            warnings.warn(f"Some fixtures in {mock_names} cannot be found.")
+            return None
+    return _collector
+
+
+@pytest.fixture
+def mock_creator(mocker):
+    """
+    Pytest fixture to create a namespace patchable mock
+    """
+    def _mocker(base_module, object_name, side_effect=None,
+                mock_attributes=None):
+
+        def _patcher(module_name=None):
+            _base = base_module if module_name is None else module_name
+            _mock_target = "{}.{}".format(_base, object_name)
+
+            if mock_attributes is not None:
+                return mocker.patch.multiple(_mock_target,
+                                             **{a: AUTOMOCK
+                                                for a in mock_attributes})
+
+            return mocker.patch(_mock_target, side_effect=side_effect,
+                                create=True)
+
+        return _patcher
+
+    return _mocker
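Putting the two fixtures together, a minimal hypothetical example: the fixture below would live in scilpy/somepackage/tests/fixtures/mocks.py, and somepackage, my_module, my_function and scripts.some_script are all placeholder names.

import pytest


@pytest.fixture(scope='function')
def my_function(mock_creator):
    # Patch scilpy.somepackage.my_module.my_function to always return 42.
    return mock_creator("scilpy.somepackage.my_module", "my_function",
                        side_effect=lambda *args, **kwargs: 42)


# In a test module: collect the mock, re-targeting it at the namespace of
# the script under test (needed when the script imports the name directly).
def test_something(script_runner, mock_collector):
    _mocks = mock_collector(["my_function"], "scripts.some_script")

    # ... run the script under test here ...

    # mock_collector returns None when --mocks did not activate the fixture.
    if _mocks is not None:
        _mocks["my_function"].assert_called_once()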
diff --git a/scilpy/version.py b/scilpy/version.py
index 4927cb187..796436505 100644
--- a/scilpy/version.py
+++ b/scilpy/version.py
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 
-import itertools
 import glob
 import os
 
diff --git a/scripts/scil_NODDI_maps.py b/scripts/scil_NODDI_maps.py
index bb720a85c..07bf5d700 100755
--- a/scripts/scil_NODDI_maps.py
+++ b/scripts/scil_NODDI_maps.py
@@ -16,7 +16,6 @@
 import sys
 import tempfile
 
-import amico
 from dipy.io.gradients import read_bvals_bvecs
 import numpy as np
 
@@ -27,6 +26,7 @@
                           assert_output_dirs_exist_and_empty,
                           redirect_stdout_c)
 from scilpy.gradients.bvec_bval_tools import fsl2mrtrix, identify_shells
+from scilpy.reconst.noddi import get_evaluator
 
 EPILOG = """
 Reference:
@@ -126,48 +126,33 @@ def main():
                      'at {}.'.format(len(shells_centroids),
                                      shells_centroids))
 
     with redirected_stdout:
-        # Load the data
-        amico.core.setup()
-        ae = amico.Evaluation('.', '.')
-        ae.load_data(args.in_dwi,
-                     tmp_scheme_filename,
-                     mask_filename=args.mask)
-        # Compute the response functions
-        ae.set_model("NODDI")
-
         intra_vol_frac = np.linspace(0.1, 0.99, 12)
         intra_orient_distr = np.hstack((np.array([0.03, 0.06]),
                                         np.linspace(0.09, 0.99, 10)))
 
-        ae.model.set(args.para_diff, args.iso_diff,
-                     intra_vol_frac, intra_orient_distr,
-                     False)
-        ae.set_solver(lambda1=args.lambda1, lambda2=args.lambda2)
-
-        # The kernels are, by default, set to be in the current directory
-        # Depending on the choice, manually change the saving location
-        if args.save_kernels:
-            kernels_dir = os.path.join(args.save_kernels)
-            regenerate_kernels = True
-        elif args.load_kernels:
-            kernels_dir = os.path.join(args.load_kernels)
-            regenerate_kernels = False
-        else:
-            kernels_dir = os.path.join(tmp_dir.name, 'kernels', ae.model.id)
-            regenerate_kernels = True
-
-        ae.set_config('ATOMS_path', kernels_dir)
-        ae.set_config('OUTPUT_path', args.out_dir)
-        ae.generate_kernels(regenerate=regenerate_kernels)
+        # Each of those is either None or a user-given path. As long as the
+        # result is a valid kernels path, get_evaluator will work with it.
+        kernels_dir = (args.save_kernels or
+                       args.load_kernels or
+                       os.path.join(tmp_dir.name, 'kernels'))
+
+        # Load the data and set up the NODDI model
+        ae = get_evaluator(args.in_dwi, tmp_scheme_filename, args.mask,
+                           args.para_diff, args.iso_diff,
+                           args.lambda1, args.lambda2,
+                           intra_vol_frac, intra_orient_distr,
+                           kernels_dir=kernels_dir,
+                           regenerate_kernels=not args.load_kernels)
+        ae.set_config('OUTPUT_path', args.out_dir)
 
         if args.compute_only:
             return
 
         ae.load_kernels()
 
         # Model fit
         ae.fit()
 
         # Save the results
         ae.save_results()
 
     tmp_dir.cleanup()
diff --git a/scripts/tests/test_NODDI_maps.py b/scripts/tests/test_NODDI_maps.py
index 946656adc..59c29b014 100644
--- a/scripts/tests/test_NODDI_maps.py
+++ b/scripts/tests/test_NODDI_maps.py
@@ -16,20 +16,26 @@ def test_help_option(script_runner):
     assert ret.success
 
 
-def test_execution_commit_amico(script_runner):
+def test_execution_commit_amico(script_runner, mock_collector):
     os.chdir(os.path.expanduser(tmp_dir.name))
-    in_dwi = os.path.join(get_home(), 'commit_amico',
-                          'dwi.nii.gz')
-    in_bval = os.path.join(get_home(), 'commit_amico',
-                           'dwi.bval')
-    in_bvec = os.path.join(get_home(), 'commit_amico',
-                           'dwi.bvec')
-    mask = os.path.join(get_home(), 'commit_amico',
-                        'mask.nii.gz')
+
+    _mocks = mock_collector(["amico_evaluator"])
+
+    in_dwi = os.path.join(get_home(), 'commit_amico', 'dwi.nii.gz')
+    in_bval = os.path.join(get_home(), 'commit_amico', 'dwi.bval')
+    in_bvec = os.path.join(get_home(), 'commit_amico', 'dwi.bvec')
+    mask = os.path.join(get_home(), 'commit_amico', 'mask.nii.gz')
+
     ret = script_runner.run('scil_NODDI_maps.py', in_dwi,
                             in_bval, in_bvec, '--mask', mask,
                             '--out_dir', 'noddi', '--b_thr', '30',
                             '--para_diff', '0.0017',
                             '--iso_diff', '0.003',
                             '--lambda1', '0.5', '--lambda2', '0.001',
                             '--processes', '1')
+
     assert ret.success
+
+    if _mocks is not None:
+        amico_mock = _mocks["amico_evaluator"]
+        amico_mock["fit"].assert_called_once()
+        amico_mock["generate_kernels"].assert_called_once()
+        amico_mock["save_results"].assert_called_once()
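One subtlety the test above relies on: since amico_evaluator is built with mock_attributes, the underlying mocker.patch.multiple returns a dict mapping each patched attribute name to its own MagicMock, not a single mock, hence the indexing before the assertions. A sketch, assuming the mock is active:

_mocks = mock_collector(["amico_evaluator"])

# _mocks["amico_evaluator"] is a dict of MagicMocks, e.g.
# {"fit": <MagicMock>, "generate_kernels": <MagicMock>,
#  "load_kernels": <MagicMock>, "save_results": <MagicMock>}
_mocks["amico_evaluator"]["fit"].assert_called_once()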
"scripts.scil_sh_to_aodf") ret = script_runner.run('scil_sh_to_aodf.py', in_fodf, 'out_fodf1.nii.gz', @@ -49,20 +42,24 @@ def test_asym_basis_output(script_runner, mock_filtering, in_fodf, out_fodf): print_result=True, shell=True) assert ret.success - mock_filtering.assert_called_once() - ret_fodf = nib.load("out_fodf1.nii.gz") - test_fodf = nib.load(out_fodf) - assert np.allclose(ret_fodf.get_fdata(), test_fodf.get_fdata()) + if _mocks["bilateral_filtering"]: + _mocks["bilateral_filtering"].assert_called_once() + assert_images_close(nib.load(expected_results), + nib.load("out_fodf1.nii.gz")) -@pytest.mark.parametrize("in_fodf,out_fodf,sym_fodf", + +@pytest.mark.parametrize("in_fodf,expected_results,sym_fodf", [[os.path.join(data_path, "fodf_descoteaux07_sub.nii.gz"), os.path.join(data_path, "fodf_descoteaux07_sub_full.nii.gz"), - os.path.join(data_path, "fodf_descoteaux07_sub_sym.nii.gz")]]) -def test_sym_basis_output( - script_runner, mock_filtering, in_fodf, out_fodf, sym_fodf): + os.path.join(data_path, "fodf_descoteaux07_sub_sym.nii.gz")]], + scope='function') +def test_sym_basis_output(script_runner, in_fodf, expected_results, sym_fodf, + mock_collector): + os.chdir(os.path.expanduser(tmp_dir.name)) + _mocks = mock_collector(["bilateral_filtering"], "scripts.scil_sh_to_aodf") ret = script_runner.run('scil_sh_to_aodf.py', in_fodf, @@ -76,18 +73,21 @@ def test_sym_basis_output( print_result=True, shell=True) assert ret.success - mock_filtering.assert_called_once() - ret_sym_fodf = nib.load("out_sym.nii.gz") - test_sym_fodf = nib.load(sym_fodf) - assert np.allclose(ret_sym_fodf.get_fdata(), test_sym_fodf.get_fdata()) + if _mocks["bilateral_filtering"]: + _mocks["bilateral_filtering"].assert_called_once() + assert_images_close(nib.load(sym_fodf), nib.load("out_sym.nii.gz")) -@pytest.mark.parametrize("in_fodf,out_fodf", + +@pytest.mark.parametrize("in_fodf,expected_results", [[os.path.join(data_path, "fodf_descoteaux07_sub_full.nii.gz"), - os.path.join(data_path, "fodf_descoteaux07_sub_twice.nii.gz")]]) -def test_asym_input(script_runner, mock_filtering, in_fodf, out_fodf): + os.path.join(data_path, "fodf_descoteaux07_sub_twice.nii.gz")]], + scope='function') +def test_asym_input(script_runner, in_fodf, expected_results, mock_collector): + os.chdir(os.path.expanduser(tmp_dir.name)) + _mocks = mock_collector(["bilateral_filtering"], "scripts.scil_sh_to_aodf") ret = script_runner.run('scil_sh_to_aodf.py', in_fodf, @@ -100,35 +100,32 @@ def test_asym_input(script_runner, mock_filtering, in_fodf, out_fodf): print_result=True, shell=True) assert ret.success - mock_filtering.assert_called_once() - - ret_fodf = nib.load("out_fodf3.nii.gz") - test_fodf = nib.load(out_fodf) - assert np.allclose(ret_fodf.get_fdata(), test_fodf.get_fdata()) + if _mocks["bilateral_filtering"]: + _mocks["bilateral_filtering"].assert_called_once() -@pytest.mark.parametrize("in_fodf,out_fodf", + assert_images_close(nib.load(expected_results), + nib.load("out_fodf3.nii.gz")) + + +@pytest.mark.parametrize("in_fodf,expected_results", [[os.path.join(data_path, 'fodf_descoteaux07_sub.nii.gz'), os.path.join(data_path, 'fodf_descoteaux07_sub_full.nii.gz')]]) -def test_cosine_method(script_runner, mock_filtering, in_fodf, out_fodf): +def test_cosine_method(script_runner, in_fodf, expected_results): + os.chdir(os.path.expanduser(tmp_dir.name)) + # method cosine is fast and not mocked ret = script_runner.run('scil_sh_to_aodf.py', in_fodf, 'out_fodf1.nii.gz', '--sphere', 'repulsion100', '--method', 'cosine', - '--sh_basis', 
diff --git a/setup.py b/setup.py
index f8127082b..df9a0ee87 100644
--- a/setup.py
+++ b/setup.py
@@ -70,11 +70,17 @@ def run(self):
 with open(ver_file) as f:
     exec(f.read())
 
-entry_point_legacy = []
+
+def _format_entry_point(_script, _base):
+    return "{}={}.{}:main".format(os.path.basename(_script), _base,
+                                  os.path.basename(_script).split(".")[0])
+
+
+entry_points = [_format_entry_point(s, "scripts") for s in SCRIPTS]
+
 if os.getenv('SCILPY_LEGACY') != 'False':
-    entry_point_legacy = ["{}=scripts.legacy.{}:main".format(
-        os.path.basename(s),
-        os.path.basename(s).split(".")[0]) for s in LEGACY_SCRIPTS]
+    entry_points += [_format_entry_point(s, "scripts.legacy")
+                     for s in LEGACY_SCRIPTS]
 
 opts = dict(name=NAME,
             maintainer=MAINTAINER,
@@ -98,10 +104,8 @@ def run(self):
             setup_requires=['cython', 'numpy'],
             install_requires=external_dependencies,
             entry_points={
-                'console_scripts': ["{}=scripts.{}:main".format(
-                    os.path.basename(s),
-                    os.path.basename(s).split(".")[0]) for s in SCRIPTS] +
-                entry_point_legacy
+                'console_scripts': entry_points,
+                'pytest11': ["scilpy-test=scilpy.tests.plugin"]
             },
             data_files=[('data/LUT',
                          ["data/LUT/freesurfer_desikan_killiany.json",
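For illustration, the new helper turns a script path into a console-script specification; keeping os.path.basename(_script) with its .py extension as the entry point name preserves scilpy's convention of invoking scripts as e.g. scil_sh_to_aodf.py. The legacy script name below is hypothetical.

>>> _format_entry_point("scripts/scil_sh_to_aodf.py", "scripts")
'scil_sh_to_aodf.py=scripts.scil_sh_to_aodf:main'
>>> _format_entry_point("scripts/legacy/old_script.py", "scripts.legacy")
'old_script.py=scripts.legacy.old_script:main'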