diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ccc842a2..ebc43e46 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,42 +32,32 @@ jobs: if: startsWith(runner.os, 'Linux') with: path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('**/*requirements.txt') }} + key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml') }} - uses: actions/cache@v4 if: startsWith(runner.os, 'macOS') with: path: ~/Library/Caches/pip - key: ${{ runner.os }}-pip-${{ hashFiles('**/*requirements.txt') }} + key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml') }} - uses: actions/cache@v4 if: startsWith(runner.os, 'Windows') with: path: ~\AppData\Local\pip\Cache - key: ${{ runner.os }}-py${{ matrix.python-version }}-pip-${{ hashFiles('**/*requirements.txt') }} + key: ${{ runner.os }}-py${{ matrix.python-version }}-pip-${{ hashFiles('pyproject.toml') }} - name: Install dependencies run: | python -m pip install --upgrade pip - pip install numpy wheel - pip install -r requirements.txt - pip install -r dev-requirements.txt + pip install .[development] - # We run the tests on the installed package, with all optional dependencies - # Note the use of the -Wa flag to show DeprecationWarnings - - name: Unit tests + - name: Unit tests and doc tests run: | - python -m pip install .[diffshow] - cd ~ - python -Wa -m pytest --pyargs skued --import-mode=importlib + python -Wa -m pytest - name: Build documentation run: | - python setup.py build_sphinx - - - name: Doctests - run: | - python -m sphinx -b doctest docs build + sphinx-build -M html docs build/docs release: @@ -87,8 +77,8 @@ jobs: - name: Install dependencies run: | - pip install -r requirements.txt - pip install -r dev-requirements.txt + pip install build + pip install .[development] - name: Create release description run: | @@ -97,7 +87,7 @@ jobs: - name: Create source distribution run: | - python setup.py sdist + python -m build - name: Create release uses: softprops/action-gh-release@v2 diff --git a/.readthedocs.yml b/.readthedocs.yml index 32bc2079..6862b15e 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -14,5 +14,7 @@ build: python: install: - - requirements: requirements.txt - - requirements: dev-requirements.txt \ No newline at end of file + - method: pip + path: . 
+ extra_requirements: + - development \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bd8307ca..522c6eb5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -115,10 +115,9 @@ Once you've fixed all merge conflicts, do: ### Build environment setup -To create an appropriate development environment, you need to install the base requirements (`requirements.txt`) as well as extra, development requirements (`dev-requirements.txt`) +To create an appropriate development environment, you need to install the base requirements as well as extra, development requirements: - pip install -r requirements - pip install -r dev-requirements + pip install .[development] ## Guidelines diff --git a/MANIFEST.in b/MANIFEST.in index 4ecd9a85..811ec51a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,6 @@ include README.md include CHANGELOG.rst include LICENSE.txt -include requirements.txt -include dev-requirements.txt recursive-include skued/baseline/data * recursive-include skued/simulation/data * diff --git a/dev-requirements.txt b/dev-requirements.txt deleted file mode 100644 index 8879afbb..00000000 --- a/dev-requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -setuptools; python_version >= '3.12' -# The ability to build documentation using `python setup.py build_sphinx` -# has been removed as of Sphinx v7. -# Until the setup script `setup.py` has been changed to setup.cfg -# we cannot use sphinx 7+ -Sphinx >= 3, <7 -sphinx_rtd_theme >= 0.4 -cython >= 0.25 -pytest >= 6 -black -wheel \ No newline at end of file diff --git a/docs/installation.rst b/docs/installation.rst index 9e0b2de3..07e37f3d 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -9,7 +9,7 @@ Installation Requirements ============ -Scikit-ued works on Linux, Mac OS X and Windows. It requires Python 3.7+. Packages requirements are `listed here `_. +Scikit-ued works on Linux, Mac OS X and Windows. It requires Python 3.7+. Packages requirements are `listed here `_. Install scikit-ued ================== diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..453b02d3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,75 @@ +[build-system] +requires = ["build", "setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.dynamic] +version = {attr = "skued.__version__"} + +[project] +name = "scikit-ued" +dynamic = ["version"] +authors = [ + { name="Laurent P. René de Cotret", email="laurent.decotret@outlook.com" }, +] +maintainers = [ + { name="Laurent P. 
René de Cotret", email="laurent.decotret@outlook.com" }, +] +description = "Collection of algorithms and functions for ultrafast electron scattering" +readme = "README.md" +license = {file = "LICENSE"} +requires-python = ">=3.7, <4" +dependencies = [ + "crystals >= 1.3.1, < 2", + "npstreams >= 1.6.5, < 2", + "numpy >= 1.17, < 3", + "pywavelets >= 1.0.0, < 2", + "scikit-image >= 0.19, < 1", + # See https://github.com/scipy/scipy/issues/17740 + "scipy >= 1.5.0, < 2, != 1.10.0", + "pyyaml >= 3.1", + "matplotlib >= 3.5, <4", +] +keywords=["ultrafast electron scattering"] +classifiers = [ + "Environment :: Console", + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Physics", +] + +[project.optional-dependencies] +development = [ + "Sphinx >= 3", + "sphinx_rtd_theme >= 0.4", + "pytest >= 6", + "black", +] +diffshow = ["pyqtgraph>=0.12,<1", "PyQt5"] + +[project.urls] +Documentation = "https://scikit-ued.readthedocs.io/" +Repository = "https://github.com/LaurentRDC/scikit-ued" +"Bug Tracker" = "https://github.com/LaurentRDC/scikit-ued/issues" + +[project.scripts] +skued-cli = "skued.__main__:main" + +[tool.black] +line-length = 120 +include = '\.pyi?$' + +[tool.isort] +profile = "black" + +[tool.pytest.ini_options] +minversion = "6.0" +log_cli_level = "INFO" +# Very cool ability for pytest to also run doctests on package contents with `-doctest-modules` +addopts = [ + "--doctest-modules", +] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 75a1c7b4..00000000 --- a/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -crystals >= 1.3.1, < 2 -npstreams >= 1.6.5, < 2 -numpy >= 1.17, < 3 -pywavelets >= 1.0.0, < 2 -scikit-image >= 0.19, < 1 -# See https://github.com/scipy/scipy/issues/17740 -scipy >= 1.5.0, < 2, != 1.10.0 -pyyaml >= 3.1 -matplotlib >= 3.5, <4 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index dd84e9d3..00000000 --- a/setup.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[bdist_wheel] -universal = 0 - -[metadata] -description-file = README.md \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100644 index 1c3d7463..00000000 --- a/setup.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from pathlib import Path - -# import numpy -# from Cython.Build import cythonize -from setuptools import find_packages, setup - -PACKAGE_NAME = "scikit-ued" -DESCRIPTION = "Collection of algorithms and functions for ultrafast electron scattering" -URL = "http://scikit-ued.readthedocs.io" -DOWNLOAD_URL = "http://github.com/LaurentRDC/scikit-ued" -AUTHOR = "Laurent P. 
René de Cotret" -AUTHOR_EMAIL = "laurent.decotret@outlook.com" -BASE_PACKAGE = "skued" - -base_path = Path(__file__).parent -with open(base_path / BASE_PACKAGE / "__init__.py") as f: - module_content = f.read() - VERSION = ( - re.compile(r".*__version__ = \"(.*?)\"", re.S).match(module_content).group(1) - ) - LICENSE = ( - re.compile(r".*__license__ = \"(.*?)\"", re.S).match(module_content).group(1) - ) - -with open("README.md") as f: - README = f.read() - -with open("requirements.txt") as f: - REQUIREMENTS = [line for line in f.read().split("\n") if len(line.strip())] - -exclude = {"exclude": ["external*", "docs", "*cache"]} -PACKAGES = [ - BASE_PACKAGE + "." + x - for x in find_packages(str(base_path / BASE_PACKAGE), **exclude) -] -if BASE_PACKAGE not in PACKAGES: - PACKAGES.append(BASE_PACKAGE) - - -if __name__ == "__main__": - setup( - name=PACKAGE_NAME, - description=DESCRIPTION, - long_description=README, - long_description_content_type="text/markdown", - license=LICENSE, - url=URL, - download_url=DOWNLOAD_URL, - version=VERSION, - author=AUTHOR, - author_email=AUTHOR_EMAIL, - maintainer=AUTHOR, - maintainer_email=AUTHOR_EMAIL, - install_requires=REQUIREMENTS, - extras_require={"diffshow": ["pyqtgraph>=0.12,<1", "PyQt5"]}, - keywords="ultrafast electron scattering", - project_urls={ - "Documentation": "https://scikit-ued.readthedocs.io/", - "Source": "https://github.com/LaurentRDC/scikit-ued", - }, - python_requires=">=3.7", - packages=PACKAGES, - entry_points={"console_scripts": ["skued = skued.__main__:main"]}, - include_package_data=True, - zip_safe=False, - # include_dirs = [numpy.get_include()], - # ext_modules = cythonize("skued/*/**.pyx", - # compiler_directives = {'language_level':3, - # 'boundscheck': False}), - # list of possible classifiers: - # https://pypi.python.org/pypi?%3Aaction=list_classifiers - classifiers=[ - "Environment :: Console", - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", - "Natural Language :: English", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Physics", - ], - ) diff --git a/skued/__main__.py b/skued/__main__.py index edeb486f..24f5acc6 100644 --- a/skued/__main__.py +++ b/skued/__main__.py @@ -10,9 +10,7 @@ from . import __version__ from .io import WITH_PYQTGRAPH, diffshow -parser = argparse.ArgumentParser( - prog="skued", description=f"scikit-ued {__version__} command-line utilities." -) +parser = argparse.ArgumentParser(prog="skued", description=f"scikit-ued {__version__} command-line utilities.") subparsers = parser.add_subparsers(title="command", dest="command") diff --git a/skued/affine.py b/skued/affine.py index 29109b74..f9b13622 100644 --- a/skued/affine.py +++ b/skued/affine.py @@ -39,9 +39,7 @@ def affine_map(array): extended_matrix[:3, :3] = array return extended_matrix else: - raise ValueError( - "Array shape not 3x3 or 4x4, and thus is not a transformation matrix." 
- ) + raise ValueError("Array shape not 3x3 or 4x4, and thus is not a transformation matrix.") def transform(matrix, array): diff --git a/skued/baseline/algorithms.py b/skued/baseline/algorithms.py index 1e0bf1b1..c2bd9368 100644 --- a/skued/baseline/algorithms.py +++ b/skued/baseline/algorithms.py @@ -159,9 +159,7 @@ def baseline_dwt( ) -def _iterative_baseline( - array, max_iter, mask, background_regions, axes, approx_rec_func, func_kwargs -): +def _iterative_baseline(array, max_iter, mask, background_regions, axes, approx_rec_func, func_kwargs): """ Base function for iterative baseline determination. This function is not meant to be called directly. See `baseline_dt` or `baseline_dwt`. @@ -205,9 +203,7 @@ def _iterative_baseline( # Since most wavelet transforms only works on even-length signals, we might have to extend. # See numpy.pad() docs for a formatting of the padding tuple constructed below original_shape = array.shape - padding = [ - (0, 0) for dim in range(array.ndim) - ] # e.g. 2D : padding = [ (0,0), (0,0) ] + padding = [(0, 0) for dim in range(array.ndim)] # e.g. 2D : padding = [ (0,0), (0,0) ] for axis in axes: if original_shape[axis] % 2 == 1: padding[axis] = (0, 1) @@ -347,22 +343,16 @@ def _dwt_approx_rec(array, level, wavelet, mode, axis): # Check maximum decomposition level # For 2D array, check the condition with shortest dimension min(array.shape). This is how # it is done in PyWavelet.wavedec2. - max_level = pywt.dwt_max_level( - data_len=array.shape[axis], filter_len=wavelet.dec_len - ) + max_level = pywt.dwt_max_level(data_len=array.shape[axis], filter_len=wavelet.dec_len) if level is None: level = max_level elif max_level < level: - warn( - f"Decomposition level {level} higher than maximum {max_level}. Maximum is used." - ) + warn(f"Decomposition level {level} higher than maximum {max_level}. Maximum is used.") level = max_level # By now, we are sure that the decomposition level will be supported. # Decompose the signal using the multilevel discrete wavelet transform - coeffs = pywt.wavedec( - data=array, wavelet=wavelet, level=level, mode=mode, axis=axis - ) + coeffs = pywt.wavedec(data=array, wavelet=wavelet, level=level, mode=mode, axis=axis) app_coeffs, det_coeffs = coeffs[0], coeffs[1:] # Replace detail coefficients by 0; keep the correct length so that the @@ -378,9 +368,7 @@ def _dwt_approx_rec(array, level, wavelet, mode, axis): # Sometimes pywt.waverec returns a signal that is longer than the original signal while reconstructed.shape[axis] > array.shape[axis]: - reconstructed = np.swapaxes( - np.swapaxes(reconstructed, 0, axis)[: array.shape[axis]], 0, axis - ) + reconstructed = np.swapaxes(np.swapaxes(reconstructed, 0, axis)[: array.shape[axis]], 0, axis) return reconstructed @@ -427,22 +415,16 @@ def _dwt_approx_rec2(array, level, wavelet, mode, axis): # Check maximum decomposition level # For 2D array, check the condition with shortest dimension min(array.shape). This is how # it is done in PyWavelet.wavedec2. - max_level = pywt.dwt_max_level( - data_len=min(array.shape[ax] for ax in axis), filter_len=wavelet.dec_len - ) + max_level = pywt.dwt_max_level(data_len=min(array.shape[ax] for ax in axis), filter_len=wavelet.dec_len) if level is None: level = max_level elif max_level < level: - warn( - f"Decomposition level {level} higher than maximum {max_level}. Maximum is used." - ) + warn(f"Decomposition level {level} higher than maximum {max_level}. Maximum is used.") level = max_level # By now, we are sure that the decomposition level will be supported. 
# Decompose the signal using the multilevel discrete wavelet transform - coeffs = pywt.wavedec2( - data=array, wavelet=wavelet, level=level, mode=mode, axes=axis - ) + coeffs = pywt.wavedec2(data=array, wavelet=wavelet, level=level, mode=mode, axes=axis) app_coeffs, det_coeffs = coeffs[0], coeffs[1:] # Replace detail coefficients by 0; keep the correct length so that the diff --git a/skued/baseline/dtcwt.py b/skued/baseline/dtcwt.py index b548ec56..e43a9cf1 100644 --- a/skued/baseline/dtcwt.py +++ b/skued/baseline/dtcwt.py @@ -99,17 +99,13 @@ def dtcwt(data, first_stage, wavelet, mode="constant", level=None, axis=-1): data = np.asarray(data, dtype=float) / np.sqrt(2) if level is None: - level = dt_max_level( - data=data, first_stage=first_stage, wavelet=wavelet, axis=axis - ) + level = dt_max_level(data=data, first_stage=first_stage, wavelet=wavelet, axis=axis) elif level == 0: return [data] # Check axis bounds if axis > data.ndim - 1: - raise ValueError( - f"Input array has {data.ndim} dimensions, but input axis is {axis}" - ) + raise ValueError(f"Input array has {data.ndim} dimensions, but input axis is {axis}") elif data.shape[axis] % 2: raise ValueError( f"Input array has shape {data.shape[axis]} along transform direction \ @@ -174,9 +170,7 @@ def idtcwt(coeffs, first_stage, wavelet, mode="constant", axis=-1): Magazine pp. 123 - 151, November 2005. """ if len(coeffs) < 1: - raise ValueError( - f"Coefficient list too short with {len(coeffs)} elements (minimum 1 array required)." - ) + raise ValueError(f"Coefficient list too short with {len(coeffs)} elements (minimum 1 array required).") elif len(coeffs) == 1: # level 0 inverse transform return np.sqrt(2) * coeffs[0] else: @@ -321,9 +315,7 @@ def _single_tree_synthesis_1d(coeffs, first_stage, wavelet, mode, axis): approx = idwt(cA=approx, cD=detail, wavelet=wav, mode=mode, axis=axis) approx = _normalize_size_axis(approx, first_stage_detail, axis=axis) - return idwt( - cA=approx, cD=first_stage_detail, wavelet=first_stage, mode=mode, axis=axis - ) + return idwt(cA=approx, cD=first_stage_detail, wavelet=first_stage, mode=mode, axis=axis) @lru_cache(maxsize=len(available_dt_filters())) diff --git a/skued/baseline/tests/test_algorithms.py b/skued/baseline/tests/test_algorithms.py index 7d199106..a403e125 100644 --- a/skued/baseline/tests/test_algorithms.py +++ b/skued/baseline/tests/test_algorithms.py @@ -32,15 +32,11 @@ def test_approx_rec(): assert rec_arr2.shape == arr2.shape arr2 = np.random.random(size=(102, 104)) - rec_arr2 = _dwt_approx_rec2( - arr2, level=2, wavelet="sym6", mode="constant", axis=(-2, -1) - ) + rec_arr2 = _dwt_approx_rec2(arr2, level=2, wavelet="sym6", mode="constant", axis=(-2, -1)) assert rec_arr2.shape == arr2.shape arr2 = np.random.random(size=(102, 94, 25)) - rec_arr2 = _dwt_approx_rec2( - arr2, level=2, wavelet="sym6", mode="constant", axis=(0, 1) - ) + rec_arr2 = _dwt_approx_rec2(arr2, level=2, wavelet="sym6", mode="constant", axis=(0, 1)) assert rec_arr2.shape == arr2.shape diff --git a/skued/baseline/tests/test_dtcwt.py b/skued/baseline/tests/test_dtcwt.py index 924fbf93..3d0b8e06 100644 --- a/skued/baseline/tests/test_dtcwt.py +++ b/skued/baseline/tests/test_dtcwt.py @@ -67,9 +67,7 @@ def test_perfect_reconstruction_level_1(n_dimensions): array = gen_input(n_dimensions) for first_stage in available_first_stage_filters(): coeffs = dtcwt(data=array, level=1, first_stage=first_stage, wavelet="qshift1") - reconstructed = idtcwt( - coeffs=coeffs, first_stage=first_stage, wavelet="qshift1" - ) + reconstructed = 
idtcwt(coeffs=coeffs, first_stage=first_stage, wavelet="qshift1") assert np.allclose(array, reconstructed) @@ -90,9 +88,7 @@ def test_perfect_reconstruction_multilevel(n_dimensions): first_stage=first_stage, wavelet=wavelet, ) - reconstructed = idtcwt( - coeffs=coeffs, first_stage=first_stage, wavelet=wavelet - ) + reconstructed = idtcwt(coeffs=coeffs, first_stage=first_stage, wavelet=wavelet) assert np.allclose(array, reconstructed) @@ -108,9 +104,7 @@ def test_axis(n_dimensions): first_stage="sym6", wavelet="qshift1", ) - reconstructed = idtcwt( - coeffs=coeffs, axis=axis, first_stage="sym6", wavelet="qshift1" - ) + reconstructed = idtcwt(coeffs=coeffs, axis=axis, first_stage="sym6", wavelet="qshift1") assert np.allclose(array, reconstructed) diff --git a/skued/eproperties.py b/skued/eproperties.py index 4772b1c2..5c2d7bc2 100644 --- a/skued/eproperties.py +++ b/skued/eproperties.py @@ -50,11 +50,7 @@ def electron_wavelength(keV): .. Kirkland 2010 Eq. 2.5 """ eV = elementary_charge * keV * 1e3 - wavelength_meters = ( - Planck - * speed_of_light - / np.sqrt(eV * (2 * electron_mass * speed_of_light**2 + eV)) - ) + wavelength_meters = Planck * speed_of_light / np.sqrt(eV * (2 * electron_mass * speed_of_light**2 + eV)) return wavelength_meters * 1e10 # wavelength in angstroms diff --git a/skued/image/brillouin.py b/skued/image/brillouin.py index 31b77770..fd68a1c4 100644 --- a/skued/image/brillouin.py +++ b/skued/image/brillouin.py @@ -100,9 +100,7 @@ def __init__(self, image, mask, peaks, center=None, optimization_radius=None): skipped_peaks = list() # assert type(optimization_radius) == float for idx, peak in enumerate(peaks): - if ( - idx > 0 - ): # do not optimize center, which is masked anyway most likely + if idx > 0: # do not optimize center, which is masked anyway most likely r, c = peaks[idx] peak = np.asarray(peak).astype(int) disk = DiskSelection( @@ -114,9 +112,7 @@ def __init__(self, image, mask, peaks, center=None, optimization_radius=None): region = image[r1:r2, c1:c2] try: true_peak_idx_local = np.where(region == region.max()) - true_peak_idx_global = np.where( - self._image == region[true_peak_idx_local] - ) + true_peak_idx_global = np.where(self._image == region[true_peak_idx_local]) new_r, new_c = ( true_peak_idx_global[0][0], true_peak_idx_global[1][0], @@ -169,14 +165,8 @@ def getVisibleBZs(self, symmetry=None, bbox=None): for r in self.__voronoi_regions: if not r.is_inf: verts = np.array(r.vertices).reshape(-1, 2) - COND1 = np.all( - np.sqrt(np.sum((verts - self.center) ** 2, axis=1)) < bbox - ) - COND2 = ( - (verts.shape[0] == self._symmetry) - if self._symmetry is not None - else True - ) + COND1 = np.all(np.sqrt(np.sum((verts - self.center) ** 2, axis=1)) < bbox) + COND2 = (verts.shape[0] == self._symmetry) if self._symmetry is not None else True if COND1 and COND2: r.add_visible_vertex(verts) r.visible_vertices = np.array(r.visible_vertices).reshape(-1, 2) @@ -204,9 +194,7 @@ def renderVisibleBZs(self, axis, BZs=None, **kwargs): from matplotlib.patches import Polygon for r in self.visible_BZs if BZs is None else BZs: - axis.add_patch( - Polygon(np.array(r.vertices).reshape(-1, 2), fill=False, **kwargs) - ) + axis.add_patch(Polygon(np.array(r.vertices).reshape(-1, 2), fill=False, **kwargs)) def determineConsistency(self, BZs=None): """ @@ -267,12 +255,8 @@ def getEquivalentScatteringVectors(self, Qvector, use_visible=False): The determined equivalent scattering vectors. 
The number of vectors is dependent on the set of peaks used determined by the `use_visible` parameter. """ - self.__Qvector = ( - Qvector - self.center - ) # place scat vec in pixel space with shifted origin to (0,0) - self.__bragg_peaks = ( - self.bragg_peaks - self.center - ) # place undiffracted beam at origin in pixel space + self.__Qvector = Qvector - self.center # place scat vec in pixel space with shifted origin to (0,0) + self.__bragg_peaks = self.bragg_peaks - self.center # place undiffracted beam at origin in pixel space deltas = self.__bragg_peaks - self.__Qvector CLOSEST_PEAK_INDEX = np.argmin(np.einsum("ij,ij->i", deltas, deltas)) @@ -282,37 +266,23 @@ def getEquivalentScatteringVectors(self, Qvector, use_visible=False): # Now determine relative angle of reduced wavevector to the closest bragg peak inner = np.inner(REDUCED_WAVEVECTOR, self.__bragg_peaks[CLOSEST_PEAK_INDEX]) - norms = np.linalg.norm(REDUCED_WAVEVECTOR) * np.linalg.norm( - self.__bragg_peaks[CLOSEST_PEAK_INDEX] - ) + norms = np.linalg.norm(REDUCED_WAVEVECTOR) * np.linalg.norm(self.__bragg_peaks[CLOSEST_PEAK_INDEX]) ORIGINAL_ANG = np.arccos(np.clip(inner / norms, -1.0, 1.0)) REDUCED_WAVEVECTOR_RADIUS = np.linalg.norm(REDUCED_WAVEVECTOR) ANGULAR_SEP = np.arctan2(REDUCED_WAVEVECTOR[1], REDUCED_WAVEVECTOR[0]) QVectors = list() if use_visible: for r in self.visible_BZs: - CURRENT_ANGLE = np.arctan2( - r.center[1] - self.center[1], r.center[0] - self.center[0] - ) + CURRENT_ANGLE = np.arctan2(r.center[1] - self.center[1], r.center[0] - self.center[0]) COSANG = np.cos(ANGULAR_SEP + CURRENT_ANGLE - ORIGINAL_ANG) SINANG = np.sin(ANGULAR_SEP + CURRENT_ANGLE - ORIGINAL_ANG) - QVectors.append( - r.center + REDUCED_WAVEVECTOR_RADIUS * np.array([COSANG, SINANG]) - ) + QVectors.append(r.center + REDUCED_WAVEVECTOR_RADIUS * np.array([COSANG, SINANG])) else: for peak in self.bragg_peaks: - CURRENT_ANGLE = np.arctan2( - peak[1] - self.center[1], peak[0] - self.center[0] - ) + CURRENT_ANGLE = np.arctan2(peak[1] - self.center[1], peak[0] - self.center[0]) COSANG = np.cos(ANGULAR_SEP + CURRENT_ANGLE - ORIGINAL_ANG) SINANG = np.sin(ANGULAR_SEP + CURRENT_ANGLE - ORIGINAL_ANG) - QVectors.append( - peak + REDUCED_WAVEVECTOR_RADIUS * np.array([COSANG, SINANG]) - ) - QVectors = ( - np.array(sorted(QVectors, key=lambda p: np.linalg.norm(p - self.center))) - .reshape(-1, 2) - .astype(int) - ) + QVectors.append(peak + REDUCED_WAVEVECTOR_RADIUS * np.array([COSANG, SINANG])) + QVectors = np.array(sorted(QVectors, key=lambda p: np.linalg.norm(p - self.center))).reshape(-1, 2).astype(int) return QVectors[1:, :] # ignore vector around center diff --git a/skued/image/calibration.py b/skued/image/calibration.py index 131c934b..86728028 100644 --- a/skued/image/calibration.py +++ b/skued/image/calibration.py @@ -44,9 +44,7 @@ def calq(I, crystal, peak_indices, miller_indices): I = np.asarray(I) if I.ndim != 2: - raise ValueError( - f"Expected 2D diffraction pattern, but received shape {I.shape}" - ) + raise ValueError(f"Expected 2D diffraction pattern, but received shape {I.shape}") hkl1, hkl2 = miller_indices qx1, qy1, _ = crystal.scattering_vector(hkl1) @@ -58,12 +56,8 @@ def calq(I, crystal, peak_indices, miller_indices): peak_indices_x = [peak_index[0] for peak_index in peak_indices] peak_indices_y = [peak_index[1] for peak_index in peak_indices] - slope_x, intercept_x = np.polyfit( - np.asarray(peak_indices_x), np.asarray([qx1, qx2]), deg=1 - ) - slope_y, intercept_y = np.polyfit( - np.asarray(peak_indices_y), np.asarray([qy1, qy2]), deg=1 - ) 
+ slope_x, intercept_x = np.polyfit(np.asarray(peak_indices_x), np.asarray([qx1, qx2]), deg=1) + slope_y, intercept_y = np.polyfit(np.asarray(peak_indices_y), np.asarray([qy1, qy2]), deg=1) x_range = slope_x * np.arange(0, I.shape[0]) + intercept_x y_range = slope_y * np.arange(0, I.shape[1]) + intercept_y @@ -108,9 +102,7 @@ def powder_calq(I, crystal, peak_indices, miller_indices): I = np.asarray(I) if I.ndim > 1: - raise ValueError( - f"Expected 1D diffraction intensity, but received shape {I.shape}" - ) + raise ValueError(f"Expected 1D diffraction intensity, but received shape {I.shape}") if len(peak_indices) != len(miller_indices): raise ValueError( @@ -119,9 +111,7 @@ def powder_calq(I, crystal, peak_indices, miller_indices): ) if len(peak_indices) < 2: - raise ValueError( - f"Two peaks are required to calibrate, but received {len(peak_indices)}" - ) + raise ValueError(f"Two peaks are required to calibrate, but received {len(peak_indices)}") # scattering vector length based on known structure qs = [hypot(*crystal.scattering_vector(hkl)) for hkl in miller_indices] diff --git a/skued/image/indexing.py b/skued/image/indexing.py index 72396391..fa44246b 100644 --- a/skued/image/indexing.py +++ b/skued/image/indexing.py @@ -62,9 +62,7 @@ def bragg_peaks(im, mask=None, center=None, min_dist=None): im /= gaussian_filter(input=im, sigma=min(im.shape) / 20, truncate=2) im = np.nan_to_num(im, copy=False) - autocorr = np.abs( - cross_correlate_masked(arr1=im, arr2=im, m1=mask, m2=mask, mode="same") - ) + autocorr = np.abs(cross_correlate_masked(arr1=im, arr2=im, m1=mask, m2=mask, mode="same")) # The regions of interest are defined on the labels made # from the autocorrelation of the image. The center of the autocorr @@ -88,9 +86,7 @@ def bragg_peaks(im, mask=None, center=None, min_dist=None): labels = label(regions, return_num=False) props = regionprops(label_image=labels, intensity_image=im) - candidates = [ - prop for prop in props if not np.any(np.isnan(prop.weighted_centroid)) - ] + candidates = [prop for prop in props if not np.any(np.isnan(prop.weighted_centroid))] # Some regions are very close to each other; we prune them! if min_dist is None: @@ -107,9 +103,7 @@ def bragg_peaks(im, mask=None, center=None, min_dist=None): return sorted(peaks, key=lambda p: np.linalg.norm(p - center)) -def bragg_peaks_persistence( - im, mask=None, center=None, min_dist=None, bd_threshold=0.0, prominence=0.0 -): +def bragg_peaks_persistence(im, mask=None, center=None, min_dist=None, bd_threshold=0.0, prominence=0.0): """ Extract the position of Bragg peaks in a single-crystal diffraction pattern using 2D persistence of the image landscape. 
If detected peaks fall above @@ -218,38 +212,27 @@ def bragg_peaks_persistence( combos = combinations(candidates, 2) if type(min_dist) == int or type(min_dist) == float: points_to_remove = [ - point2 - for point1, point2 in combos - if np.linalg.norm(np.array(point1) - np.array(point2)) < min_dist + point2 for point1, point2 in combos if np.linalg.norm(np.array(point1) - np.array(point2)) < min_dist ] else: points_to_remove = [ point2 for point1, point2 in combos - if abs(point1[0] - point2[0]) <= min_dist[0] - and abs(point1[1] - point2[1]) <= min_dist[1] + if abs(point1[0] - point2[0]) <= min_dist[0] and abs(point1[1] - point2[1]) <= min_dist[1] ] candidates = [point for point in candidates if point not in points_to_remove] candidates = np.array(candidates).reshape(-1, 2) - peaks = np.array( - sorted(candidates, key=lambda p: np.linalg.norm(p - center)) - ).reshape(-1, 2) + peaks = np.array(sorted(candidates, key=lambda p: np.linalg.norm(p - center))).reshape(-1, 2) birth_death = np.array(birth_death).reshape(-1, 2) # remove peaks that are within the masked area if mask.sum() != mask.shape[0] * mask.shape[1]: peaks = np.array([p for p in peaks if mask[p[1], p[0]]]) - birth_death = np.array( - [bd for p, bd in zip(peaks, birth_death) if mask[p[1], p[0]]] - ) - birth_death_indices = np.array( - [bdi for p, bdi in zip(peaks, birth_death_indices) if mask[p[1], p[0]]] - ) - persistencies = np.array( - [pers for p, pers in zip(peaks, persistencies) if mask[p[1], p[0]]] - ) + birth_death = np.array([bd for p, bd in zip(peaks, birth_death) if mask[p[1], p[0]]]) + birth_death_indices = np.array([bdi for p, bdi in zip(peaks, birth_death_indices) if mask[p[1], p[0]]]) + persistencies = np.array([pers for p, pers in zip(peaks, persistencies) if mask[p[1], p[0]]]) return peaks, birth_death, birth_death_indices, persistencies @@ -357,10 +340,7 @@ def calculate(self): self._groups0[self.uf[q]] = (bl, bl - v, p) self.uf.union(oldp, q) - self._groups0 = [ - (k, self._groups0[k][0], self._groups0[k][1], self._groups0[k][2]) - for k in self._groups0 - ] + self._groups0 = [(k, self._groups0[k][0], self._groups0[k][1], self._groups0[k][2]) for k in self._groups0] self._groups0.sort(key=lambda g: g[2], reverse=True) self.persistence = self._groups0 diff --git a/skued/image/powder.py b/skued/image/powder.py index 6aef90b9..103461a5 100644 --- a/skued/image/powder.py +++ b/skued/image/powder.py @@ -66,9 +66,7 @@ def azimuthal_average(image, center, mask=None, angular_bounds=None, trim=True): if angular_bounds: mi, ma = _angle_bounds(angular_bounds) - angles = ( - np.rad2deg(np.arctan2(Y - yc, X - xc)) + 180 - ) # arctan2 is defined on [-pi, pi] but we want [0, pi] + angles = np.rad2deg(np.arctan2(Y - yc, X - xc)) + 180 # arctan2 is defined on [-pi, pi] but we want [0, pi] in_bounds = np.logical_and(mi <= angles, angles <= ma) else: in_bounds = np.ones_like(image, dtype=bool) diff --git a/skued/image/symmetry.py b/skued/image/symmetry.py index ff4f07fc..bc6f8bb6 100644 --- a/skued/image/symmetry.py +++ b/skued/image/symmetry.py @@ -41,9 +41,7 @@ def nfold(im, mod, center=None, mask=None, fill_value=0.0): ValueError : If `mod` is not a divisor of 360 deg. """ if 360 % mod: - raise ValueError( - f"{mod}-fold rotational symmetry is not valid (not a divisor of 360)." 
- ) + raise ValueError(f"{mod}-fold rotational symmetry is not valid (not a divisor of 360).") angles = range(0, 360, int(360 / mod)) im = np.array(im, copy=True) diff --git a/skued/image/tests/test_alignment.py b/skued/image/tests/test_alignment.py index 6288c561..839acc5f 100644 --- a/skued/image/tests/test_alignment.py +++ b/skued/image/tests/test_alignment.py @@ -52,9 +52,7 @@ def test_ialign_misaligned_canned_images(): misaligned = (ndi.shift(camera(), shift=s) for s in shifts) - for aligned, (sx, sy) in zip( - ialign(misaligned, reference=reference, mask=mask), shifts - ): + for aligned, (sx, sy) in zip(ialign(misaligned, reference=reference, mask=mask), shifts): assert np.allclose(reference[sx::, 0:-sy], aligned[sx::, 0:-sy]) diff --git a/skued/image/tests/test_calibration.py b/skued/image/tests/test_calibration.py index a1b1d892..872bf6a9 100644 --- a/skued/image/tests/test_calibration.py +++ b/skued/image/tests/test_calibration.py @@ -30,9 +30,7 @@ def test_powder_calq_simulation(): q2 = np.sqrt(Gx2**2 + Gy2**2 + Gz2**2) arr_index2 = np.argmin(np.abs(q - q2)) - calibrated = powder_calq( - I, c, peak_indices=(arr_index1, arr_index2), miller_indices=(peak1, peak2) - ) + calibrated = powder_calq(I, c, peak_indices=(arr_index1, arr_index2), miller_indices=(peak1, peak2)) assert I.shape == calibrated.shape assert np.allclose(q, calibrated, rtol=0.01) @@ -90,9 +88,7 @@ def test_detector_scattvectors_center(): def test_detector_scattvectors_default_center(): """Test that the placement of the center by default is in the center of the detector.""" - qx, qy, qz = detector_scattvectors( - keV=200, camera_length=1, shape=(512, 512), pixel_size=1e-6, center=None - ) + qx, qy, qz = detector_scattvectors(keV=200, camera_length=1, shape=(512, 512), pixel_size=1e-6, center=None) q_parallel = np.sqrt(qx**2 + qy**2) assert np.unravel_index(np.argmin(q_parallel), qx.shape) == (256, 256) @@ -101,9 +97,7 @@ def test_detector_scattvectors_default_center(): def test_detector_scattvectors_ewald_radius(): """Test that the norm of total scattering vector norm is constant and equal to the Ewald sphere radius""" - qx, qy, qz = detector_scattvectors( - keV=200, camera_length=1, shape=(128, 128), pixel_size=1e-6, center=None - ) + qx, qy, qz = detector_scattvectors(keV=200, camera_length=1, shape=(128, 128), pixel_size=1e-6, center=None) q_norm = np.sqrt(qx**2 + qy**2 + qz**2) ewald_sphere_radius = 2 * np.pi / electron_wavelength(keV=200) diff --git a/skued/image/tests/test_center.py b/skued/image/tests/test_center.py index 0925ed66..781785fd 100644 --- a/skued/image/tests/test_center.py +++ b/skued/image/tests/test_center.py @@ -84,9 +84,7 @@ def test_autocenter_shifted_with_mask(rc, cc): assert np.allclose(autocenter(im, mask=mask), center, atol=1) -CENTERS = list( - range(DIFF_PATTERN_SIZE // 3, 2 * DIFF_PATTERN_SIZE // 3, DIFF_PATTERN_SIZE // 10) -) +CENTERS = list(range(DIFF_PATTERN_SIZE // 3, 2 * DIFF_PATTERN_SIZE // 3, DIFF_PATTERN_SIZE // 10)) @pytest.mark.parametrize("rc", CENTERS) diff --git a/skued/image/tests/test_indexing_persistence.py b/skued/image/tests/test_indexing_persistence.py index 93b270d9..efc3d879 100644 --- a/skued/image/tests/test_indexing_persistence.py +++ b/skued/image/tests/test_indexing_persistence.py @@ -44,9 +44,7 @@ def test_bragg_peaks_persistence(): for refl in cryst.bounded_reflections(kk.max()) if (refl[2] == 0 and np.abs(cryst.scattering_vector(refl)[0]) < kx.max()) ] - peaks, _, _, _ = bragg_peaks_persistence( - I, mask=np.ones_like(I, dtype=bool), prominence=0.04 - ) + 
peaks, _, _, _ = bragg_peaks_persistence(I, mask=np.ones_like(I, dtype=bool), prominence=0.04) assert len(peaks) == len(in_plane_refls) - 1 # reflections include Q=0 diff --git a/skued/image/tests/test_powder.py b/skued/image/tests/test_powder.py index a1b9d93e..150c6471 100644 --- a/skued/image/tests/test_powder.py +++ b/skued/image/tests/test_powder.py @@ -76,9 +76,7 @@ def test_azimuthal_average_angular_bounds(): radius, intensity = azimuthal_average(image, center, angular_bounds=(60, 360)) assert np.allclose(intensity, np.zeros_like(intensity)) - radius, intensity = azimuthal_average( - image, center, angular_bounds=(60 + 360, 360 + 360) - ) + radius, intensity = azimuthal_average(image, center, angular_bounds=(60 + 360, 360 + 360)) assert np.allclose(intensity, np.zeros_like(intensity)) @@ -126,9 +124,7 @@ def test_azimuthal_average_trim_and_mask(): radius, intensity = azimuthal_average(image, center, mask=mask, trim=False) assert radius.min() == 0 - radius_trimmed, intensity_trimmed = azimuthal_average( - image, center, mask=mask, trim=True - ) + radius_trimmed, intensity_trimmed = azimuthal_average(image, center, mask=mask, trim=True) assert radius_trimmed.min() == 20 diff --git a/skued/io/diffshow.py b/skued/io/diffshow.py index 9fc12495..a245b95a 100644 --- a/skued/io/diffshow.py +++ b/skued/io/diffshow.py @@ -63,9 +63,7 @@ def update_cursor_info(self, event): val = self.viewer.getImageItem().image[i, j] except IndexError: val = 0 - self.cursor_info.setText( - f"Position: ({i},{j}) | Pixel value: {val:.2f} cnts" - ) + self.cursor_info.setText(f"Position: ({i},{j}) | Pixel value: {val:.2f} cnts") @contextmanager diff --git a/skued/io/dm.py b/skued/io/dm.py index 9c8d044e..0fce281c 100644 --- a/skued/io/dm.py +++ b/skued/io/dm.py @@ -371,9 +371,7 @@ def _readAnyData(self): arrayTypes = self._readArrayTypes() self._readArrayData(arrayTypes) else: - raise Exception( - "rAnD, " + hex(self._f.tell()) + ": Can't understand encoded type" - ) + raise Exception("rAnD, " + hex(self._f.tell()) + ": Can't understand encoded type") return 1 def _readNativeData(self, encodedType, etSize): @@ -381,12 +379,7 @@ def _readNativeData(self, encodedType, etSize): if encodedType in readFunc: val = readFunc[encodedType](self._f) else: - raise Exception( - "rND, " - + hex(self._f.tell()) - + ": Unknown data type " - + str(encodedType) - ) + raise Exception("rND, " + hex(self._f.tell()) + ": Unknown data type " + str(encodedType)) return val def _readStringData(self, stringSize): @@ -531,10 +524,7 @@ def __init__(self, filename): # raise Exception if not DM3 or DM4 if not (isDM3 or isDM4): - raise Exception( - "'%s' does not appear to be a DM3/DM4 file." - % os.path.split(self._filename)[1] - ) + raise Exception("'%s' does not appear to be a DM3/DM4 file." 
% os.path.split(self._filename)[1]) self._fileVersion = fileVersion @@ -720,9 +710,7 @@ def cuts(self): def pxsize(self): """Returns pixel size and unit.""" tag_root = "root.ImageList.1" - pixel_size = float( - self.tags["%s.ImageData.Calibrations.Dimension.0.Scale" % tag_root] - ) + pixel_size = float(self.tags["%s.ImageData.Calibrations.Dimension.0.Scale" % tag_root]) unit = self.tags["%s.ImageData.Calibrations.Dimension.0.Units" % tag_root] if unit == "\xb5m": unit = "micron" diff --git a/skued/patterson.py b/skued/patterson.py index d753a515..7389364b 100644 --- a/skued/patterson.py +++ b/skued/patterson.py @@ -48,7 +48,4 @@ def patterson(q, I, crystal, radii): extended_reduced_intensity = np.outer(reduced_intensity, np.ones_like(radii)) dq = np.mean(np.diff(q)) - return ( - np.sum((GG / rr) * np.sin(rr * GG) * (extended_reduced_intensity - 1), axis=0) - * dq - ) + return np.sum((GG / rr) * np.sin(rr * GG) * (extended_reduced_intensity - 1), axis=0) * dq diff --git a/skued/potential_map.py b/skued/potential_map.py index a7bc8632..1dca1586 100644 --- a/skued/potential_map.py +++ b/skued/potential_map.py @@ -74,9 +74,7 @@ def potential_map(q, I, crystal, mesh): # Extract structure factor with correction factors # Diffracted intensities add up linearly (NOT structure factors) - qx, qy, qz = change_basis_mesh( - hs, ks, ls, basis1=crystal.reciprocal_vectors, basis2=np.eye(3) - ) + qx, qy, qz = change_basis_mesh(hs, ks, ls, basis1=crystal.reciprocal_vectors, basis2=np.eye(3)) qx, qy, qz = ( qx.reshape((1, 1, 1, -1)), qy.reshape((1, 1, 1, -1)), @@ -90,9 +88,7 @@ def potential_map(q, I, crystal, mesh): # Squeeze out extra dimensions (e.g. if mesh was 2D) potential_map = np.sum( - exp_SF - * np.real(np.exp(1j * np.angle(SF))) - * np.cos(xx * qx + yy * qy + zz * qz), + exp_SF * np.real(np.exp(1j * np.angle(SF))) * np.cos(xx * qx + yy * qy + zz * qz), axis=3, ) return np.squeeze(potential_map) @@ -148,9 +144,7 @@ def potential_synthesis(reflections, intensities, crystal, mesh): experimental_SF = np.sqrt(intensities) * np.exp(1j * phases) experimental_SF = experimental_SF.reshape((1, 1, 1, -1)) - qx, qy, qz = change_basis_mesh( - hs, ks, ls, basis1=crystal.reciprocal_vectors, basis2=np.eye(3) - ) + qx, qy, qz = change_basis_mesh(hs, ks, ls, basis1=crystal.reciprocal_vectors, basis2=np.eye(3)) qx, qy, qz = ( qx.reshape((1, 1, 1, -1)), qy.reshape((1, 1, 1, -1)), diff --git a/skued/simulation/form_factors.py b/skued/simulation/form_factors.py index 61d0aa20..9d366e78 100644 --- a/skued/simulation/form_factors.py +++ b/skued/simulation/form_factors.py @@ -51,13 +51,9 @@ def affe(atom, nG): atomic_number = atom.atomic_number try: - _, a1, b1, a2, b2, a3, b3, c1, d1, c2, d2, c3, d3 = scattering_params[ - atomic_number - ] + _, a1, b1, a2, b2, a3, b3, c1, d1, c2, d2, c3, d3 = scattering_params[atomic_number] except KeyError: - raise ValueError( - f"Scattering information for element Z={atomic_number} is unavailable." - ) + raise ValueError(f"Scattering information for element Z={atomic_number} is unavailable.") # Parametrization of form factors is done in terms of q = 2 s = 2 pi |G| q = nG / (2 * np.pi) @@ -128,16 +124,10 @@ def _affe_p(element, s): .. [#] Jin-Cheng Zheng, Lijun Wu and Yimei Zhu. "Aspherical electron scattering factors and their parameterizations for elements from H to Xe" (2009). J. Appl. Cryst. vol 42, pp. 1043 - 1053. 
""" - affe_p_sph = _affe_parametrization( - s, aspherical_ff[element]["p0"] - ) # Numerical parametrization of Eq 12 - affe_p_p1 = _affe_parametrization( - s, aspherical_ff[element]["p1"] - ) # Numerical parametrization of Eq 13 + affe_p_sph = _affe_parametrization(s, aspherical_ff[element]["p0"]) # Numerical parametrization of Eq 12 + affe_p_p1 = _affe_parametrization(s, aspherical_ff[element]["p1"]) # Numerical parametrization of Eq 13 # Angle between the electron beam and the z-axis of the orbital # TODO: How do we find this? beta = 0 # radians - return (3 / 2) * (sin(beta) ** 2) * affe_p_sph + ( - cos(beta) ** 2 - (1 / 2) * sin(beta) ** 2 - ) * affe_p_p1 + return (3 / 2) * (sin(beta) ** 2) * affe_p_sph + (cos(beta) ** 2 - (1 / 2) * sin(beta) ** 2) * affe_p_p1 diff --git a/skued/simulation/kinematic.py b/skued/simulation/kinematic.py index d621a0ea..0484254c 100644 --- a/skued/simulation/kinematic.py +++ b/skued/simulation/kinematic.py @@ -54,9 +54,7 @@ def kinematicsim(crystal, kx, ky, energy=90): potential = pelectrostatic(crystal, xx, yy) transmission_function = np.exp(1j * interaction_parameter(energy) * potential) - exit_wave = fft.ifft2( - fft.fft2(np.ones_like(xx, dtype=complex) * transmission_function) - ) + exit_wave = fft.ifft2(fft.fft2(np.ones_like(xx, dtype=complex) * transmission_function)) intensity = fft.fftshift(np.abs(fft.fft2(exit_wave)) ** 2) kx_ = fft.fftshift(kx_) @@ -97,9 +95,7 @@ def fft2freq(x, y, indexing="xy"): elif indexing == "ij": extent_x, extent_y = x[:, 0], y[0, :] else: - raise ValueError( - "Indexing should be either 'xy' or 'ij', not {}".format(indexing) - ) + raise ValueError("Indexing should be either 'xy' or 'ij', not {}".format(indexing)) # Spacing assuming constant x and y spacing spacing_x = abs(extent_x[1] - extent_x[0]) diff --git a/skued/simulation/potential.py b/skued/simulation/potential.py index d3b44ebc..d321692b 100644 --- a/skued/simulation/potential.py +++ b/skued/simulation/potential.py @@ -19,13 +19,9 @@ def _electrostatic_atom(atom, r): try: - _, a1, b1, a2, b2, a3, b3, c1, d1, c2, d2, c3, d3 = scattering_params[ - atom.atomic_number - ] + _, a1, b1, a2, b2, a3, b3, c1, d1, c2, d2, c3, d3 = scattering_params[atom.atomic_number] except KeyError: - raise ValueError( - f"Scattering information for element {atom.element} is unavailable." - ) + raise ValueError(f"Scattering information for element {atom.element} is unavailable.") sum1 = np.zeros_like(r, dtype=float) for a, b in zip((a1, a2, a3), (b1, b2, b3)): @@ -68,9 +64,7 @@ def electrostatic(crystal, x, y, z): r = np.zeros_like(x, dtype=float) for atom in crystal: ax, ay, az = atom.coords_cartesian - r[:] = minimum_image_distance( - x - ax, y - ay, z - az, lattice=crystal.lattice_vectors - ) + r[:] = minimum_image_distance(x - ax, y - ay, z - az, lattice=crystal.lattice_vectors) potential += _electrostatic_atom(atom, r) # Due to sampling, x,y, and z might pass through the center of atoms @@ -82,19 +76,13 @@ def electrostatic(crystal, x, y, z): def _pelectrostatic_atom(atom, r): try: - _, a1, b1, a2, b2, a3, b3, c1, d1, c2, d2, c3, d3 = scattering_params[ - atom.atomic_number - ] + _, a1, b1, a2, b2, a3, b3, c1, d1, c2, d2, c3, d3 = scattering_params[atom.atomic_number] except KeyError: - raise ValueError( - f"Scattering information for element {atom.element} is unavailable." 
- ) + raise ValueError(f"Scattering information for element {atom.element} is unavailable.") potential = np.zeros_like(r, dtype=float) for a, b, c, d in zip((a1, a2, a3), (b1, b2, b3), (c1, c2, c3), (d1, d2, d3)): - potential += 2 * a * bessel(2 * pi * r * sqrt(b)) + (c / d) * np.exp( - -((r * pi) ** 2) / d - ) + potential += 2 * a * bessel(2 * pi * r * sqrt(b)) + (c / d) * np.exp(-((r * pi) ** 2) / d) return 2 * a0 * e * (pi**2) * potential @@ -130,9 +118,7 @@ def pelectrostatic(crystal, x, y, bounds=None): if bounds: min_z, max_z = min(bounds), max(bounds) - atoms = ( - atom for atom in iter(crystal) if min_z <= atom.coords_cartesian[2] < max_z - ) + atoms = (atom for atom in iter(crystal) if min_z <= atom.coords_cartesian[2] < max_z) else: atoms = iter(crystal) diff --git a/skued/simulation/powdersim.py b/skued/simulation/powdersim.py index cc8e8326..58aeefa0 100644 --- a/skued/simulation/powdersim.py +++ b/skued/simulation/powdersim.py @@ -32,9 +32,7 @@ def powdersim(crystal, q, fwhm_g=0.03, fwhm_l=0.06, **kwargs): """ refls = np.vstack(tuple(crystal.bounded_reflections(q.max()))) h, k, l = np.hsplit(refls, 3) - Gx, Gy, Gz = change_basis_mesh( - h, k, l, basis1=crystal.reciprocal_vectors, basis2=np.eye(3) - ) + Gx, Gy, Gz = change_basis_mesh(h, k, l, basis1=crystal.reciprocal_vectors, basis2=np.eye(3)) qs = np.sqrt(Gx**2 + Gy**2 + Gz**2) intensities = np.absolute(structure_factor(crystal, h, k, l)) ** 2 diff --git a/skued/simulation/structure_factors.py b/skued/simulation/structure_factors.py index 05294dcb..4a2d2000 100644 --- a/skued/simulation/structure_factors.py +++ b/skued/simulation/structure_factors.py @@ -39,9 +39,7 @@ def structure_factor(crystal, h, k, l, normalized=False): # This works whether G is a list of 3 numbers, a ndarray shape(3,) or # a list of meshgrid arrays. 
h, k, l = np.atleast_1d(h, k, l) - Gx, Gy, Gz = change_basis_mesh( - h, k, l, basis1=crystal.reciprocal_vectors, basis2=np.eye(3) - ) + Gx, Gy, Gz = change_basis_mesh(h, k, l, basis1=crystal.reciprocal_vectors, basis2=np.eye(3)) nG = np.sqrt(Gx**2 + Gy**2 + Gz**2) # Separating the structure factor into sine and cosine parts avoids adding diff --git a/skued/simulation/tests/test_potential.py b/skued/simulation/tests/test_potential.py index c9456057..fdaac115 100644 --- a/skued/simulation/tests/test_potential.py +++ b/skued/simulation/tests/test_potential.py @@ -11,9 +11,7 @@ def test_return_shape(): """Test that the return shape of pelectrostatic is the same as input arrays""" crystal = Crystal.from_database("C") - xx, yy, zz = np.meshgrid( - np.linspace(-10, 10, 16), np.linspace(-10, 10, 16), np.linspace(-10, 10, 16) - ) + xx, yy, zz = np.meshgrid(np.linspace(-10, 10, 16), np.linspace(-10, 10, 16), np.linspace(-10, 10, 16)) potential = electrostatic(crystal, xx, yy, zz) assert xx.shape == potential.shape @@ -21,9 +19,7 @@ def test_return_shape(): def test_side_effects(): """Test that mesh arrays are not written to in pelectrostatic""" - xx, yy, zz = np.meshgrid( - np.linspace(-10, 10, 16), np.linspace(-10, 10, 16), np.linspace(-10, 10, 16) - ) + xx, yy, zz = np.meshgrid(np.linspace(-10, 10, 16), np.linspace(-10, 10, 16), np.linspace(-10, 10, 16)) xx.setflags(write=False) yy.setflags(write=False) diff --git a/skued/tests/test_affine.py b/skued/tests/test_affine.py index 01e9db34..1f9af51f 100644 --- a/skued/tests/test_affine.py +++ b/skued/tests/test_affine.py @@ -151,9 +151,7 @@ def test_change_basis_mesh_trivial_basis_change(): extent = np.linspace(0, 10, 10, dtype=int) xx, yy, zz = np.meshgrid(extent, extent, extent) - XX, YY, ZZ = tr.change_basis_mesh( - xx=xx, yy=yy, zz=zz, basis1=np.eye(3), basis2=np.eye(3) - ) + XX, YY, ZZ = tr.change_basis_mesh(xx=xx, yy=yy, zz=zz, basis1=np.eye(3), basis2=np.eye(3)) assert np.allclose(xx, XX) assert np.allclose(yy, YY) assert np.allclose(zz, ZZ) @@ -169,9 +167,7 @@ def test_change_basis_mesh_coordinate_swap(): e1, e2, e3 = np.eye(3) swapped_basis = [e1, e3, e2] - XX, YY, ZZ = tr.change_basis_mesh( - xx=xx, yy=yy, zz=zz, basis1=np.eye(3), basis2=swapped_basis - ) + XX, YY, ZZ = tr.change_basis_mesh(xx=xx, yy=yy, zz=zz, basis1=np.eye(3), basis2=swapped_basis) assert np.allclose(xx, XX) assert np.allclose(yy, ZZ) assert np.allclose(zz, YY) @@ -185,9 +181,7 @@ def test_change_basis_mesh_scaling(): scaled_basis = [0.5 * e1, 0.5 * e2, 0.5 * e3] - XX, YY, ZZ = tr.change_basis_mesh( - xx=xx, yy=yy, zz=zz, basis1=np.eye(3), basis2=scaled_basis - ) + XX, YY, ZZ = tr.change_basis_mesh(xx=xx, yy=yy, zz=zz, basis1=np.eye(3), basis2=scaled_basis) assert np.allclose(2 * xx, XX) assert np.allclose(2 * yy, YY) assert np.allclose(2 * zz, ZZ) diff --git a/skued/tests/test_potential_map.py b/skued/tests/test_potential_map.py index 6f21a043..6f0e730c 100644 --- a/skued/tests/test_potential_map.py +++ b/skued/tests/test_potential_map.py @@ -71,10 +71,7 @@ def test_potential_synthesis_trivial(): """Test that potential_synthesis calculated from zero intensity is zero everywhere""" crystal = Crystal.from_database("C") reflections = list(combinations_with_replacement(range(-3, 4), 3)) - intensities = [ - np.abs(structure_factor(crystal, *reflection)) ** 2 - for reflection in reflections - ] + intensities = [np.abs(structure_factor(crystal, *reflection)) ** 2 for reflection in reflections] aR1, aR2, aR3 = crystal.lattice_vectors extent = np.arange(0, 10, 0.1) @@ -82,9 +79,7 
@@ def test_potential_synthesis_trivial(): with suppress_warnings(): plane = plane_mesh(aR3, aR1 + aR2, x1=extent) - potmap = potential_synthesis( - reflections, np.zeros_like(intensities), crystal, plane - ) + potmap = potential_synthesis(reflections, np.zeros_like(intensities), crystal, plane) assert np.allclose(potmap, 0) @@ -93,10 +88,7 @@ def test_potential_synthesis_positive_intensity(): """Test that potential_synthesis raises an error if diffraction intensity is not positive""" crystal = Crystal.from_database("C") reflections = list(combinations_with_replacement(range(-3, 4), 3)) - intensities = [ - np.abs(structure_factor(crystal, *reflection)) ** 2 - for reflection in reflections - ] + intensities = [np.abs(structure_factor(crystal, *reflection)) ** 2 for reflection in reflections] aR1, aR2, aR3 = crystal.lattice_vectors extent = np.arange(0, 5, 0.1) @@ -112,10 +104,7 @@ def test_potential_synthesis_shape(): """Test that potential_synthesis returns a map with the same shape as the mesh""" crystal = Crystal.from_database("C") reflections = list(combinations_with_replacement(range(-3, 4), 3)) - intensities = [ - np.abs(structure_factor(crystal, *reflection)) ** 2 - for reflection in reflections - ] + intensities = [np.abs(structure_factor(crystal, *reflection)) ** 2 for reflection in reflections] aR1, aR2, aR3 = crystal.lattice_vectors extent = np.arange(0, 5, 0.1) diff --git a/skued/tests/test_thin_films.py b/skued/tests/test_thin_films.py index 2a4dbf98..33b6fe85 100644 --- a/skued/tests/test_thin_films.py +++ b/skued/tests/test_thin_films.py @@ -23,7 +23,5 @@ def test_film_interaction_conservation_power(): def test_film_interaction_correctness(): """Test for absorption values of VO2 on SiN substrate. Values provided from Martin R. Otto""" - _, _, A = film_optical_coefficients( - 800, thickness=90, n_film=2.9 + 1j * 0.43, n_substrate=1.9962 - ) + _, _, A = film_optical_coefficients(800, thickness=90, n_film=2.9 + 1j * 0.43, n_substrate=1.9962) assert round(abs(A - 0.312495), 5) == 0 diff --git a/skued/tests/test_voigt.py b/skued/tests/test_voigt.py index d2707cbf..e062a26a 100644 --- a/skued/tests/test_voigt.py +++ b/skued/tests/test_voigt.py @@ -20,14 +20,10 @@ def integrate_2d(x, y, f): def integrate_3d(x, y, z, f): """Numerically-integrate a function f(x, y).""" - return trapezoid( - trapezoid(trapezoid(f, z[None, None, :], axis=2), y[None, :], axis=1), x, axis=0 - ) + return trapezoid(trapezoid(trapezoid(f, z[None, None, :], axis=2), y[None, :], axis=1), x, axis=0) -multifunc = pytest.mark.parametrize( - "func", (gaussian, lorentzian, lambda x, c, w: pseudo_voigt(x, c, w, w)) -) +multifunc = pytest.mark.parametrize("func", (gaussian, lorentzian, lambda x, c, w: pseudo_voigt(x, c, w, w))) @multifunc diff --git a/skued/time_series/fitting.py b/skued/time_series/fitting.py index 268d882e..690ccf81 100644 --- a/skued/time_series/fitting.py +++ b/skued/time_series/fitting.py @@ -200,6 +200,4 @@ def _gauss_kernel(t, fwhm): t0 = t[int(len(t) / 2)] std = fwhm / (2 * sqrt(2 * log(2))) - return (1 / (np.sqrt(2 * np.pi) * std)) * np.exp( - -((1.0 * t - t0) ** 2) / (2 * std**2) - ) + return (1 / (np.sqrt(2 * np.pi) * std)) * np.exp(-((1.0 * t - t0) ** 2) / (2 * std**2)) diff --git a/skued/time_series/selections.py b/skued/time_series/selections.py index 2a60a006..23d2cdad 100644 --- a/skued/time_series/selections.py +++ b/skued/time_series/selections.py @@ -53,9 +53,7 @@ def mpatch(self, *args, **kwargs): """ top, bottom, left, right = self.bounding_box - return mpatches.Rectangle( - 
xy=(left, top), width=right - left, height=bottom - top, angle=0, **kwargs - ) + return mpatches.Rectangle(xy=(left, top), width=right - left, height=bottom - top, angle=0, **kwargs) # The method below should be specialized for subclasses. @property @@ -277,11 +275,7 @@ def __array__(self, *args, **kwargs): np.arange(0, self.shape[1], dtype=int) - center_row, ) distance = np.sqrt(rr**2 + cc**2) - selection[ - np.logical_and( - distance >= self._inner_radius, distance <= self._outer_radius - ) - ] = True + selection[np.logical_and(distance >= self._inner_radius, distance <= self._outer_radius)] = True return selection # TODO: make new patch class @@ -327,9 +321,7 @@ class RingArcSelection(Selection): Starting and ending angles of the 2-torus in degrees, relative to ``angle``. """ - def __init__( - self, shape, center, inner_radius, outer_radius, angle=0, theta1=0, theta2=360 - ): + def __init__(self, shape, center, inner_radius, outer_radius, angle=0, theta1=0, theta2=360): if inner_radius > outer_radius: raise ValueError("Inner radius cannot be larger than outer radius.") @@ -372,9 +364,7 @@ def __array__(self, *args, **kwargs): angle = np.rad2deg(np.arctan2(rr, cc)) + self._angle angle[:] = np.mod(angle, 360) - distance_criteria = np.logical_and( - distance >= self._inner_radius, distance <= self._outer_radius - ) + distance_criteria = np.logical_and(distance >= self._inner_radius, distance <= self._outer_radius) angle_criteria = np.logical_and(angle >= self._theta1, angle <= self._theta2) selection[np.logical_and(angle_criteria, distance_criteria)] = True return selection @@ -398,7 +388,7 @@ def mpatch(self, **kwargs): angle=self._angle, theta1=self._theta1, theta2=self._theta2, - **kwargs + **kwargs, ) inner_arc = arc(self._inner_radius) diff --git a/skued/time_series/time_zero.py b/skued/time_series/time_zero.py index 5e9e9d18..922b3e7b 100644 --- a/skued/time_series/time_zero.py +++ b/skued/time_series/time_zero.py @@ -55,9 +55,7 @@ def register_time_shift(trace, reference, method="auto"): ) if trace.ndim > 1: - raise ValueError( - f"Expected 1D time traces, but received traces of shape {trace.shape}" - ) + raise ValueError(f"Expected 1D time traces, but received traces of shape {trace.shape}") trace = trace - trace.mean() reference = reference - reference.mean() @@ -65,9 +63,7 @@ def register_time_shift(trace, reference, method="auto"): # Normalized cross-correlation # Note : we use an external function to calculate normalization # so that it can be efficiently cached - xcorr = correlate( - trace, reference, mode="full", method="auto" - ) / __xcorr_normalization(trace.size, trace.dtype) + xcorr = correlate(trace, reference, mode="full", method="auto") / __xcorr_normalization(trace.size, trace.dtype) # Generalize to the average of multiple maxima maxima = np.transpose(np.nonzero(xcorr == xcorr.max())) diff --git a/skued/voigt.py b/skued/voigt.py index ee763a5d..479c70e3 100644 --- a/skued/voigt.py +++ b/skued/voigt.py @@ -58,17 +58,11 @@ def gaussian(coordinates, center, fwhm=None, std=None): # 1D is a special case, as coordinates are not given as a list of arrays if not isinstance(coordinates, (list, tuple)): # iterable but not ndarray - return ( - 1 - / (std * np.sqrt(2 * pi)) - * np.exp(-((coordinates - center) ** 2) / (2 * std * std)) - ) + return 1 / (std * np.sqrt(2 * pi)) * np.exp(-((coordinates - center) ** 2) / (2 * std * std)) # Computation dim = len(coordinates) - exponent = sum([(x - c) ** 2 for x, c in zip(coordinates, center)]) / ( - 2 * std * std - ) + exponent = 
sum([(x - c) ** 2 for x, c in zip(coordinates, center)]) / (2 * std * std) factor = 1 / (std * np.sqrt(2 * pi)) ** dim return factor * np.exp(-exponent) @@ -129,9 +123,7 @@ def lorentzian(coordinates, center, fwhm): return (width / pi) / ((coordinates - center) ** 2 + width**2) dim = len(coordinates) - core = width / ( - (sum([(x - c) ** 2 for x, c in zip(coordinates, center)]) + width**2) - ) ** ((dim + 1) / 2) + core = width / ((sum([(x - c) ** 2 for x, c in zip(coordinates, center)]) + width**2)) ** ((dim + 1) / 2) factor = 1 / (dim * pi) return factor * core @@ -164,11 +156,7 @@ def _pseudo_voigt_mixing_factor(width_l, width_g): ) ** (1 / 5) # Proportion of the Voigt that should be Lorentzian - return ( - 1.36603 * (width_l / gamma) - - 0.47719 * (width_l / gamma) ** 2 - + 0.11116 * (width_l / gamma) ** 3 - ) + return 1.36603 * (width_l / gamma) - 0.47719 * (width_l / gamma) ** 2 + 0.11116 * (width_l / gamma) ** 3 def pseudo_voigt(coordinates, center, fwhm_g, fwhm_l): @@ -211,6 +199,4 @@ def pseudo_voigt(coordinates, center, fwhm_g, fwhm_l): J. of Appl. Cryst. (2000) vol. 33, pp. 1311-1316 """ eta = _pseudo_voigt_mixing_factor(fwhm_g, fwhm_l) - return (1 - eta) * gaussian(coordinates, center, fwhm_g) + eta * lorentzian( - coordinates, center, fwhm_l - ) + return (1 - eta) * gaussian(coordinates, center, fwhm_g) + eta * lorentzian(coordinates, center, fwhm_l)
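
Taken together, the changes above replace the `setup.py` / `requirements.txt` packaging with `pyproject.toml`. As a minimal sketch of the resulting workflow (commands copied from the updated CI configuration and CONTRIBUTING.md; nothing here is new tooling):

    pip install .[development]            # replaces requirements.txt + dev-requirements.txt
    python -Wa -m pytest                   # unit tests; doctests collected via --doctest-modules
    sphinx-build -M html docs build/docs   # replaces `python setup.py build_sphinx`
    python -m build                        # replaces `python setup.py sdist`

Because `[tool.pytest.ini_options]` now passes `--doctest-modules`, docstring examples throughout the package are executed as tests. For illustration only — this function is hypothetical and not part of scikit-ued — a docstring written like this would be collected and checked automatically by the `python -Wa -m pytest` step above:

    def double(x):
        """
        Return twice the input.

        >>> double(2)
        4
        """
        return 2 * x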