From fe5d894c2cfd8e4dafd80cb08773294e66974267 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 29 Jul 2025 10:44:27 +0200 Subject: [PATCH 01/58] First attempt to vectorize kernel loop --- parcels/kernel.py | 50 +++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/parcels/kernel.py b/parcels/kernel.py index 56ae8b2f9..7a98bbe09 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -319,10 +319,9 @@ def execute(self, pset, endtime, dt): self.add_positionupdate_kernels() self._positionupdate_kernels_added = True - for p in pset: - self.evaluate_particle(p, endtime) - if p.state == StatusCode.StopAllExecution: - return StatusCode.StopAllExecution + self.evaluate_pset(pset, endtime) + if any(pset.state == StatusCode.StopAllExecution): + return StatusCode.StopAllExecution # Remove all particles that signalled deletion self.remove_deleted(pset) @@ -361,43 +360,42 @@ def execute(self, pset, endtime, dt): self.remove_deleted(pset) # Generalizable version! # Re-execute Kernels to retry particles with StatusCode.Repeat - for p in pset: - self.evaluate_particle(p, endtime) + self.evaluate_pset(pset, endtime) n_error = pset._num_error_particles - def evaluate_particle(self, p, endtime): - """Execute the kernel evaluation of for an individual particle. + def evaluate_pset(self, pset, endtime): + """Execute the kernel evaluation of for the entire particle set. Parameters ---------- - p : - object of (sub-)type Particle + pset : + object of (sub-)type ParticleSet endtime : endtime of this overall kernel evaluation step dt : computational integration timestep """ - sign_dt = 1 if p.dt >= 0 else -1 - while p.state in [StatusCode.Evaluate, StatusCode.Repeat]: - if sign_dt * (endtime - p.time_nextloop) <= 0: - return p + sign_dt = 1 if pset.dt[0] >= 0 else -1 + while pset[0].state in [StatusCode.Evaluate, StatusCode.Repeat]: + if sign_dt * (endtime - pset.time_nextloop) <= 0: + return pset - pre_dt = p.dt + pre_dt = pset.dt try: # Use next_dt from AdvectionRK45 if it is set - if abs(endtime - p.time_nextloop) < abs(p.next_dt) - np.timedelta64(1000, "ns"): - p.next_dt = sign_dt * (endtime - p.time_nextloop) + if abs(endtime - pset.time_nextloop[0]) < abs(pset.next_dt[0]) - np.timedelta64(1000, "ns"): + pset.next_dt[0] = sign_dt * (endtime - pset.time_nextloop[0]) except KeyError: - if sign_dt * (endtime - p.time_nextloop) <= p.dt: - p.dt = sign_dt * (endtime - p.time_nextloop) - res = self._pyfunc(p, self._fieldset, p.time_nextloop) + if sign_dt * (endtime - pset.time_nextloop[0]) <= pset.dt[0]: + pset.dt[0] = sign_dt * (endtime - pset.time_nextloop[0]) + res = self._pyfunc(pset, self._fieldset, pset.time_nextloop[0]) if res is None: - if p.state == StatusCode.Success: - if sign_dt * (p.time - endtime) > 0: - p.state = StatusCode.Evaluate + if pset.state[0] == StatusCode.Success: + if sign_dt * (pset.time[0] - endtime) > 0: + pset.state[0] = StatusCode.Evaluate else: - p.state = res + pset.state[0] = res - p.dt = pre_dt - return p + pset.dt = pre_dt + return pset From 867662bb9757956d346ba3d2243d55f6b1512aa4 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 29 Jul 2025 10:44:50 +0200 Subject: [PATCH 02/58] Adding radial rotation flow --- parcels/_datasets/structured/generated.py | 37 +++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/parcels/_datasets/structured/generated.py b/parcels/_datasets/structured/generated.py index 10f09e527..7951d018c 100644 --- a/parcels/_datasets/structured/generated.py +++ b/parcels/_datasets/structured/generated.py @@ -18,3 +18,40 @@ def simple_UV_dataset(dims=(360, 2, 30, 4), maxdepth=1, mesh_type="spherical"): "lon": (["XG"], np.linspace(-max_lon, max_lon, dims[3]), {"axis": "X", "c_grid_axis_shift": -0.5}), }, ) + + +def radial_rotation_dataset(xdim=200, ydim=200): # Define 2D flat, square fieldset for testing purposes. + lon = np.linspace(0, 60, xdim, dtype=np.float32) + lat = np.linspace(0, 60, ydim, dtype=np.float32) + + x0 = 30.0 # Define the origin to be the centre of the Field. + y0 = 30.0 + + U = np.zeros((ydim, xdim), dtype=np.float32) + V = np.zeros((ydim, xdim), dtype=np.float32) + + omega = 2 * np.pi / 86400.0 # Define the rotational period as 1 day. + + for i in range(lon.size): + for j in range(lat.size): + r = np.sqrt((lon[i] - x0) ** 2 + (lat[j] - y0) ** 2) + assert r >= 0.0 + assert r <= np.sqrt(x0**2 + y0**2) + + theta = np.arctan2((lat[j] - y0), (lon[i] - x0)) + assert abs(theta) <= np.pi + + U[j, i] = r * np.sin(theta) * omega + V[j, i] = -r * np.cos(theta) * omega + + return xr.Dataset( + {"U": (["YG", "XG"], U), "V": (["YG", "XG"], V)}, + coords={ + "YC": (["YC"], np.arange(ydim) + 0.5, {"axis": "Y"}), + "YG": (["YG"], np.arange(ydim), {"axis": "Y", "c_grid_axis_shift": -0.5}), + "XC": (["XC"], np.arange(xdim) + 0.5, {"axis": "X"}), + "XG": (["XG"], np.arange(xdim), {"axis": "X", "c_grid_axis_shift": -0.5}), + "lat": (["YG"], lat, {"axis": "Y", "c_grid_axis_shift": 0.5}), + "lon": (["XG"], lon, {"axis": "X", "c_grid_axis_shift": -0.5}), + }, + ) From bce90d58e99bef9f93a3ce1059afcb7a76abe4bb Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 29 Jul 2025 11:04:54 +0200 Subject: [PATCH 03/58] Fixing kernel statement --- parcels/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parcels/kernel.py b/parcels/kernel.py index 7a98bbe09..ffe79714b 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -378,7 +378,7 @@ def evaluate_pset(self, pset, endtime): """ sign_dt = 1 if pset.dt[0] >= 0 else -1 while pset[0].state in [StatusCode.Evaluate, StatusCode.Repeat]: - if sign_dt * (endtime - pset.time_nextloop) <= 0: + if all(sign_dt * (endtime - pset.time_nextloop) <= 0): return pset pre_dt = pset.dt From aa42ac27d42374573e699400d33aff1c88e0c482 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 29 Jul 2025 12:13:23 +0200 Subject: [PATCH 04/58] Make Parcels vectorized for simple kernels --- parcels/_datasets/structured/generated.py | 12 ++++++----- parcels/application_kernels/advection.py | 2 +- parcels/application_kernels/interpolation.py | 22 ++++++++++++-------- parcels/field.py | 2 +- parcels/kernel.py | 2 ++ parcels/xgrid.py | 2 +- 6 files changed, 25 insertions(+), 17 deletions(-) diff --git a/parcels/_datasets/structured/generated.py b/parcels/_datasets/structured/generated.py index 7951d018c..d31e78fcf 100644 --- a/parcels/_datasets/structured/generated.py +++ b/parcels/_datasets/structured/generated.py @@ -27,8 +27,8 @@ def radial_rotation_dataset(xdim=200, ydim=200): # Define 2D flat, square field x0 = 30.0 # Define the origin to be the centre of the Field. y0 = 30.0 - U = np.zeros((ydim, xdim), dtype=np.float32) - V = np.zeros((ydim, xdim), dtype=np.float32) + U = np.zeros((1, 1, ydim, xdim), dtype=np.float32) + V = np.zeros((1, 1, ydim, xdim), dtype=np.float32) omega = 2 * np.pi / 86400.0 # Define the rotational period as 1 day. @@ -41,12 +41,14 @@ def radial_rotation_dataset(xdim=200, ydim=200): # Define 2D flat, square field theta = np.arctan2((lat[j] - y0), (lon[i] - x0)) assert abs(theta) <= np.pi - U[j, i] = r * np.sin(theta) * omega - V[j, i] = -r * np.cos(theta) * omega + U[:, :, j, i] = r * np.sin(theta) * omega + V[:, :, j, i] = -r * np.cos(theta) * omega return xr.Dataset( - {"U": (["YG", "XG"], U), "V": (["YG", "XG"], V)}, + {"U": (["time", "depth", "YG", "XG"], U), "V": (["time", "depth", "YG", "XG"], V)}, coords={ + "time": (["time"], np.array([np.timedelta64(0, "s")]), {"axis": "T"}), + "depth": (["depth"], np.array([0]), {"axis": "Z"}), "YC": (["YC"], np.arange(ydim) + 0.5, {"axis": "Y"}), "YG": (["YG"], np.arange(ydim), {"axis": "Y", "c_grid_axis_shift": -0.5}), "XC": (["XC"], np.arange(xdim) + 0.5, {"axis": "X"}), diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index 82cb6c970..fb73adfbf 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -98,7 +98,7 @@ def AdvectionRK4_3D_CROCO(particle, fieldset, time): # pragma: no cover def AdvectionEE(particle, fieldset, time): # pragma: no cover """Advection of particles using Explicit Euler (aka Euler Forward) integration.""" dt = particle.dt / np.timedelta64(1, "s") # noqa TODO improve API for converting dt to seconds - (u1, v1) = fieldset.UV[particle] + (u1, v1) = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon, particle] particle_dlon += u1 * dt # noqa particle_dlat += v1 * dt # noqa diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index f47411652..142f0ea19 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -5,6 +5,7 @@ from typing import TYPE_CHECKING import numpy as np +import xarray as xr from parcels.field import Field from parcels.tools.statuscodes import ( @@ -39,15 +40,18 @@ def XBiLinear( yi, eta = position["Y"] zi, _ = position["Z"] - data = field.data.data[:, zi, yi : yi + 2, xi : xi + 2] - data = (1 - tau) * data[ti, :, :] + tau * data[ti + 1, :, :] - - return ( - (1 - xsi) * (1 - eta) * data[0, 0] - + xsi * (1 - eta) * data[0, 1] - + xsi * eta * data[1, 1] - + (1 - xsi) * eta * data[1, 0] - ) + if tau > 0: + data = (1 - tau) * field.data[ti, zi, :, :] + tau * field.data[ti + 1, zi, :, :] + else: + data = field.data[ti, zi, :, :] + + xi = xr.DataArray(xi, dims="points") + yi = xr.DataArray(yi, dims="points") + F00 = data.isel(XG=xi, YG=yi).values.flatten() + F10 = data.isel(XG=xi + 1, YG=yi).values.flatten() + F01 = data.isel(XG=xi, YG=yi + 1).values.flatten() + F11 = data.isel(XG=xi + 1, YG=yi + 1).values.flatten() + return (1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11 def XBiLinearPeriodic( diff --git a/parcels/field.py b/parcels/field.py index 46b47d69a..9a75c98ca 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -373,7 +373,7 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): def __getitem__(self, key): try: - if isinstance(key, Particle): + if isinstance(key, Particle): # TODO make this support a ParticleSet (now leads to circular import error) return self.eval(key.time, key.depth, key.lat, key.lon, key) else: return self.eval(*key) diff --git a/parcels/kernel.py b/parcels/kernel.py index ffe79714b..52d2e999b 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -376,6 +376,8 @@ def evaluate_pset(self, pset, endtime): dt : computational integration timestep """ + # TODO what happens if only some particles throw an error? + # TODO support for dt to be different for each particle sign_dt = 1 if pset.dt[0] >= 0 else -1 while pset[0].state in [StatusCode.Evaluate, StatusCode.Repeat]: if all(sign_dt * (endtime - pset.time_nextloop) <= 0): diff --git a/parcels/xgrid.py b/parcels/xgrid.py index 3c8b9a3b8..29d5960bf 100644 --- a/parcels/xgrid.py +++ b/parcels/xgrid.py @@ -466,7 +466,7 @@ def _search_1d_array( # TODO v4: We probably rework this to deal with 0D arrays before this point (as we already know field dimensionality) if len(arr) < 2: return 0, 0.0 - i = np.argmin(arr <= x) - 1 + i = np.searchsorted(arr, x, side="right") - 1 bcoord = (x - arr[i]) / (arr[i + 1] - arr[i]) return i, bcoord From f6097eba851f1ab582dbe106610eabcd592b4daa Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 29 Jul 2025 13:24:44 +0200 Subject: [PATCH 05/58] Support for Field[particle] evaluation --- parcels/application_kernels/advection.py | 2 +- parcels/field.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index fb73adfbf..82cb6c970 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -98,7 +98,7 @@ def AdvectionRK4_3D_CROCO(particle, fieldset, time): # pragma: no cover def AdvectionEE(particle, fieldset, time): # pragma: no cover """Advection of particles using Explicit Euler (aka Euler Forward) integration.""" dt = particle.dt / np.timedelta64(1, "s") # noqa TODO improve API for converting dt to seconds - (u1, v1) = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon, particle] + (u1, v1) = fieldset.UV[particle] particle_dlon += u1 * dt # noqa particle_dlat += v1 * dt # noqa diff --git a/parcels/field.py b/parcels/field.py index 9a75c98ca..447b81348 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -373,7 +373,7 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): def __getitem__(self, key): try: - if isinstance(key, Particle): # TODO make this support a ParticleSet (now leads to circular import error) + if hasattr(key, "time"): return self.eval(key.time, key.depth, key.lat, key.lon, key) else: return self.eval(*key) From 926af778fbfacc083fd1ba3f82917d75f6e10834 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 29 Jul 2025 14:32:13 +0200 Subject: [PATCH 06/58] Making XBiLinear also work on time-evolving fields --- parcels/_core/utils/time.py | 2 +- parcels/_index_search.py | 28 ++--------- parcels/application_kernels/interpolation.py | 33 +++++++----- parcels/tools/converters.py | 4 +- parcels/xgrid.py | 53 ++++++++++++++++---- 5 files changed, 68 insertions(+), 52 deletions(-) diff --git a/parcels/_core/utils/time.py b/parcels/_core/utils/time.py index 64623eba1..c145cc506 100644 --- a/parcels/_core/utils/time.py +++ b/parcels/_core/utils/time.py @@ -46,7 +46,7 @@ def __init__(self, left: T, right: T) -> None: self.right = right def __contains__(self, item: T) -> bool: - return self.left <= item <= self.right + return all(self.left <= item) and all(item <= self.right) def __repr__(self) -> str: return f"TimeInterval(left={self.left!r}, right={self.right!r})" diff --git a/parcels/_index_search.py b/parcels/_index_search.py index 397d37216..f79f38286 100644 --- a/parcels/_index_search.py +++ b/parcels/_index_search.py @@ -39,35 +39,13 @@ def _search_time_index(field: Field, time: datetime): if the sampled value is outside the time value range. """ if field.time_interval is None: - return 0, 0 + return np.zeros(shape=time.shape, dtype=np.float32), np.zeros(shape=time.shape, dtype=np.int32) if time not in field.time_interval: _raise_time_extrapolation_error(time, field=None) - time_index = field.data.time <= time - - if time_index.all(): - # If given time > last known field time, use - # the last field frame without interpolation - ti = len(field.data.time) - 1 - - elif np.logical_not(time_index).all(): - # If given time < any time in the field, use - # the first field frame without interpolation - ti = 0 - else: - ti = int(time_index.argmin() - 1) if time_index.any() else 0 - if len(field.data.time) == 1: - tau = 0 - elif ti == len(field.data.time) - 1: - tau = 1 - else: - tau = ( - (time - field.data.time[ti]).dt.total_seconds().values - / (field.data.time[ti + 1] - field.data.time[ti]).dt.total_seconds().values - if field.data.time[ti] != field.data.time[ti + 1] - else 0 - ) + ti = np.searchsorted(field.data.time.data, time, side="right") - 1 + tau = (time - field.data.time.data[ti]) / (field.data.time.data[ti + 1] - field.data.time.data[ti]) return tau, ti diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 142f0ea19..4c62c5c5a 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -39,19 +39,26 @@ def XBiLinear( xi, xsi = position["X"] yi, eta = position["Y"] zi, _ = position["Z"] - - if tau > 0: - data = (1 - tau) * field.data[ti, zi, :, :] + tau * field.data[ti + 1, zi, :, :] - else: - data = field.data[ti, zi, :, :] - - xi = xr.DataArray(xi, dims="points") - yi = xr.DataArray(yi, dims="points") - F00 = data.isel(XG=xi, YG=yi).values.flatten() - F10 = data.isel(XG=xi + 1, YG=yi).values.flatten() - F01 = data.isel(XG=xi, YG=yi + 1).values.flatten() - F11 = data.isel(XG=xi + 1, YG=yi + 1).values.flatten() - return (1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11 + axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) + + data = field.data + val = np.zeros_like(tau) + + timeslices = [ti, ti + 1] if tau[0] > 0 else [ti] + for tii in timeslices: + tau_factor = (1 - tau) if ti[0] == tii[0] else tau # TODO also for varying time + xi = xr.DataArray(xi, dims="points") + yi = xr.DataArray(yi, dims="points") + zi = xr.DataArray(zi, dims="points") + tii = xr.DataArray(tii, dims="points") + F00 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": tii}).values.flatten() + F10 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": tii}).values.flatten() + F01 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": tii}).values.flatten() + F11 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": tii}).values.flatten() + val += ( + (1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11 + ) * tau_factor + return val def XBiLinearPeriodic( diff --git a/parcels/tools/converters.py b/parcels/tools/converters.py index 17311d205..fe0448f59 100644 --- a/parcels/tools/converters.py +++ b/parcels/tools/converters.py @@ -62,10 +62,10 @@ class GeographicPolar(UnitConverter): target_unit = "degree" def to_target(self, value, z, y, x): - return value / 1000.0 / 1.852 / 60.0 / cos(y * pi / 180) + return value / 1000.0 / 1.852 / 60.0 / np.cos(y * pi / 180) def to_source(self, value, z, y, x): - return value * 1000.0 * 1.852 * 60.0 * cos(y * pi / 180) + return value * 1000.0 * 1.852 * 60.0 * np.cos(y * pi / 180) class GeographicSquare(UnitConverter): diff --git a/parcels/xgrid.py b/parcels/xgrid.py index 29d5960bf..9f3ce8fe8 100644 --- a/parcels/xgrid.py +++ b/parcels/xgrid.py @@ -9,7 +9,6 @@ from parcels import xgcm from parcels._index_search import _search_indices_curvilinear_2d from parcels.basegrid import BaseGrid -from parcels.tools.statuscodes import FieldOutOfBoundError, FieldOutOfBoundSurfaceError _XGRID_AXES = Literal["X", "Y", "Z"] _XGRID_AXES_ORDERING: Sequence[_XGRID_AXES] = "ZYX" @@ -272,15 +271,15 @@ def search(self, z, y, x, ei=None): ds = self.xgcm_grid._ds zi, zeta = _search_1d_array(ds.depth.values, z) - if zi == -1: - if zeta < 0: - raise FieldOutOfBoundError( - f"Depth {z} is out of bounds for the grid with depth values {ds.depth.values}." - ) - elif zeta > 1: - raise FieldOutOfBoundSurfaceError( - f"Depth {z} is out of the surface for the grid with depth values {ds.depth.values}." - ) + # if any(zi == -1): # TODO throw error only for those particles where zi == -1 + # if any(zeta < 0): + # raise FieldOutOfBoundError( + # f"Depth {z} is out of bounds for the grid with depth values {ds.depth.values}." + # ) + # elif any(zeta > 1): + # raise FieldOutOfBoundSurfaceError( + # f"Depth {z} is out of the surface for the grid with depth values {ds.depth.values}." + # ) if ds.lon.ndim == 1: yi, eta = _search_1d_array(ds.lat.values, y) @@ -314,6 +313,38 @@ def _fpoint_info(self): return axis_position_mapping + def get_axis_dim_mapping(self, dims: list[str]) -> dict[_XGRID_AXES, str]: + """ + Maps xarray dimension names to their corresponding axis (X, Y, Z). + + WARNING: This API is unstable and subject to change in future versions. + + Parameters + ---------- + dims : list[str] + List of xarray dimension names + + Returns + ------- + dict[_XGRID_AXES, str] + Dictionary mapping axes (X, Y, Z) to their corresponding dimension names + + Examples + -------- + >>> grid.get_axis_dim_mapping(['time', 'lat', 'lon']) + {'Y': 'lat', 'X': 'lon'} + + Notes + ----- + Only returns mappings for spatial axes (X, Y, Z) that are present in the grid. + """ + result = {} + for dim in dims: + axis = get_axis_from_dim_name(self.xgcm_grid.axes, dim) + if axis in self.axes: # Only include spatial axes (X, Y, Z) + result[cast(_XGRID_AXES, axis)] = dim + return result + def get_axis_from_dim_name(axes: _XGCM_AXES, dim: str) -> _XGCM_AXIS_DIRECTION | None: """For a given dimension name in a grid, returns the direction axis it is on.""" @@ -465,7 +496,7 @@ def _search_1d_array( """ # TODO v4: We probably rework this to deal with 0D arrays before this point (as we already know field dimensionality) if len(arr) < 2: - return 0, 0.0 + return np.zeros(shape=x.shape, dtype=np.int32), np.zeros_like(x) i = np.searchsorted(arr, x, side="right") - 1 bcoord = (x - arr[i]) / (arr[i + 1] - arr[i]) return i, bcoord From c975c2af261aff4a68dde3858cbbf26fefd19960 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 30 Jul 2025 08:03:03 +0200 Subject: [PATCH 07/58] Fixing unit tests for vectorized kernels --- .../application_kernels/advectiondiffusion.py | 28 ++-- parcels/application_kernels/interpolation.py | 127 ++++++++++-------- parcels/xgrid.py | 12 +- tests/v4/test_advection.py | 16 ++- tests/v4/test_diffusion.py | 14 +- 5 files changed, 109 insertions(+), 88 deletions(-) diff --git a/parcels/application_kernels/advectiondiffusion.py b/parcels/application_kernels/advectiondiffusion.py index 8f83d26ae..7c13e3fbe 100644 --- a/parcels/application_kernels/advectiondiffusion.py +++ b/parcels/application_kernels/advectiondiffusion.py @@ -29,18 +29,18 @@ def AdvectionDiffusionM1(particle, fieldset, time): # pragma: no cover dWx = random.normalvariate(0, math.sqrt(math.fabs(dt))) dWy = random.normalvariate(0, math.sqrt(math.fabs(dt))) - Kxp1 = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon + fieldset.dres] - Kxm1 = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon - fieldset.dres] + Kxp1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon + fieldset.dres] + Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) - u, v = fieldset.UV[time, particle.depth, particle.lat, particle.lon] - bx = math.sqrt(2 * fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon]) + u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon] + bx = math.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) - Kyp1 = fieldset.Kh_meridional[time, particle.depth, particle.lat + fieldset.dres, particle.lon] - Kym1 = fieldset.Kh_meridional[time, particle.depth, particle.lat - fieldset.dres, particle.lon] + Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon] + Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) - by = math.sqrt(2 * fieldset.Kh_meridional[time, particle.depth, particle.lat, particle.lon]) + by = math.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) # Particle positions are updated only after evaluating all terms. particle_dlon += u * dt + 0.5 * dKdx * (dWx**2 + dt) + bx * dWx # noqa @@ -65,19 +65,19 @@ def AdvectionDiffusionEM(particle, fieldset, time): # pragma: no cover dWx = random.normalvariate(0, math.sqrt(math.fabs(dt))) dWy = random.normalvariate(0, math.sqrt(math.fabs(dt))) - u, v = fieldset.UV[time, particle.depth, particle.lat, particle.lon] + u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon] - Kxp1 = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon + fieldset.dres] - Kxm1 = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon - fieldset.dres] + Kxp1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon + fieldset.dres] + Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) ax = u + dKdx - bx = math.sqrt(2 * fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon]) + bx = math.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) - Kyp1 = fieldset.Kh_meridional[time, particle.depth, particle.lat + fieldset.dres, particle.lon] - Kym1 = fieldset.Kh_meridional[time, particle.depth, particle.lat - fieldset.dres, particle.lon] + Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon] + Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) ay = v + dKdy - by = math.sqrt(2 * fieldset.Kh_meridional[time, particle.depth, particle.lat, particle.lon]) + by = math.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) # Particle positions are updated only after evaluating all terms. particle_dlon += ax * dt + bx * dWx # noqa diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 4c62c5c5a..c234635ec 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -8,9 +8,6 @@ import xarray as xr from parcels.field import Field -from parcels.tools.statuscodes import ( - FieldOutOfBoundError, -) if TYPE_CHECKING: from parcels.uxgrid import _UXGRID_AXES @@ -46,7 +43,7 @@ def XBiLinear( timeslices = [ti, ti + 1] if tau[0] > 0 else [ti] for tii in timeslices: - tau_factor = (1 - tau) if ti[0] == tii[0] else tau # TODO also for varying time + tau_factor = (1 - tau) if ti[0] == tii[0] else tau # TODO also for time that's different per particle? xi = xr.DataArray(xi, dims="points") yi = xr.DataArray(yi, dims="points") zi = xr.DataArray(zi, dims="points") @@ -76,32 +73,31 @@ def XBiLinearPeriodic( yi, eta = position["Y"] zi, _ = position["Z"] - if xi < 0: - xi = 0 - xsi = (x - field.grid.lon[xi]) / (field.grid.lon[xi + 1] - field.grid.lon[xi]) - if yi < 0: - yi = 0 - eta = (y - field.grid.lat[yi]) / (field.grid.lat[yi + 1] - field.grid.lat[yi]) - - data = field.data.data[:, zi, yi : yi + 2, xi : xi + 2] - data = (1 - tau) * data[ti, :, :] + tau * data[ti + 1, :, :] - - xsi = 0 if not np.isfinite(xsi) else xsi - eta = 0 if not np.isfinite(eta) else eta - - if xsi > 0 and eta > 0: - return ( - (1 - xsi) * (1 - eta) * data[0, 0] - + xsi * (1 - eta) * data[0, 1] - + xsi * eta * data[1, 1] - + (1 - xsi) * eta * data[1, 0] - ) - elif xsi > 0 and eta == 0: - return (1 - xsi) * data[0, 0] + xsi * data[0, 1] - elif xsi == 0 and eta > 0: - return (1 - eta) * data[0, 0] + eta * data[1, 0] - else: - return data[0, 0] + xi = np.where(xi > len(field.grid.lon) - 2, 0, xi) + xsi = (x - field.grid.lon[xi]) / (field.grid.lon[xi + 1] - field.grid.lon[xi]) + yi = np.where(yi > len(field.grid.lat) - 2, 0, yi) + eta = (y - field.grid.lat[yi]) / (field.grid.lat[yi + 1] - field.grid.lat[yi]) + + axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) + + data = field.data + val = np.zeros_like(tau) + + timeslices = [ti, ti + 1] if tau[0] > 0 else [ti] + for tii in timeslices: + tau_factor = (1 - tau) if ti[0] == tii[0] else tau # TODO also for time that's different per particle? + xi = xr.DataArray(xi, dims="points") + yi = xr.DataArray(yi, dims="points") + zi = xr.DataArray(zi, dims="points") + ti = xr.DataArray(tii, dims="points") + F00 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": ti}).values.flatten() + F10 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": ti}).values.flatten() + F01 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": ti}).values.flatten() + F11 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": ti}).values.flatten() + val += ( + (1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11 + ) * tau_factor + return val def XTriLinear( @@ -119,32 +115,51 @@ def XTriLinear( yi, eta = position["Y"] zi, zeta = position["Z"] - if zi < 0 or xi < 0 or yi < 0: - raise FieldOutOfBoundError - - data = field.data.data[:, zi : zi + 2, yi : yi + 2, xi : xi + 2] - data = (1 - tau) * data[ti, :, :, :] + tau * data[ti + 1, :, :, :] - if zeta > 0: - data = (1 - zeta) * data[0, :, :] + zeta * data[1, :, :] - else: - data = data[0, :, :] - - xsi = 0 if not np.isfinite(xsi) else xsi - eta = 0 if not np.isfinite(eta) else eta - - if xsi > 0 and eta > 0: - return ( - (1 - xsi) * (1 - eta) * data[0, 0] - + xsi * (1 - eta) * data[0, 1] - + xsi * eta * data[1, 1] - + (1 - xsi) * eta * data[1, 0] - ) - elif xsi > 0 and eta == 0: - return (1 - xsi) * data[0, 0] + xsi * data[0, 1] - elif xsi == 0 and eta > 0: - return (1 - eta) * data[0, 0] + eta * data[1, 0] - else: - return data[0, 0] + axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) + data = field.data + val = np.zeros_like(tau) + + # Get dimension sizes to clip indices + x_size = data.sizes[axis_dim["X"]] + y_size = data.sizes[axis_dim["Y"]] + z_size = data.sizes[axis_dim["Z"]] + t_size = data.sizes["time"] + + timeslices = [ti, ti + 1] if tau[0] > 0 else [ti] + for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): + for zii, depth_factor in zip([zi, zi + 1], [1 - zeta, zeta], strict=False): + # Clip indices to prevent out-of-bounds access + xi_clipped = np.clip(xi, 0, x_size - 1) + yi_clipped = np.clip(yi, 0, y_size - 1) + zii_clipped = np.clip(zii, 0, z_size - 1) + tii_clipped = np.clip(tii, 0, t_size - 1) + + xi_da = xr.DataArray(xi_clipped, dims="points") + yi_da = xr.DataArray(yi_clipped, dims="points") + zinds = xr.DataArray(zii_clipped, dims="points") + tinds = xr.DataArray(tii_clipped, dims="points") + xi_plus1 = xr.DataArray(np.clip(xi_clipped + 1, 0, x_size - 1), dims="points") + yi_plus1 = xr.DataArray(np.clip(yi_clipped + 1, 0, y_size - 1), dims="points") + + F00 = data.isel( + {axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zinds, "time": tinds} + ).values.flatten() + F10 = data.isel( + {axis_dim["X"]: xi_plus1, axis_dim["Y"]: yi_da, axis_dim["Z"]: zinds, "time": tinds} + ).values.flatten() + F01 = data.isel( + {axis_dim["X"]: xi_da, axis_dim["Y"]: yi_plus1, axis_dim["Z"]: zinds, "time": tinds} + ).values.flatten() + F11 = data.isel( + {axis_dim["X"]: xi_plus1, axis_dim["Y"]: yi_plus1, axis_dim["Z"]: zinds, "time": tinds} + ).values.flatten() + val += ( + ((1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11) + * tau_factor + * depth_factor + ) + + return val def UXPiecewiseConstantFace( diff --git a/parcels/xgrid.py b/parcels/xgrid.py index 9f3ce8fe8..ee58f640e 100644 --- a/parcels/xgrid.py +++ b/parcels/xgrid.py @@ -497,9 +497,15 @@ def _search_1d_array( # TODO v4: We probably rework this to deal with 0D arrays before this point (as we already know field dimensionality) if len(arr) < 2: return np.zeros(shape=x.shape, dtype=np.int32), np.zeros_like(x) - i = np.searchsorted(arr, x, side="right") - 1 - bcoord = (x - arr[i]) / (arr[i + 1] - arr[i]) - return i, bcoord + index = np.searchsorted(arr, x, side="right") - 1 + index_next = np.clip(index + 1, 1, len(arr) - 1) # Ensure we don't go out of bounds + + bcoord = np.where( + index <= len(arr) - 2, + (x - arr[index]) / (arr[index_next] - arr[index]), + np.nan, # If at the end of the array, we return np.nan + ) + return index, bcoord def _convert_center_pos_to_fpoint( diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index faa146619..147aee46b 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -63,10 +63,12 @@ def test_advection_zonal_periodic(): fieldset = FieldSet([U, V, UV]) PeriodicParticle = Particle.add_variable(Variable("total_dlon", initial=0)) - pset = ParticleSet(fieldset, pclass=PeriodicParticle, lon=[0.5], lat=[0.5]) + startlon = np.array([0.5, 0.4]) + pset = ParticleSet(fieldset, pclass=PeriodicParticle, lon=startlon, lat=[0.5, 0.5]) pset.execute([AdvectionEE, periodicBC], runtime=np.timedelta64(40, "s"), dt=np.timedelta64(1, "s")) - assert np.isclose(pset.total_dlon[0], 4, atol=1e-5) - assert np.isclose(pset.lon_nextloop[0], 0.5, atol=1e-5) + assert np.allclose(pset.total_dlon, 4, atol=1e-5) + assert np.allclose(pset.lon_nextloop, startlon, atol=1e-5) + assert np.allclose(pset.lat_nextloop, 0.5, atol=1e-5) def test_horizontal_advection_in_3D_flow(npart=10): @@ -84,7 +86,7 @@ def test_horizontal_advection_in_3D_flow(npart=10): pset.execute(AdvectionRK4, runtime=np.timedelta64(2, "h"), dt=np.timedelta64(15, "m")) expected_lon = pset.depth * (pset.time - fieldset.time_interval.left) / np.timedelta64(1, "s") - assert np.allclose(expected_lon, pset.lon_nextloop, atol=1.0e-1) + assert np.allclose(expected_lon, pset.lon, atol=1.0e-1) @pytest.mark.parametrize("direction", ["up", "down"]) @@ -181,10 +183,10 @@ def test_length1dimensions(u, v, w): # TODO: Refactor this test to be more read pset.execute(kernel, runtime=np.timedelta64(5, "s"), dt=np.timedelta64(1, "s")) assert len(pset.lon) == len([p.lon for p in pset]) - assert ((np.array([p.lon - x0 for p in pset]) - 4 * u) < 1e-6).all() - assert ((np.array([p.lat - y0 for p in pset]) - 4 * v) < 1e-6).all() + assert ((pset.lon - x0 - 4 * u) < 1e-4).all() + assert ((pset.lat - y0 - 4 * v) < 1e-4).all() if w: - assert ((np.array([p.depth - z0 for p in pset]) - 4 * w) < 1e-6).all() + assert ((pset.depth - z0 - 4 * w) < 1e-4).all() @pytest.mark.parametrize( diff --git a/tests/v4/test_diffusion.py b/tests/v4/test_diffusion.py index 31f984504..585d7ffbd 100644 --- a/tests/v4/test_diffusion.py +++ b/tests/v4/test_diffusion.py @@ -95,7 +95,7 @@ def test_randomexponential(lambd): npart = 1000 # Rate parameter for random.expovariate - fieldset.lambd = lambd + fieldset.add_constant("lambd", lambd) # Set random seed random.seed(1234) @@ -104,7 +104,7 @@ def test_randomexponential(lambd): def vertical_randomexponential(particle, fieldset, time): # pragma: no cover # Kernel for random exponential variable in depth direction - particle.depth = random.expovariate(fieldset.lambd) + particle.depth = np.random.exponential(scale=1 / fieldset.lambd, size=len(particle)) pset.execute(vertical_randomexponential, runtime=np.timedelta64(1, "s"), dt=np.timedelta64(1, "s")) @@ -131,14 +131,12 @@ def test_randomvonmises(mu, kappa): ) def vonmises(particle, fieldset, time): # pragma: no cover - particle.angle = random.vonmisesvariate(fieldset.mu, fieldset.kappa) + particle.angle = np.array([random.vonmisesvariate(fieldset.mu, fieldset.kappa) for _ in range(len(particle))]) pset.execute(vonmises, runtime=np.timedelta64(1, "s"), dt=np.timedelta64(1, "s")) - angles = np.array([p.angle for p in pset]) - - assert np.allclose(np.mean(angles), mu, atol=0.1) + assert np.allclose(np.mean(pset.angle), mu, atol=0.1) vonmises_mean = stats.vonmises.mean(kappa=kappa, loc=mu) - assert np.allclose(np.mean(angles), vonmises_mean, atol=0.1) + assert np.allclose(np.mean(pset.angle), vonmises_mean, atol=0.1) vonmises_var = stats.vonmises.var(kappa=kappa, loc=mu) - assert np.allclose(np.var(angles), vonmises_var, atol=0.1) + assert np.allclose(np.var(pset.angle), vonmises_var, atol=0.1) From 809eb78d9aa772f75c618caafb6ccbaa40a6af4f Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 30 Jul 2025 09:40:27 +0200 Subject: [PATCH 08/58] Fixing more unit tests for vectorised kernels --- parcels/_core/utils/time.py | 2 +- parcels/_index_search.py | 2 +- parcels/application_kernels/interpolation.py | 12 +++++------- parcels/particleset.py | 6 +++--- parcels/xgrid.py | 2 +- tests/v4/test_particleset_execute.py | 2 +- 6 files changed, 12 insertions(+), 14 deletions(-) diff --git a/parcels/_core/utils/time.py b/parcels/_core/utils/time.py index c145cc506..ffc3cb977 100644 --- a/parcels/_core/utils/time.py +++ b/parcels/_core/utils/time.py @@ -46,7 +46,7 @@ def __init__(self, left: T, right: T) -> None: self.right = right def __contains__(self, item: T) -> bool: - return all(self.left <= item) and all(item <= self.right) + return (self.left <= item).all() and (item <= self.right).all() def __repr__(self) -> str: return f"TimeInterval(left={self.left!r}, right={self.right!r})" diff --git a/parcels/_index_search.py b/parcels/_index_search.py index f79f38286..600cfe724 100644 --- a/parcels/_index_search.py +++ b/parcels/_index_search.py @@ -46,7 +46,7 @@ def _search_time_index(field: Field, time: datetime): ti = np.searchsorted(field.data.time.data, time, side="right") - 1 tau = (time - field.data.time.data[ti]) / (field.data.time.data[ti + 1] - field.data.time.data[ti]) - return tau, ti + return np.atleast_1d(tau), np.atleast_1d(ti) def search_indices_vertical_z(depth, gridindexingtype: GridIndexingType, z: float): diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index c234635ec..10fe6e024 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -41,9 +41,8 @@ def XBiLinear( data = field.data val = np.zeros_like(tau) - timeslices = [ti, ti + 1] if tau[0] > 0 else [ti] - for tii in timeslices: - tau_factor = (1 - tau) if ti[0] == tii[0] else tau # TODO also for time that's different per particle? + timeslices = [ti, ti + 1] if tau.any() > 0 else [ti] + for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): xi = xr.DataArray(xi, dims="points") yi = xr.DataArray(yi, dims="points") zi = xr.DataArray(zi, dims="points") @@ -83,9 +82,8 @@ def XBiLinearPeriodic( data = field.data val = np.zeros_like(tau) - timeslices = [ti, ti + 1] if tau[0] > 0 else [ti] - for tii in timeslices: - tau_factor = (1 - tau) if ti[0] == tii[0] else tau # TODO also for time that's different per particle? + timeslices = [ti, ti + 1] if tau.any() > 0 else [ti] + for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): xi = xr.DataArray(xi, dims="points") yi = xr.DataArray(yi, dims="points") zi = xr.DataArray(zi, dims="points") @@ -125,7 +123,7 @@ def XTriLinear( z_size = data.sizes[axis_dim["Z"]] t_size = data.sizes["time"] - timeslices = [ti, ti + 1] if tau[0] > 0 else [ti] + timeslices = [ti, ti + 1] if tau.any() > 0 else [ti] for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): for zii, depth_factor in zip([zi, zi + 1], [1 - zeta, zeta], strict=False): # Clip indices to prevent out-of-bounds access diff --git a/parcels/particleset.py b/parcels/particleset.py index 1776efdb0..43a7e4266 100644 --- a/parcels/particleset.py +++ b/parcels/particleset.py @@ -756,11 +756,11 @@ def execute( raise TypeError("The runtime must be a np.timedelta64 object") else: - if not np.isnat(self._data["time_nextloop"]).any(): + if not np.isnat(self.time_nextloop).any(): if sign_dt > 0: - start_time = self._data["time_nextloop"].min() + start_time = self.time_nextloop.min() else: - start_time = self._data["time_nextloop"].max() + start_time = self.time_nextloop.max() else: if sign_dt > 0: start_time = self.fieldset.time_interval.left diff --git a/parcels/xgrid.py b/parcels/xgrid.py index ee58f640e..987ae87ec 100644 --- a/parcels/xgrid.py +++ b/parcels/xgrid.py @@ -505,7 +505,7 @@ def _search_1d_array( (x - arr[index]) / (arr[index_next] - arr[index]), np.nan, # If at the end of the array, we return np.nan ) - return index, bcoord + return np.atleast_1d(index), np.atleast_1d(bcoord) def _convert_center_pos_to_fpoint( diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index b1994d4d4..e502ab31a 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -69,7 +69,7 @@ def AddLat(particle, fieldset, time): # pragma: no cover if with_delete: assert np.allclose(pset.lat, n * 0.1, atol=1e-12) else: - assert np.allclose([p.lat - n * 0.1 for p in pset], np.zeros(npart), rtol=1e-12) + assert np.allclose(pset.lat, n * 0.1, rtol=1e-12) @pytest.mark.parametrize( From 2ec28da067c3caefa20631dc10c141d6e27aadc9 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 30 Jul 2025 13:44:45 +0200 Subject: [PATCH 09/58] Fix diffusion tests for vectorized kernel --- parcels/_core/utils/time.py | 1 + .../application_kernels/advectiondiffusion.py | 35 ++++++++++--------- parcels/field.py | 10 +++--- parcels/tools/converters.py | 6 ++-- tests/v4/test_diffusion.py | 12 +++---- 5 files changed, 33 insertions(+), 31 deletions(-) diff --git a/parcels/_core/utils/time.py b/parcels/_core/utils/time.py index ffc3cb977..5236987a2 100644 --- a/parcels/_core/utils/time.py +++ b/parcels/_core/utils/time.py @@ -46,6 +46,7 @@ def __init__(self, left: T, right: T) -> None: self.right = right def __contains__(self, item: T) -> bool: + item = np.atleast_1d(item) return (self.left <= item).all() and (item <= self.right).all() def __repr__(self) -> str: diff --git a/parcels/application_kernels/advectiondiffusion.py b/parcels/application_kernels/advectiondiffusion.py index 7c13e3fbe..bd11ddf55 100644 --- a/parcels/application_kernels/advectiondiffusion.py +++ b/parcels/application_kernels/advectiondiffusion.py @@ -3,8 +3,7 @@ See `this tutorial <../examples/tutorial_diffusion.ipynb>`__ for a detailed explanation. """ -import math -import random +import numpy as np __all__ = ["AdvectionDiffusionEM", "AdvectionDiffusionM1", "DiffusionUniformKh"] @@ -24,23 +23,23 @@ def AdvectionDiffusionM1(particle, fieldset, time): # pragma: no cover The Wiener increment `dW` is normally distributed with zero mean and a standard deviation of sqrt(dt). """ - dt = particle.dt / np.timedelta64(1, "s") # noqa TODO improve API for converting dt to seconds + dt = particle.dt / np.timedelta64(1, "s") # Wiener increment with zero mean and std of sqrt(dt) - dWx = random.normalvariate(0, math.sqrt(math.fabs(dt))) - dWy = random.normalvariate(0, math.sqrt(math.fabs(dt))) + dWx = np.random.normal(0, np.sqrt(np.fabs(dt))) + dWy = np.random.normal(0, np.sqrt(np.fabs(dt))) Kxp1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon + fieldset.dres] Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon] - bx = math.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) + bx = np.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon] Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) - by = math.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) + by = np.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) # Particle positions are updated only after evaluating all terms. particle_dlon += u * dt + 0.5 * dKdx * (dWx**2 + dt) + bx * dWx # noqa @@ -60,10 +59,10 @@ def AdvectionDiffusionEM(particle, fieldset, time): # pragma: no cover The Wiener increment `dW` is normally distributed with zero mean and a standard deviation of sqrt(dt). """ - dt = particle.dt / np.timedelta64(1, "s") # noqa TODO improve API for converting dt to seconds + dt = particle.dt / np.timedelta64(1, "s") # Wiener increment with zero mean and std of sqrt(dt) - dWx = random.normalvariate(0, math.sqrt(math.fabs(dt))) - dWy = random.normalvariate(0, math.sqrt(math.fabs(dt))) + dWx = np.random.normal(0, np.sqrt(np.fabs(dt))) + dWy = np.random.normal(0, np.sqrt(np.fabs(dt))) u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon] @@ -71,13 +70,13 @@ def AdvectionDiffusionEM(particle, fieldset, time): # pragma: no cover Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) ax = u + dKdx - bx = math.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) + bx = np.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon] Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) ay = v + dKdy - by = math.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) + by = np.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) # Particle positions are updated only after evaluating all terms. particle_dlon += ax * dt + bx * dWx # noqa @@ -102,13 +101,15 @@ def DiffusionUniformKh(particle, fieldset, time): # pragma: no cover The Wiener increment `dW` is normally distributed with zero mean and a standard deviation of sqrt(dt). """ - dt = particle.dt / np.timedelta64(1, "s") # noqa TODO improve API for converting dt to seconds + dt = particle.dt / np.timedelta64(1, "s") # Wiener increment with zero mean and std of sqrt(dt) - dWx = random.normalvariate(0, math.sqrt(math.fabs(dt))) - dWy = random.normalvariate(0, math.sqrt(math.fabs(dt))) + dWx = np.random.normal(0, np.sqrt(np.fabs(dt))) + dWy = np.random.normal(0, np.sqrt(np.fabs(dt))) - bx = math.sqrt(2 * fieldset.Kh_zonal[particle]) - by = math.sqrt(2 * fieldset.Kh_meridional[particle]) + print(particle) + + bx = np.sqrt(2 * fieldset.Kh_zonal[particle]) + by = np.sqrt(2 * fieldset.Kh_meridional[particle]) particle_dlon += bx * dWx # noqa particle_dlat += by * dWy # noqa diff --git a/parcels/field.py b/parcels/field.py index 447b81348..39146349d 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -26,7 +26,6 @@ FieldOutOfBoundError, FieldOutOfBoundSurfaceError, FieldSamplingError, - _raise_field_out_of_bound_error, ) from parcels.uxgrid import UxGrid from parcels.xgrid import XGrid, _transpose_xfield_data_to_tzyx @@ -263,9 +262,10 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): position = self.grid.search(z, y, x, ei=_ei) value = self._interp_method(self, ti, position, tau, time, z, y, x) - if np.isnan(value): - # Detect Out-of-bounds sampling and raise exception - _raise_field_out_of_bound_error(z, y, x) + # TODO fix outof bounds sampling + # if np.isnan(value): + # # Detect Out-of-bounds sampling and raise exception + # _raise_field_out_of_bound_error(z, y, x) except (FieldSamplingError, FieldOutOfBoundError, FieldOutOfBoundSurfaceError) as e: e.add_note(f"Error interpolating field '{self.name}'.") @@ -278,7 +278,7 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): def __getitem__(self, key): self._check_velocitysampling() try: - if isinstance(key, Particle): + if hasattr(key, "time"): return self.eval(key.time, key.depth, key.lat, key.lon, key) else: return self.eval(*key) diff --git a/parcels/tools/converters.py b/parcels/tools/converters.py index fe0448f59..68c3810b6 100644 --- a/parcels/tools/converters.py +++ b/parcels/tools/converters.py @@ -1,6 +1,6 @@ from __future__ import annotations -from math import cos, pi +from math import pi import numpy as np import numpy.typing as npt @@ -90,10 +90,10 @@ class GeographicPolarSquare(UnitConverter): target_unit = "degree2" def to_target(self, value, z, y, x): - return value / pow(1000.0 * 1.852 * 60.0 * cos(y * pi / 180), 2) + return value / pow(1000.0 * 1.852 * 60.0 * np.cos(y * pi / 180), 2) def to_source(self, value, z, y, x): - return value * pow(1000.0 * 1.852 * 60.0 * cos(y * pi / 180), 2) + return value * pow(1000.0 * 1.852 * 60.0 * np.cos(y * pi / 180), 2) unitconverters_map = { diff --git a/tests/v4/test_diffusion.py b/tests/v4/test_diffusion.py index 585d7ffbd..1deb0bccd 100644 --- a/tests/v4/test_diffusion.py +++ b/tests/v4/test_diffusion.py @@ -37,7 +37,7 @@ def test_fieldKh_Brownian(mesh_type): npart = 100 runtime = np.timedelta64(2, "h") - random.seed(1234) + np.random.seed(1234) pset = ParticleSet(fieldset=fieldset, lon=np.zeros(npart), lat=np.zeros(npart)) pset.execute(pset.Kernel(DiffusionUniformKh), runtime=runtime, dt=np.timedelta64(1, "h")) @@ -77,16 +77,16 @@ def test_fieldKh_SpatiallyVaryingDiffusion(mesh_type, kernel): fieldset = FieldSet([U, V, UV, Kh_zonal, Kh_meridional]) fieldset.add_constant("dres", ds["lon"][1] - ds["lon"][0]) - npart = 100 + npart = 10000 - random.seed(1636) + np.random.seed(1636) pset = ParticleSet(fieldset=fieldset, lon=np.zeros(npart), lat=np.zeros(npart)) pset.execute(pset.Kernel(kernel), runtime=np.timedelta64(4, "h"), dt=np.timedelta64(1, "h")) tol = 2000 * mesh_conversion # effectively 2000 m errors (because of low numbers of particles) assert np.allclose(np.mean(pset.lon), 0, atol=tol) assert np.allclose(np.mean(pset.lat), 0, atol=tol) - assert stats.skew(pset.lon) > stats.skew(pset.lat) + assert abs(stats.skew(pset.lon)) > abs(stats.skew(pset.lat)) @pytest.mark.parametrize("lambd", [1, 5]) @@ -98,7 +98,7 @@ def test_randomexponential(lambd): fieldset.add_constant("lambd", lambd) # Set random seed - random.seed(1234) + np.random.seed(1234) pset = ParticleSet(fieldset=fieldset, lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart)) @@ -123,7 +123,7 @@ def test_randomvonmises(mu, kappa): fieldset.kappa = kappa # Set random seed - random.seed(1234) + np.random.seed(1234) AngleParticle = Particle.add_variable(Variable("angle")) pset = ParticleSet( From 088c2cbe0acc1b36d7b7e0bc1907207a678a9207 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 30 Jul 2025 15:39:29 +0200 Subject: [PATCH 10/58] fixing more unit tests for vectorized kernels --- parcels/kernel.py | 6 +++--- tests/v4/test_advection.py | 6 +++--- tests/v4/test_particleset.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/parcels/kernel.py b/parcels/kernel.py index 614c7686a..5e08beffa 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -293,11 +293,11 @@ def evaluate_pset(self, pset, endtime): pre_dt = pset.dt try: # Use next_dt from AdvectionRK45 if it is set - if abs(endtime - pset.time_nextloop[0]) < abs(pset.next_dt[0]) - np.timedelta64(1000, "ns"): - pset.next_dt[0] = sign_dt * (endtime - pset.time_nextloop[0]) + if abs(endtime - pset.time_nextloop) < abs(pset.next_dt) - np.timedelta64(1000, "ns"): + pset.next_dt = sign_dt * (endtime - pset.time_nextloop) except KeyError: if sign_dt * (endtime - pset.time_nextloop[0]) <= pset.dt[0]: - pset.dt[0] = sign_dt * (endtime - pset.time_nextloop[0]) + pset.dt = sign_dt * (endtime - pset.time_nextloop) res = None for f in self._pyfuncs: res_tmp = f(pset, self._fieldset, pset.time_nextloop[0]) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index bc9a422df..b387a6a00 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -251,7 +251,7 @@ def truth_moving(x_0, y_0, t): pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(6, "h")) exp_lon, exp_lat = truth_moving(start_lon, start_lat, pset.time[0]) - assert np.allclose(pset.lon_nextloop, exp_lon, rtol=rtol) - assert np.allclose(pset.lat_nextloop, exp_lat, rtol=rtol) + assert np.allclose(pset.lon, exp_lon, rtol=rtol) + assert np.allclose(pset.lat, exp_lat, rtol=rtol) if method == "RK4_3D": - assert np.allclose(pset.depth_nextloop, exp_lat, rtol=rtol) + assert np.allclose(pset.depth, exp_lat, rtol=rtol) diff --git a/tests/v4/test_particleset.py b/tests/v4/test_particleset.py index 714574d91..ba3dddc77 100644 --- a/tests/v4/test_particleset.py +++ b/tests/v4/test_particleset.py @@ -151,7 +151,7 @@ def Addlon(particle, fieldset, time): # pragma: no cover particle.dlon += particle.dt / np.timedelta64(1, "s") pset.execute(Addlon, dt=np.timedelta64(2, "s"), runtime=np.timedelta64(8, "s"), verbose_progress=False) - assert np.allclose([p.lon_nextloop for p in pset], [8 - t for t in times]) + assert np.allclose(pset.lon_nextloop, [8 - t for t in times]) @pytest.mark.parametrize( From cb702e1a9e1b09207b6276c0ed1080ed86164471 Mon Sep 17 00:00:00 2001 From: Joe Schoonover Date: Wed, 30 Jul 2025 10:26:57 -0400 Subject: [PATCH 11/58] Update search to handle array of lat,lon To handle array of lat,lon the x and y values need to be column stacked. After making this change, the internal API funciton in uxgrid (grid.neighbors._triangle_area) fails due to an assumed shape for the coordinate (being a scalar and not an array). The quick workaround here is to bring the `_barycentric_coordinates` method into parcels.uxgrid and leverage the numpy-ified parcels.uxgrid._triangle_area method we already have defined. In making this change, since order of operations is different for the barycentric coordinate calculation, this required updating the rounded and hashed values in the stommel gyre test. The actual values for the lat/lon still appear to be ok. --- parcels/uxgrid.py | 69 +++++++++++++++++++++------- tests/v4/test_particleset_execute.py | 4 +- 2 files changed, 55 insertions(+), 18 deletions(-) diff --git a/parcels/uxgrid.py b/parcels/uxgrid.py index 007430903..fac174025 100644 --- a/parcels/uxgrid.py +++ b/parcels/uxgrid.py @@ -5,7 +5,6 @@ import numpy as np import uxarray as ux from uxarray.grid.coordinates import _lonlat_rad_to_xyz -from uxarray.grid.neighbors import _barycentric_coordinates from parcels.field import FieldOutOfBoundError # Adjust import as necessary from parcels.xgrid import _search_1d_array @@ -96,14 +95,14 @@ def try_face(fid): return bcoords, self.ravel_index(zi, neighbor) # Global fallback as last ditch effort - face_ids = self.uxgrid.get_faces_containing_point([x, y], return_counts=False)[0] + points = np.column_stack((x, y)) + face_ids = self.uxgrid.get_faces_containing_point(points, return_counts=False)[0] fi = face_ids[0] if len(face_ids) > 0 else -1 if fi == -1: raise FieldOutOfBoundError(z, y, x) bcoords = try_face(fi) if bcoords is None: raise FieldOutOfBoundError(z, y, x) - return {"Z": (zi, zeta), "FACE": (fi, bcoords)} def _get_barycentric_coordinates_latlon(self, y, x, fi): @@ -119,9 +118,10 @@ def _get_barycentric_coordinates_latlon(self, y, x, fi): ) ) - coord = np.deg2rad([x, y]) + coord = np.deg2rad(np.column_stack((x, y))) bcoord = np.asarray(_barycentric_coordinates(nodes, coord)) - err = abs(np.dot(bcoord, nodes[:, 0]) - coord[0]) + abs(np.dot(bcoord, nodes[:, 1]) - coord[1]) + proj_coord = np.matmul(np.transpose(nodes), bcoord) + err = np.linalg.norm(proj_coord - coord) return bcoord, err def _get_barycentric_coordinates_cartesian(self, y, x, fi): @@ -146,6 +146,51 @@ def _get_barycentric_coordinates_cartesian(self, y, x, fi): return bcoord +def _triangle_area(A, B, C): + """Compute the area of a triangle given by three points.""" + d1 = B - A + d2 = C - A + d3 = np.cross(d1, d2) + return 0.5 * np.linalg.norm(d3) + + +def _barycentric_coordinates(nodes, point, min_area=1e-8): + """ + Compute the barycentric coordinates of a point P inside a convex polygon using area-based weights. + So that this method generalizes to n-sided polygons, we use the Waschpress points as the generalized + barycentric coordinates, which is only valid for convex polygons. + + Parameters + ---------- + nodes : numpy.ndarray + Spherical coordinates (lon,lat) of each corner node of a face + point : numpy.ndarray + Spherical coordinates (lon,lat) of the point + + Returns + ------- + numpy.ndarray + Barycentric coordinates corresponding to each vertex. + + """ + n = len(nodes) + sum_wi = 0 + w = [] + + for i in range(0, n): + vim1 = nodes[i - 1] + vi = nodes[i] + vi1 = nodes[(i + 1) % n] + a0 = _triangle_area(vim1, vi, vi1) + a1 = max(_triangle_area(point, vim1, vi), min_area) + a2 = max(_triangle_area(point, vi, vi1), min_area) + sum_wi += a0 / (a1 * a2) + w.append(a0 / (a1 * a2)) + barycentric_coords = [w_i / sum_wi for w_i in w] + + return barycentric_coords + + def _barycentric_coordinates_cartesian(nodes, point, min_area=1e-8): """ Compute the barycentric coordinates of a point P inside a convex polygon using area-based weights. @@ -173,20 +218,12 @@ def _barycentric_coordinates_cartesian(nodes, point, min_area=1e-8): vim1 = nodes[i - 1] vi = nodes[i] vi1 = nodes[(i + 1) % n] - a0 = _triangle_area_cartesian(vim1, vi, vi1) - a1 = max(_triangle_area_cartesian(point, vim1, vi), min_area) - a2 = max(_triangle_area_cartesian(point, vi, vi1), min_area) + a0 = _triangle_area(vim1, vi, vi1) + a1 = max(_triangle_area(point, vim1, vi), min_area) + a2 = max(_triangle_area(point, vi, vi1), min_area) sum_wi += a0 / (a1 * a2) w.append(a0 / (a1 * a2)) barycentric_coords = [w_i / sum_wi for w_i in w] return barycentric_coords - - -def _triangle_area_cartesian(A, B, C): - """Compute the area of a triangle given by three points.""" - d1 = B - A - d2 = C - A - d3 = np.cross(d1, d2) - return 0.5 * np.linalg.norm(d3) diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index 95f834cc3..babc9c379 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -155,8 +155,8 @@ def test_uxstommelgyre_pset_execute(): dt=np.timedelta64(60, "s"), pyfunc=AdvectionEE, ) - assert utils.round_and_hash_float_array([p.lon for p in pset]) == 1165396086 - assert utils.round_and_hash_float_array([p.lat for p in pset]) == 1142124776 + assert utils.round_and_hash_float_array([p.lon for p in pset]) == 1164489350 + assert utils.round_and_hash_float_array([p.lat for p in pset]) == 1143932648 @pytest.mark.xfail(reason="Output file not implemented yet") From eeff3ad5a1a7203fa22ac85b6a545478962add8b Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 31 Jul 2025 09:54:55 +0200 Subject: [PATCH 12/58] Fixing setattr on particleset level and also reverting some of the unit test assert changes (incl bac89b6ddd4d3cba22e692705c120d6530a9b2cc); that are not needed anymore --- parcels/application_kernels/interpolation.py | 1 + parcels/particleset.py | 8 ++++++++ tests/v4/test_advection.py | 14 +++++++------- tests/v4/test_particleset.py | 2 +- tests/v4/test_particleset_execute.py | 9 ++++----- 5 files changed, 21 insertions(+), 13 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 10fe6e024..13dba26d6 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -139,6 +139,7 @@ def XTriLinear( xi_plus1 = xr.DataArray(np.clip(xi_clipped + 1, 0, x_size - 1), dims="points") yi_plus1 = xr.DataArray(np.clip(yi_clipped + 1, 0, y_size - 1), dims="points") + # TODO see if this can be done with one isel call, by combining the indices F00 = data.isel( {axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zinds, "time": tinds} ).values.flatten() diff --git a/parcels/particleset.py b/parcels/particleset.py index 843a3b4ec..2b432300e 100644 --- a/parcels/particleset.py +++ b/parcels/particleset.py @@ -202,6 +202,14 @@ def __getitem__(self, index): """Get a single particle by index.""" return Particle(self._data, index=index) + def __setattr__(self, name, value): + if name in ["_data"]: + object.__setattr__(self, name, value) + elif isinstance(self._data, dict) and name in self._data.keys(): + self._data[name][:] = value + else: + object.__setattr__(self, name, value) + @staticmethod def lonlatdepth_dtype_from_field_interp_method(field): # TODO update this when now interp methods are implemented diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index b387a6a00..c565a50ac 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -86,7 +86,7 @@ def test_horizontal_advection_in_3D_flow(npart=10): pset.execute(AdvectionRK4, runtime=np.timedelta64(2, "h"), dt=np.timedelta64(15, "m")) expected_lon = pset.depth * (pset.time - fieldset.time_interval.left) / np.timedelta64(1, "s") - assert np.allclose(expected_lon, pset.lon, atol=1.0e-1) + assert np.allclose(expected_lon, pset.lon_nextloop, atol=1.0e-1) @pytest.mark.parametrize("direction", ["up", "down"]) @@ -183,10 +183,10 @@ def test_length1dimensions(u, v, w): # TODO: Refactor this test to be more read pset.execute(kernel, runtime=np.timedelta64(5, "s"), dt=np.timedelta64(1, "s")) assert len(pset.lon) == len([p.lon for p in pset]) - assert ((pset.lon - x0 - 4 * u) < 1e-4).all() - assert ((pset.lat - y0 - 4 * v) < 1e-4).all() + assert ((np.array([p.lon - x0 for p in pset]) - 4 * u) < 1e-6).all() + assert ((np.array([p.lat - y0 for p in pset]) - 4 * v) < 1e-6).all() if w: - assert ((pset.depth - z0 - 4 * w) < 1e-4).all() + assert ((np.array([p.depth - z0 for p in pset]) - 4 * w) < 1e-6).all() @pytest.mark.parametrize( @@ -251,7 +251,7 @@ def truth_moving(x_0, y_0, t): pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(6, "h")) exp_lon, exp_lat = truth_moving(start_lon, start_lat, pset.time[0]) - assert np.allclose(pset.lon, exp_lon, rtol=rtol) - assert np.allclose(pset.lat, exp_lat, rtol=rtol) + assert np.allclose(pset.lon_nextloop, exp_lon, rtol=rtol) + assert np.allclose(pset.lat_nextloop, exp_lat, rtol=rtol) if method == "RK4_3D": - assert np.allclose(pset.depth, exp_lat, rtol=rtol) + assert np.allclose(pset.depth_nextloop, exp_lat, rtol=rtol) diff --git a/tests/v4/test_particleset.py b/tests/v4/test_particleset.py index ba3dddc77..714574d91 100644 --- a/tests/v4/test_particleset.py +++ b/tests/v4/test_particleset.py @@ -151,7 +151,7 @@ def Addlon(particle, fieldset, time): # pragma: no cover particle.dlon += particle.dt / np.timedelta64(1, "s") pset.execute(Addlon, dt=np.timedelta64(2, "s"), runtime=np.timedelta64(8, "s"), verbose_progress=False) - assert np.allclose(pset.lon_nextloop, [8 - t for t in times]) + assert np.allclose([p.lon_nextloop for p in pset], [8 - t for t in times]) @pytest.mark.parametrize( diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index babc9c379..fac1c2c87 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -33,8 +33,7 @@ def test_pset_remove_particle_in_kernel(fieldset): pset = ParticleSet(fieldset, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)) def DeleteKernel(particle, fieldset, time): # pragma: no cover - if particle.lon >= 0.4 and particle.lon <= 0.6: - particle.state = StatusCode.Delete + particle.state = np.where((particle.lon >= 0.4) & (particle.lon <= 0.6), StatusCode.Delete, particle.state) pset.execute(pset.Kernel(DeleteKernel), runtime=np.timedelta64(1, "s"), dt=np.timedelta64(1, "s")) indices = [i for i in range(npart) if not (40 <= i < 60)] @@ -70,7 +69,7 @@ def AddLat(particle, fieldset, time): # pragma: no cover if with_delete: assert np.allclose(pset.lat, n * 0.1, atol=1e-12) else: - assert np.allclose(pset.lat, n * 0.1, rtol=1e-12) + assert np.allclose([p.lat - n * 0.1 for p in pset], np.zeros(npart), rtol=1e-12) @pytest.mark.parametrize( @@ -155,8 +154,8 @@ def test_uxstommelgyre_pset_execute(): dt=np.timedelta64(60, "s"), pyfunc=AdvectionEE, ) - assert utils.round_and_hash_float_array([p.lon for p in pset]) == 1164489350 - assert utils.round_and_hash_float_array([p.lat for p in pset]) == 1143932648 + assert utils.round_and_hash_float_array([p.lon for p in pset]) == 1165396086 + assert utils.round_and_hash_float_array([p.lat for p in pset]) == 1142124776 @pytest.mark.xfail(reason="Output file not implemented yet") From b87691d6ed07fa31f7fd2adc93c2f348694b0f6f Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 31 Jul 2025 15:09:30 +0200 Subject: [PATCH 13/58] Fixing last failing tests in vectorized kernels --- .../application_kernels/advectiondiffusion.py | 28 ++++---- parcels/application_kernels/interpolation.py | 42 ----------- parcels/field.py | 72 +++++++++---------- parcels/xgrid.py | 29 +++----- tests/v4/test_advection.py | 41 ++++++----- tests/v4/test_particleset_execute.py | 8 +-- tests/v4/test_xgrid.py | 20 +++++- 7 files changed, 107 insertions(+), 133 deletions(-) diff --git a/parcels/application_kernels/advectiondiffusion.py b/parcels/application_kernels/advectiondiffusion.py index 3e468efd8..afae2b518 100644 --- a/parcels/application_kernels/advectiondiffusion.py +++ b/parcels/application_kernels/advectiondiffusion.py @@ -28,18 +28,18 @@ def AdvectionDiffusionM1(particle, fieldset, time): # pragma: no cover dWx = np.random.normal(0, np.sqrt(np.fabs(dt))) dWy = np.random.normal(0, np.sqrt(np.fabs(dt))) - Kxp1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon + fieldset.dres] - Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres] + Kxp1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon + fieldset.dres, particle] + Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres, particle] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) - u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon] - bx = np.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) + u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon, particle] + bx = np.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon, particle]) - Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon] - Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon] + Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon, particle] + Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon, particle] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) - by = np.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) + by = np.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon, particle]) # Particle positions are updated only after evaluating all terms. particle.dlon += u * dt + 0.5 * dKdx * (dWx**2 + dt) + bx * dWx @@ -64,19 +64,19 @@ def AdvectionDiffusionEM(particle, fieldset, time): # pragma: no cover dWx = np.random.normal(0, np.sqrt(np.fabs(dt))) dWy = np.random.normal(0, np.sqrt(np.fabs(dt))) - u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon] + u, v = fieldset.UV[particle.time, particle.depth, particle.lat, particle.lon, particle] - Kxp1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon + fieldset.dres] - Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres] + Kxp1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon + fieldset.dres, particle] + Kxm1 = fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon - fieldset.dres, particle] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) ax = u + dKdx - bx = np.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon]) + bx = np.sqrt(2 * fieldset.Kh_zonal[particle.time, particle.depth, particle.lat, particle.lon, particle]) - Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon] - Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon] + Kyp1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat + fieldset.dres, particle.lon, particle] + Kym1 = fieldset.Kh_meridional[particle.time, particle.depth, particle.lat - fieldset.dres, particle.lon, particle] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) ay = v + dKdy - by = np.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon]) + by = np.sqrt(2 * fieldset.Kh_meridional[particle.time, particle.depth, particle.lat, particle.lon, particle]) # Particle positions are updated only after evaluating all terms. particle.dlon += ax * dt + bx * dWx diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 13dba26d6..fe7106443 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -17,7 +17,6 @@ "UXPiecewiseConstantFace", "UXPiecewiseLinearNode", "XBiLinear", - "XBiLinearPeriodic", "XTriLinear", ] @@ -57,47 +56,6 @@ def XBiLinear( return val -def XBiLinearPeriodic( - field: Field, - ti: int, - position: dict[_XGRID_AXES, tuple[int, float | np.ndarray]], - tau: np.float32 | np.float64, - t: np.float32 | np.float64, - z: np.float32 | np.float64, - y: np.float32 | np.float64, - x: np.float32 | np.float64, -): - """Bilinear interpolation on a regular grid with periodic boundary conditions in horizontal directions.""" - xi, xsi = position["X"] - yi, eta = position["Y"] - zi, _ = position["Z"] - - xi = np.where(xi > len(field.grid.lon) - 2, 0, xi) - xsi = (x - field.grid.lon[xi]) / (field.grid.lon[xi + 1] - field.grid.lon[xi]) - yi = np.where(yi > len(field.grid.lat) - 2, 0, yi) - eta = (y - field.grid.lat[yi]) / (field.grid.lat[yi + 1] - field.grid.lat[yi]) - - axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) - - data = field.data - val = np.zeros_like(tau) - - timeslices = [ti, ti + 1] if tau.any() > 0 else [ti] - for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): - xi = xr.DataArray(xi, dims="points") - yi = xr.DataArray(yi, dims="points") - zi = xr.DataArray(zi, dims="points") - ti = xr.DataArray(tii, dims="points") - F00 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": ti}).values.flatten() - F10 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": ti}).values.flatten() - F01 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": ti}).values.flatten() - F11 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": ti}).values.flatten() - val += ( - (1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11 - ) * tau_factor - return val - - def XTriLinear( field: Field, ti: int, diff --git a/parcels/field.py b/parcels/field.py index 39146349d..1de89cf4c 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -23,12 +23,10 @@ ) from parcels.tools.statuscodes import ( AllParcelsErrorCodes, - FieldOutOfBoundError, - FieldOutOfBoundSurfaceError, - FieldSamplingError, + StatusCode, ) from parcels.uxgrid import UxGrid -from parcels.xgrid import XGrid, _transpose_xfield_data_to_tzyx +from parcels.xgrid import LEFT_OUT_OF_BOUNDS, RIGHT_OUT_OF_BOUNDS, XGrid, _transpose_xfield_data_to_tzyx from ._index_search import _search_time_index @@ -257,19 +255,11 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): # else: # _ei = particle.ei[self.igrid] - try: - tau, ti = _search_time_index(self, time) - position = self.grid.search(z, y, x, ei=_ei) - value = self._interp_method(self, ti, position, tau, time, z, y, x) - - # TODO fix outof bounds sampling - # if np.isnan(value): - # # Detect Out-of-bounds sampling and raise exception - # _raise_field_out_of_bound_error(z, y, x) + tau, ti = _search_time_index(self, time) + position = self.grid.search(z, y, x, ei=_ei) + _update_particle_states(particle, position) - except (FieldSamplingError, FieldOutOfBoundError, FieldOutOfBoundSurfaceError) as e: - e.add_note(f"Error interpolating field '{self.name}'.") - raise e + value = self._interp_method(self, ti, position, tau, time, z, y, x) if applyConversion: value = self.units.to_target(value, z, y, x) @@ -345,31 +335,28 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): # else: # _ei = particle.ei[self.igrid] - try: - tau, ti = _search_time_index(self.U, time) - position = self.grid.search(z, y, x, ei=_ei) - if self._vector_interp_method is None: - u = self.U._interp_method(self.U, ti, position, tau, time, z, y, x) - v = self.V._interp_method(self.V, ti, position, tau, time, z, y, x) - if "3D" in self.vector_type: - w = self.W._interp_method(self.W, ti, position, tau, time, z, y, x) - else: - (u, v, w) = self._vector_interp_method(self, ti, position, time, z, y, x) + tau, ti = _search_time_index(self.U, time) + position = self.grid.search(z, y, x, ei=_ei) + _update_particle_states(particle, position) - if applyConversion: - u = self.U.units.to_target(u, z, y, x) - v = self.V.units.to_target(v, z, y, x) - if "3D" in self.vector_type: - w = self.W.units.to_target(w, z, y, x) if self.W else 0.0 + if self._vector_interp_method is None: + u = self.U._interp_method(self.U, ti, position, tau, time, z, y, x) + v = self.V._interp_method(self.V, ti, position, tau, time, z, y, x) + if "3D" in self.vector_type: + w = self.W._interp_method(self.W, ti, position, tau, time, z, y, x) + else: + (u, v, w) = self._vector_interp_method(self, ti, position, time, z, y, x) + if applyConversion: + u = self.U.units.to_target(u, z, y, x) + v = self.V.units.to_target(v, z, y, x) if "3D" in self.vector_type: - return (u, v, w) - else: - return (u, v) + w = self.W.units.to_target(w, z, y, x) if self.W else 0.0 - except (FieldSamplingError, FieldOutOfBoundError, FieldOutOfBoundSurfaceError) as e: - e.add_note(f"Error interpolating field '{self.name}'.") - raise e + if "3D" in self.vector_type: + return (u, v, w) + else: + return (u, v) def __getitem__(self, key): try: @@ -381,6 +368,17 @@ def __getitem__(self, key): return _deal_with_errors(error, key, vector_type=self.vector_type) +def _update_particle_states(particle, position): + """Update the particle states based on the position dictionary.""" + if particle and "X" in position: # TODO also support uxgrid search + particle.state = np.where(position["X"][0] < 0, StatusCode.ErrorOutOfBounds, particle.state) + particle.state = np.where(position["Y"][0] < 0, StatusCode.ErrorOutOfBounds, particle.state) + particle.state = np.where(position["Z"][0] == RIGHT_OUT_OF_BOUNDS, StatusCode.ErrorOutOfBounds, particle.state) + particle.state = np.where( + position["Z"][0] == LEFT_OUT_OF_BOUNDS, StatusCode.ErrorThroughSurface, particle.state + ) + + def _assert_valid_uxdataarray(data: ux.UxDataArray): """Verifies that all the required attributes are present in the xarray.DataArray or uxarray.UxDataArray object. diff --git a/parcels/xgrid.py b/parcels/xgrid.py index 987ae87ec..4621430c8 100644 --- a/parcels/xgrid.py +++ b/parcels/xgrid.py @@ -21,6 +21,9 @@ _DEFAULT_XGCM_KWARGS = {"periodic": False} +LEFT_OUT_OF_BOUNDS = -2 +RIGHT_OUT_OF_BOUNDS = -1 + def get_cell_count_along_dim(axis: xgcm.Axis) -> int: first_coord = list(axis.coords.items())[0] @@ -271,15 +274,6 @@ def search(self, z, y, x, ei=None): ds = self.xgcm_grid._ds zi, zeta = _search_1d_array(ds.depth.values, z) - # if any(zi == -1): # TODO throw error only for those particles where zi == -1 - # if any(zeta < 0): - # raise FieldOutOfBoundError( - # f"Depth {z} is out of bounds for the grid with depth values {ds.depth.values}." - # ) - # elif any(zeta > 1): - # raise FieldOutOfBoundSurfaceError( - # f"Depth {z} is out of the surface for the grid with depth values {ds.depth.values}." - # ) if ds.lon.ndim == 1: yi, eta = _search_1d_array(ds.lat.values, y) @@ -477,7 +471,6 @@ def _search_1d_array( Searches for the particle location in a 1D array and returns barycentric coordinate along dimension. Assumptions: - - particle position x is within the bounds of the array - array is strictly monotonically increasing. Parameters @@ -489,9 +482,9 @@ def _search_1d_array( Returns ------- - int - Index of the element just before the position x in the array. - float + array of int + Index of the element just before the position x in the array. Note that this index is -2 if the index is left out of bounds and -1 if the index is right out of bounds. + array of float Barycentric coordinate. """ # TODO v4: We probably rework this to deal with 0D arrays before this point (as we already know field dimensionality) @@ -500,11 +493,11 @@ def _search_1d_array( index = np.searchsorted(arr, x, side="right") - 1 index_next = np.clip(index + 1, 1, len(arr) - 1) # Ensure we don't go out of bounds - bcoord = np.where( - index <= len(arr) - 2, - (x - arr[index]) / (arr[index_next] - arr[index]), - np.nan, # If at the end of the array, we return np.nan - ) + bcoord = (x - arr[index]) / (arr[index_next] - arr[index]) + + index = np.where(x < arr[0], LEFT_OUT_OF_BOUNDS, index) + index = np.where(x >= arr[-1], RIGHT_OUT_OF_BOUNDS, index) + return np.atleast_1d(index), np.atleast_1d(bcoord) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index c565a50ac..3d19d80a4 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -5,7 +5,7 @@ from parcels._datasets.structured.generated import simple_UV_dataset from parcels.application_kernels.advection import AdvectionEE, AdvectionRK4, AdvectionRK4_3D, AdvectionRK45 from parcels.application_kernels.advectiondiffusion import AdvectionDiffusionEM, AdvectionDiffusionM1 -from parcels.application_kernels.interpolation import XBiLinear, XBiLinearPeriodic, XTriLinear +from parcels.application_kernels.interpolation import XBiLinear, XTriLinear from parcels.field import Field, VectorField from parcels.fieldset import FieldSet from parcels.particle import Particle, Variable @@ -46,8 +46,7 @@ def test_advection_zonal(mesh_type, npart=10): def periodicBC(particle, fieldset, time): particle.total_dlon += particle.dlon - particle.lon = np.fmod(particle.lon, fieldset.U.grid.lon[-1]) - particle.lat = np.fmod(particle.lat, fieldset.U.grid.lat[-1]) + particle.lon = np.fmod(particle.lon, 2) def test_advection_zonal_periodic(): @@ -56,9 +55,15 @@ def test_advection_zonal_periodic(): ds["lon"].data = np.array([0, 2]) ds["lat"].data = np.array([0, 2]) + # add a halo + halo = ds.isel(XG=0) + halo.lon.values = ds.lon.values[1] + 1 + halo.XG.values = ds.XG.values[1] + 2 + ds = xr.concat([ds, halo], dim="XG") + grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, interp_method=XBiLinearPeriodic) - V = Field("V", ds["V"], grid, interp_method=XBiLinearPeriodic) + U = Field("U", ds["U"], grid, interp_method=XBiLinear) + V = Field("V", ds["V"], grid, interp_method=XBiLinear) UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, UV]) @@ -95,7 +100,7 @@ def test_advection_3D_outofbounds(direction, wErrorThroughSurface): ds = simple_UV_dataset(mesh_type="flat") grid = XGrid.from_dataset(ds) U = Field("U", ds["U"], grid, interp_method=XTriLinear) - U.data[:] = 0.01 # Set U to 0 at the surface + U.data[:] = 0.01 # Set U to small value (to avoid horizontal out of bounds) V = Field("V", ds["V"], grid, interp_method=XTriLinear) W = Field("W", ds["V"], grid, interp_method=XTriLinear) # Use V as W for testing W.data[:] = -1.0 if direction == "up" else 1.0 @@ -104,18 +109,22 @@ def test_advection_3D_outofbounds(direction, wErrorThroughSurface): fieldset = FieldSet([U, V, W, UVW, UV]) def DeleteParticle(particle, fieldset, time): # pragma: no cover - if particle.state == StatusCode.ErrorOutOfBounds or particle.state == StatusCode.ErrorThroughSurface: - particle.state = StatusCode.Delete + particle.state = np.where(particle.state == StatusCode.ErrorOutOfBounds, StatusCode.Delete, particle.state) + particle.state = np.where(particle.state == StatusCode.ErrorThroughSurface, StatusCode.Delete, particle.state) def SubmergeParticle(particle, fieldset, time): # pragma: no cover - if particle.state == StatusCode.ErrorThroughSurface: - dt = particle.dt / np.timedelta64(1, "s") - (u, v) = fieldset.UV[particle] - particle.dlon = u * dt - particle.dlat = v * dt - particle.ddepth = 0.0 - particle.depth = 0 - particle.state = StatusCode.Evaluate + if len(particle.state) == 0: + return + inds = np.argwhere(particle.state == StatusCode.ErrorThroughSurface).flatten() + if len(inds) == 0: + return + dt = particle.dt / np.timedelta64(1, "s") + (u, v) = fieldset.UV[particle[inds]] + particle.dlon[inds] = u * dt + particle.dlat[inds] = v * dt + particle.ddepth[inds] = 0.0 + particle.depth[inds] = 0 + particle.state[inds] = StatusCode.Evaluate kernels = [AdvectionRK4_3D] if wErrorThroughSurface: diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index fac1c2c87..5daefa098 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -103,16 +103,14 @@ def test_execution_fail_python_exception(fieldset, npart=10): pset = ParticleSet(fieldset, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)) def PythonFail(particle, fieldset, time): # pragma: no cover - if particle.time >= fieldset.time_interval.left + np.timedelta64(10, "s"): + inds = np.argwhere(particle.time >= fieldset.time_interval.left + np.timedelta64(10, "s")) + if inds.size > 0: raise RuntimeError("Enough is enough!") - else: - pass with pytest.raises(RuntimeError): pset.execute(PythonFail, runtime=np.timedelta64(20, "s"), dt=np.timedelta64(2, "s")) assert len(pset) == npart - assert pset.time[0] == fieldset.time_interval.left + np.timedelta64(10, "s") - assert all([time == fieldset.time_interval.left + np.timedelta64(0, "s") for time in pset.time[1:]]) + assert all(pset.time == fieldset.time_interval.left + np.timedelta64(10, "s")) def test_uxstommelgyre_pset_execute(): diff --git a/tests/v4/test_xgrid.py b/tests/v4/test_xgrid.py index 30bcc7c1a..b23b981f1 100644 --- a/tests/v4/test_xgrid.py +++ b/tests/v4/test_xgrid.py @@ -7,7 +7,13 @@ from numpy.testing import assert_allclose from parcels._datasets.structured.generic import X, Y, Z, datasets -from parcels.xgrid import XGrid, _search_1d_array, _transpose_xfield_data_to_tzyx +from parcels.xgrid import ( + LEFT_OUT_OF_BOUNDS, + RIGHT_OUT_OF_BOUNDS, + XGrid, + _search_1d_array, + _transpose_xfield_data_to_tzyx, +) from tests import utils GridTestCase = namedtuple("GridTestCase", ["ds", "attr", "expected"]) @@ -162,6 +168,18 @@ def test_search_1d_array(array, x, expected_xi, expected_xsi): assert np.isclose(xsi, expected_xsi) +@pytest.mark.parametrize( + "array, x, expected_xi", + [ + (np.array([1, 2, 3, 4, 5]), -0.1, LEFT_OUT_OF_BOUNDS), + (np.array([1, 2, 3, 4, 5]), 6.5, RIGHT_OUT_OF_BOUNDS), + ], +) +def test_search_1d_array_out_of_bounds(array, x, expected_xi): + xi, xsi = _search_1d_array(array, x) + assert xi == expected_xi + + @pytest.mark.parametrize( "ds, da_name, expected", [ From 613f873f36662e650c4888c211fc5f8063fae25a Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 31 Jul 2025 15:12:31 +0200 Subject: [PATCH 14/58] Fixing import in uxgrid to use right file --- parcels/uxgrid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parcels/uxgrid.py b/parcels/uxgrid.py index fac174025..26235804c 100644 --- a/parcels/uxgrid.py +++ b/parcels/uxgrid.py @@ -6,7 +6,7 @@ import uxarray as ux from uxarray.grid.coordinates import _lonlat_rad_to_xyz -from parcels.field import FieldOutOfBoundError # Adjust import as necessary +from parcels.tools.statuscodes import FieldOutOfBoundError from parcels.xgrid import _search_1d_array from .basegrid import BaseGrid From 88c738876ff80a17915bbfbe594e774a637fbe48 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 31 Jul 2025 15:27:19 +0200 Subject: [PATCH 15/58] clean up kernel loop --- parcels/kernel.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/parcels/kernel.py b/parcels/kernel.py index 5e08beffa..04cbe39dc 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -284,9 +284,7 @@ def evaluate_pset(self, pset, endtime): dt : computational integration timestep """ - # TODO what happens if only some particles throw an error? - # TODO support for dt to be different for each particle - sign_dt = 1 if pset.dt[0] >= 0 else -1 + sign_dt = np.where(pset.dt >= 0, 1, -1) while pset[0].state in [StatusCode.Evaluate, StatusCode.Repeat]: if all(sign_dt * (endtime - pset.time_nextloop) <= 0): return pset @@ -296,10 +294,14 @@ def evaluate_pset(self, pset, endtime): if abs(endtime - pset.time_nextloop) < abs(pset.next_dt) - np.timedelta64(1000, "ns"): pset.next_dt = sign_dt * (endtime - pset.time_nextloop) except KeyError: - if sign_dt * (endtime - pset.time_nextloop[0]) <= pset.dt[0]: - pset.dt = sign_dt * (endtime - pset.time_nextloop) + pset.dt = np.where( + sign_dt * (endtime - pset.time_nextloop) <= pset.dt, + sign_dt * (endtime - pset.time_nextloop), + pset.dt, + ) res = None for f in self._pyfuncs: + # TODO remove "time" from kernel signature in v4; because it doesn't make sense for vectorized particles res_tmp = f(pset, self._fieldset, pset.time_nextloop[0]) if res_tmp is not None: # TODO v4: Remove once all kernels return StatusCode res = res_tmp @@ -307,11 +309,13 @@ def evaluate_pset(self, pset, endtime): break if res is None: - if pset.state[0] == StatusCode.Success: - if sign_dt * (pset.time[0] - endtime) > 0: - pset.state[0] = StatusCode.Evaluate - else: - pset.state[0] = res + pset.state = np.where( + (pset.state == StatusCode.Success) & (sign_dt * (pset.time - endtime) > 0), + StatusCode.Evaluate, + pset.state, + ) + else: # TODO need to think how the kernel exitcode works on vectorized particleset + pset.state = res pset.dt = pre_dt return pset From 6c4acc03d0c63220fac7a111edc04c8cae131591 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 31 Jul 2025 16:08:26 +0200 Subject: [PATCH 16/58] Setting default interpolators on Fields And cleaning up the existing interpolators --- parcels/application_kernels/interpolation.py | 40 +++++--------------- parcels/field.py | 19 ++-------- tests/utils.py | 6 +-- tests/v4/test_advection.py | 36 +++++++++--------- tests/v4/test_diffusion.py | 18 ++++----- tests/v4/test_interpolation.py | 23 +++++++++-- 6 files changed, 61 insertions(+), 81 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index fe7106443..448e86313 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -7,56 +7,34 @@ import numpy as np import xarray as xr -from parcels.field import Field - if TYPE_CHECKING: + from parcels.field import Field from parcels.uxgrid import _UXGRID_AXES from parcels.xgrid import _XGRID_AXES __all__ = [ "UXPiecewiseConstantFace", "UXPiecewiseLinearNode", - "XBiLinear", - "XTriLinear", + "XLinear", + "ZeroInterpolator", ] -def XBiLinear( +def ZeroInterpolator( field: Field, ti: int, - position: dict[_XGRID_AXES, tuple[int, float | np.ndarray]], + position: dict[str, tuple[int, float | np.ndarray]], tau: np.float32 | np.float64, t: np.float32 | np.float64, z: np.float32 | np.float64, y: np.float32 | np.float64, x: np.float32 | np.float64, -): - """Bilinear interpolation on a regular grid.""" - xi, xsi = position["X"] - yi, eta = position["Y"] - zi, _ = position["Z"] - axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) - - data = field.data - val = np.zeros_like(tau) - - timeslices = [ti, ti + 1] if tau.any() > 0 else [ti] - for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): - xi = xr.DataArray(xi, dims="points") - yi = xr.DataArray(yi, dims="points") - zi = xr.DataArray(zi, dims="points") - tii = xr.DataArray(tii, dims="points") - F00 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": tii}).values.flatten() - F10 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi, axis_dim["Z"]: zi, "time": tii}).values.flatten() - F01 = data.isel({axis_dim["X"]: xi, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": tii}).values.flatten() - F11 = data.isel({axis_dim["X"]: xi + 1, axis_dim["Y"]: yi + 1, axis_dim["Z"]: zi, "time": tii}).values.flatten() - val += ( - (1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11 - ) * tau_factor - return val +) -> np.float32 | np.float64: + """Template function used for the signature check of the lateral interpolation methods.""" + return 0.0 -def XTriLinear( +def XLinear( field: Field, ti: int, position: dict[_XGRID_AXES, tuple[int, float | np.ndarray]], diff --git a/parcels/field.py b/parcels/field.py index 1de89cf4c..cc307dd81 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -16,6 +16,7 @@ VectorType, assert_valid_mesh, ) +from parcels.application_kernels.interpolation import UXPiecewiseLinearNode, XLinear, ZeroInterpolator from parcels.particle import Particle from parcels.tools.converters import ( UnitConverter, @@ -49,23 +50,9 @@ def _deal_with_errors(error, key, vector_type: VectorType): return 0 -def ZeroInterpolator( - field: Field, - ti: int, - position: dict[str, tuple[int, float | np.ndarray]], - tau: np.float32 | np.float64, - t: np.float32 | np.float64, - z: np.float32 | np.float64, - y: np.float32 | np.float64, - x: np.float32 | np.float64, -) -> np.float32 | np.float64: - """Template function used for the signature check of the lateral interpolation methods.""" - return 0.0 - - _DEFAULT_INTERPOLATOR_MAPPING = { - XGrid: ZeroInterpolator, # TODO v4: Update these to better defaults - UxGrid: ZeroInterpolator, + XGrid: XLinear, + UxGrid: UXPiecewiseLinearNode, } diff --git a/tests/utils.py b/tests/utils.py index fc792d73b..551f82edd 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -10,7 +10,7 @@ import parcels from parcels._datasets.structured.generated import simple_UV_dataset -from parcels.application_kernels.interpolation import XBiLinear +from parcels.application_kernels.interpolation import XLinear from parcels.field import Field, VectorField from parcels.fieldset import FieldSet from parcels.xgrid import _FIELD_DATA_ORDERING, XGrid, get_axis_from_dim_name @@ -75,8 +75,8 @@ def create_fieldset_zeros_conversion(mesh_type="spherical", xdim=200, ydim=100) ds["lon"].data = np.linspace(-1e6 * mesh_conversion, 1e6 * mesh_conversion, xdim) ds["lat"].data = np.linspace(-1e6 * mesh_conversion, 1e6 * mesh_conversion, ydim) grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XBiLinear) - V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XBiLinear) + U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XLinear) + V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XLinear) UV = VectorField("UV", U, V) return FieldSet([U, V, UV]) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index 3d19d80a4..f8dc7077f 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -5,7 +5,7 @@ from parcels._datasets.structured.generated import simple_UV_dataset from parcels.application_kernels.advection import AdvectionEE, AdvectionRK4, AdvectionRK4_3D, AdvectionRK45 from parcels.application_kernels.advectiondiffusion import AdvectionDiffusionEM, AdvectionDiffusionM1 -from parcels.application_kernels.interpolation import XBiLinear, XTriLinear +from parcels.application_kernels.interpolation import XLinear from parcels.field import Field, VectorField from parcels.fieldset import FieldSet from parcels.particle import Particle, Variable @@ -30,8 +30,8 @@ def test_advection_zonal(mesh_type, npart=10): ds = simple_UV_dataset(mesh_type=mesh_type) ds["U"].data[:] = 1.0 grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XBiLinear) - V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XBiLinear) + U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XLinear) + V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XLinear) UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, UV]) @@ -62,8 +62,8 @@ def test_advection_zonal_periodic(): ds = xr.concat([ds, halo], dim="XG") grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, interp_method=XBiLinear) - V = Field("V", ds["V"], grid, interp_method=XBiLinear) + U = Field("U", ds["U"], grid, interp_method=XLinear) + V = Field("V", ds["V"], grid, interp_method=XLinear) UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, UV]) @@ -81,9 +81,9 @@ def test_horizontal_advection_in_3D_flow(npart=10): ds = simple_UV_dataset(mesh_type="flat") ds["U"].data[:] = 1.0 grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, interp_method=XTriLinear) + U = Field("U", ds["U"], grid, interp_method=XLinear) U.data[:, 0, :, :] = 0.0 # Set U to 0 at the surface - V = Field("V", ds["V"], grid, interp_method=XTriLinear) + V = Field("V", ds["V"], grid, interp_method=XLinear) UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, UV]) @@ -99,10 +99,10 @@ def test_horizontal_advection_in_3D_flow(npart=10): def test_advection_3D_outofbounds(direction, wErrorThroughSurface): ds = simple_UV_dataset(mesh_type="flat") grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, interp_method=XTriLinear) + U = Field("U", ds["U"], grid, interp_method=XLinear) U.data[:] = 0.01 # Set U to small value (to avoid horizontal out of bounds) - V = Field("V", ds["V"], grid, interp_method=XTriLinear) - W = Field("W", ds["V"], grid, interp_method=XTriLinear) # Use V as W for testing + V = Field("V", ds["V"], grid, interp_method=XLinear) + W = Field("W", ds["V"], grid, interp_method=XLinear) # Use V as W for testing W.data[:] = -1.0 if direction == "up" else 1.0 UVW = VectorField("UVW", U, V, W) UV = VectorField("UV", U, V) @@ -178,11 +178,11 @@ def test_length1dimensions(u, v, w): # TODO: Refactor this test to be more read ds["W"] = (["time", "depth", "YG", "XG"], W) grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, interp_method=XTriLinear) - V = Field("V", ds["V"], grid, interp_method=XTriLinear) + U = Field("U", ds["U"], grid, interp_method=XLinear) + V = Field("V", ds["V"], grid, interp_method=XLinear) fields = [U, V, VectorField("UV", U, V)] if w: - W = Field("W", ds["W"], grid, interp_method=XTriLinear) + W = Field("W", ds["W"], grid, interp_method=XLinear) fields.append(VectorField("UVW", U, V, W)) fieldset = FieldSet(fields) @@ -229,11 +229,11 @@ def truth_moving(x_0, y_0, t): ds["lon"].data = np.array([0, 25000]) ds["lat"].data = np.array([0, 25000]) ds = ds.assign_coords(time=time) - U = Field("U", ds["U"], grid, interp_method=XBiLinear) - V = Field("V", ds["V"], grid, interp_method=XBiLinear) + U = Field("U", ds["U"], grid, interp_method=XLinear) + V = Field("V", ds["V"], grid, interp_method=XLinear) if method == "RK4_3D": # Using W to test 3D advection (assuming same velocity as V) - W = Field("W", ds["V"], grid, interp_method=XTriLinear) + W = Field("W", ds["V"], grid, interp_method=XLinear) UVW = VectorField("UVW", U, V, W) fieldset = FieldSet([U, V, W, UVW]) start_depth = start_lat @@ -249,8 +249,8 @@ def truth_moving(x_0, y_0, t): elif method in ["AdvDiffEM", "AdvDiffM1"]: # Add zero diffusivity field for diffusion kernels ds["Kh"] = (["time", "depth", "YG", "XG"], np.full((len(time), 2, 2, 2), 0)) - fieldset.add_field(Field("Kh", ds["Kh"], grid, interp_method=XBiLinear), "Kh_zonal") - fieldset.add_field(Field("Kh", ds["Kh"], grid, interp_method=XBiLinear), "Kh_meridional") + fieldset.add_field(Field("Kh", ds["Kh"], grid, interp_method=XLinear), "Kh_zonal") + fieldset.add_field(Field("Kh", ds["Kh"], grid, interp_method=XLinear), "Kh_meridional") fieldset.add_constant("dres", 0.1) pclass = RK45Particles if method == "RK45" else Particle diff --git a/tests/v4/test_diffusion.py b/tests/v4/test_diffusion.py index 1deb0bccd..546867c9f 100644 --- a/tests/v4/test_diffusion.py +++ b/tests/v4/test_diffusion.py @@ -6,7 +6,7 @@ from parcels._datasets.structured.generated import simple_UV_dataset from parcels.application_kernels import AdvectionDiffusionEM, AdvectionDiffusionM1, DiffusionUniformKh -from parcels.application_kernels.interpolation import XBiLinear +from parcels.application_kernels.interpolation import XLinear from parcels.field import Field, VectorField from parcels.fieldset import FieldSet from parcels.particle import Particle, Variable @@ -25,12 +25,12 @@ def test_fieldKh_Brownian(mesh_type): ds["lon"].data = np.array([-1e6, 1e6]) ds["lat"].data = np.array([-1e6, 1e6]) grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XBiLinear) - V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XBiLinear) + U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XLinear) + V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XLinear) ds["Kh_zonal"] = (["time", "depth", "YG", "XG"], np.full((2, 1, 2, 2), kh_zonal)) ds["Kh_meridional"] = (["time", "depth", "YG", "XG"], np.full((2, 1, 2, 2), kh_meridional)) - Kh_zonal = Field("Kh_zonal", ds["Kh_zonal"], grid=grid, mesh_type=mesh_type, interp_method=XBiLinear) - Kh_meridional = Field("Kh_meridional", ds["Kh_meridional"], grid=grid, mesh_type=mesh_type, interp_method=XBiLinear) + Kh_zonal = Field("Kh_zonal", ds["Kh_zonal"], grid=grid, mesh_type=mesh_type, interp_method=XLinear) + Kh_meridional = Field("Kh_meridional", ds["Kh_meridional"], grid=grid, mesh_type=mesh_type, interp_method=XLinear) UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, UV, Kh_zonal, Kh_meridional]) @@ -62,8 +62,8 @@ def test_fieldKh_SpatiallyVaryingDiffusion(mesh_type, kernel): ds["lon"].data = np.linspace(-1e6, 1e6, xdim) ds["lat"].data = np.linspace(-1e6, 1e6, ydim) grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XBiLinear) - V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XBiLinear) + U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XLinear) + V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XLinear) Kh = np.zeros((ydim, xdim), dtype=np.float32) for x in range(xdim): @@ -71,8 +71,8 @@ def test_fieldKh_SpatiallyVaryingDiffusion(mesh_type, kernel): ds["Kh_zonal"] = (["time", "depth", "YG", "XG"], np.full((2, 1, ydim, xdim), Kh)) ds["Kh_meridional"] = (["time", "depth", "YG", "XG"], np.full((2, 1, ydim, xdim), Kh)) - Kh_zonal = Field("Kh_zonal", ds["Kh_zonal"], grid=grid, mesh_type=mesh_type, interp_method=XBiLinear) - Kh_meridional = Field("Kh_meridional", ds["Kh_meridional"], grid=grid, mesh_type=mesh_type, interp_method=XBiLinear) + Kh_zonal = Field("Kh_zonal", ds["Kh_zonal"], grid=grid, mesh_type=mesh_type, interp_method=XLinear) + Kh_meridional = Field("Kh_meridional", ds["Kh_meridional"], grid=grid, mesh_type=mesh_type, interp_method=XLinear) UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, UV, Kh_zonal, Kh_meridional]) fieldset.add_constant("dres", ds["lon"][1] - ds["lon"][0]) diff --git a/tests/v4/test_interpolation.py b/tests/v4/test_interpolation.py index 316fc2672..0d76442d1 100644 --- a/tests/v4/test_interpolation.py +++ b/tests/v4/test_interpolation.py @@ -3,13 +3,15 @@ import xarray as xr from parcels._datasets.structured.generated import simple_UV_dataset +from parcels._datasets.unstructured.generic import datasets as datasets_unstructured from parcels.application_kernels.advection import AdvectionRK4_3D -from parcels.application_kernels.interpolation import XBiLinear, XTriLinear +from parcels.application_kernels.interpolation import UXPiecewiseLinearNode, XLinear from parcels.field import Field, VectorField from parcels.fieldset import FieldSet from parcels.particle import Particle, Variable from parcels.particleset import ParticleSet from parcels.tools.statuscodes import StatusCode +from parcels.uxgrid import UxGrid from parcels.xgrid import XGrid from tests.utils import TEST_DATA @@ -19,8 +21,8 @@ def test_interpolation_mesh_type(mesh_type, npart=10): ds = simple_UV_dataset(mesh_type=mesh_type) ds["U"].data[:] = 1.0 grid = XGrid.from_dataset(ds) - U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XBiLinear) - V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XBiLinear) + U = Field("U", ds["U"], grid, mesh_type=mesh_type, interp_method=XLinear) + V = Field("V", ds["V"], grid, mesh_type=mesh_type, interp_method=XLinear) UV = VectorField("UV", U, V) lat = 30.0 @@ -37,8 +39,21 @@ def test_interpolation_mesh_type(mesh_type, npart=10): assert U.eval(time, 0, lat, 0, applyConversion=False) == 1 +def test_default_interpolator(): + """Test that the default interpolator is set correctly.""" + ds = simple_UV_dataset() + grid = XGrid.from_dataset(ds) + U = Field("U", ds["U"], grid) + assert U.interp_method == XLinear + + ds = datasets_unstructured["stommel_gyre_delaunay"] + grid = UxGrid(grid=ds.uxgrid, z=ds.coords["nz"]) + U = Field("U", ds["U"], grid) + assert U.interp_method == UXPiecewiseLinearNode + + interp_methods = { - "linear": XTriLinear, + "linear": XLinear, } From cac1f1740d313830eb80a817119bac9fb6613c4f Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 31 Jul 2025 17:04:31 +0200 Subject: [PATCH 17/58] simplifying vectorized XLinear interpolation function --- parcels/application_kernels/interpolation.py | 58 ++++++++------------ 1 file changed, 22 insertions(+), 36 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 448e86313..805fd7de8 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -53,47 +53,33 @@ def XLinear( data = field.data val = np.zeros_like(tau) - # Get dimension sizes to clip indices - x_size = data.sizes[axis_dim["X"]] - y_size = data.sizes[axis_dim["Y"]] - z_size = data.sizes[axis_dim["Z"]] - t_size = data.sizes["time"] + xii = np.clip(np.stack([xi, xi + 1, xi, xi + 1], axis=-1).flatten(), 0, data.shape[3] - 1) + yii = np.clip(np.stack([yi, yi, yi + 1, yi + 1], axis=-1).flatten(), 0, data.shape[2] - 1) + xi_da = xr.DataArray(xii, dims=("points")) + yi_da = xr.DataArray(yii, dims=("points")) timeslices = [ti, ti + 1] if tau.any() > 0 else [ti] + depth_slices = [zi, zi + 1] if zeta.any() > 0 else [zi] for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): - for zii, depth_factor in zip([zi, zi + 1], [1 - zeta, zeta], strict=False): - # Clip indices to prevent out-of-bounds access - xi_clipped = np.clip(xi, 0, x_size - 1) - yi_clipped = np.clip(yi, 0, y_size - 1) - zii_clipped = np.clip(zii, 0, z_size - 1) - tii_clipped = np.clip(tii, 0, t_size - 1) - - xi_da = xr.DataArray(xi_clipped, dims="points") - yi_da = xr.DataArray(yi_clipped, dims="points") - zinds = xr.DataArray(zii_clipped, dims="points") - tinds = xr.DataArray(tii_clipped, dims="points") - xi_plus1 = xr.DataArray(np.clip(xi_clipped + 1, 0, x_size - 1), dims="points") - yi_plus1 = xr.DataArray(np.clip(yi_clipped + 1, 0, y_size - 1), dims="points") - - # TODO see if this can be done with one isel call, by combining the indices - F00 = data.isel( - {axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zinds, "time": tinds} - ).values.flatten() - F10 = data.isel( - {axis_dim["X"]: xi_plus1, axis_dim["Y"]: yi_da, axis_dim["Z"]: zinds, "time": tinds} - ).values.flatten() - F01 = data.isel( - {axis_dim["X"]: xi_da, axis_dim["Y"]: yi_plus1, axis_dim["Z"]: zinds, "time": tinds} - ).values.flatten() - F11 = data.isel( - {axis_dim["X"]: xi_plus1, axis_dim["Y"]: yi_plus1, axis_dim["Z"]: zinds, "time": tinds} - ).values.flatten() - val += ( - ((1 - xsi) * (1 - eta) * F00 + xsi * (1 - eta) * F10 + (1 - xsi) * eta * F01 + xsi * eta * F11) - * tau_factor - * depth_factor + tti = np.clip(np.array([tii, tii, tii, tii]).flatten(), 0, data.shape[0] - 1) + ti_da = xr.DataArray(tti, dims=("points")) + for zii, depth_factor in zip(depth_slices, [1 - zeta, zeta], strict=False): + zii = np.clip(np.array([zii, zii, zii, zii]).flatten(), 0, data.shape[1] - 1) + zi_da = xr.DataArray(zii, dims=("points")) + + F = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) + F = F.data.reshape(-1, 4) + # TODO check if numpy can handle this more efficiently + # F = data.values[tti, zii, yii, xii].reshape(-1, 4) + interp_val = ( + (1 - xsi) * (1 - eta) * F[:, 0] + + xsi * (1 - eta) * F[:, 1] + + (1 - xsi) * eta * F[:, 2] + + xsi * eta * F[:, 3] ) + val += interp_val * tau_factor * depth_factor + return val From a775671f2cacbf23d4fab2a89265a8b8e553470f Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 09:26:35 +0200 Subject: [PATCH 18/58] Small speedup in _search_1d_array By broadcasting to avoid repeated array access in bcoord computation Also, adding a note that searching can be much quicker when grid spacing is uniform --- parcels/xgrid.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/parcels/xgrid.py b/parcels/xgrid.py index 4621430c8..b8d3f0c54 100644 --- a/parcels/xgrid.py +++ b/parcels/xgrid.py @@ -491,9 +491,16 @@ def _search_1d_array( if len(arr) < 2: return np.zeros(shape=x.shape, dtype=np.int32), np.zeros_like(x) index = np.searchsorted(arr, x, side="right") - 1 - index_next = np.clip(index + 1, 1, len(arr) - 1) # Ensure we don't go out of bounds - - bcoord = (x - arr[index]) / (arr[index_next] - arr[index]) + # Use broadcasting to avoid repeated array access + arr_index = arr[index] + arr_next = arr[np.clip(index + 1, 1, len(arr) - 1)] # Ensure we don't go out of bounds + bcoord = (x - arr_index) / (arr_next - arr_index) + + # TODO check how we can avoid searchsorted when grid spacing is uniform + # dx = arr[1] - arr[0] + # index = ((x - arr[0]) / dx).astype(int) + # index = np.clip(index, 0, len(arr) - 2) + # bcoord = (x - arr[index]) / dx index = np.where(x < arr[0], LEFT_OUT_OF_BOUNDS, index) index = np.where(x >= arr[-1], RIGHT_OUT_OF_BOUNDS, index) From dae70fb36984d62d4dd6b41f532dc96ac60b9e12 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 11:16:54 +0200 Subject: [PATCH 19/58] improving XLinear by using only 1 isel call --- parcels/application_kernels/interpolation.py | 74 ++++++++++++-------- 1 file changed, 44 insertions(+), 30 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 805fd7de8..ff27e0857 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -51,36 +51,50 @@ def XLinear( axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) data = field.data - val = np.zeros_like(tau) - - xii = np.clip(np.stack([xi, xi + 1, xi, xi + 1], axis=-1).flatten(), 0, data.shape[3] - 1) - yii = np.clip(np.stack([yi, yi, yi + 1, yi + 1], axis=-1).flatten(), 0, data.shape[2] - 1) - xi_da = xr.DataArray(xii, dims=("points")) - yi_da = xr.DataArray(yii, dims=("points")) - - timeslices = [ti, ti + 1] if tau.any() > 0 else [ti] - depth_slices = [zi, zi + 1] if zeta.any() > 0 else [zi] - for tii, tau_factor in zip(timeslices, [1 - tau, tau], strict=False): - tti = np.clip(np.array([tii, tii, tii, tii]).flatten(), 0, data.shape[0] - 1) - ti_da = xr.DataArray(tti, dims=("points")) - for zii, depth_factor in zip(depth_slices, [1 - zeta, zeta], strict=False): - zii = np.clip(np.array([zii, zii, zii, zii]).flatten(), 0, data.shape[1] - 1) - zi_da = xr.DataArray(zii, dims=("points")) - - F = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) - F = F.data.reshape(-1, 4) - # TODO check if numpy can handle this more efficiently - # F = data.values[tti, zii, yii, xii].reshape(-1, 4) - interp_val = ( - (1 - xsi) * (1 - eta) * F[:, 0] - + xsi * (1 - eta) * F[:, 1] - + (1 - xsi) * eta * F[:, 2] - + xsi * eta * F[:, 3] - ) - - val += interp_val * tau_factor * depth_factor - - return val + + # Time coordinates: 8 points at ti, then 8 points at ti+1 + ti = np.concatenate([np.repeat(ti, 8), np.repeat(ti + 1, 8)]) + + # Depth coordinates: 4 points at zi, 4 at zi+1, repeated for both time levels + zi = np.tile(np.stack([zi, zi, zi, zi, zi + 1, zi + 1, zi + 1, zi + 1], axis=1).flatten(), 2) + + # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated + yi = np.tile(np.stack([yi, yi, yi + 1, yi + 1], axis=1).flatten(), 4) + + # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth + xi = np.tile(np.stack([xi, xi + 1], axis=1).flatten(), 8) + + # Clip indices to valid ranges + ti = np.clip(ti, 0, data.shape[0] - 1) + zi = np.clip(zi, 0, data.shape[1] - 1) + yi = np.clip(yi, 0, data.shape[2] - 1) + xi = np.clip(xi, 0, data.shape[3] - 1) + + # Create DataArrays for indexing + xi_da = xr.DataArray(xi, dims=("points")) + yi_da = xr.DataArray(yi, dims=("points")) + ti_da = xr.DataArray(ti, dims=("points")) + zi_da = xr.DataArray(zi, dims=("points")) + + F = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) + F = F.data.reshape(-1, 2, 2, 4) + # TODO check if numpy can handle this more efficiently + # F = data.values[tti, zii, yii, xii].reshape(-1, 4) + + F_t0, F_t1 = F[:, 0, :, :], F[:, 1, :, :] + tau_expanded = tau[:, np.newaxis, np.newaxis] + F_time_interp = F_t0 * (1 - tau_expanded) + F_t1 * tau_expanded + + F_z0, F_z1 = F_time_interp[:, 0, :], F_time_interp[:, 1, :] + zeta_expanded = zeta[:, np.newaxis] + F_final = F_z0 * (1 - zeta_expanded) + F_z1 * zeta_expanded + + return ( + (1 - xsi) * (1 - eta) * F_final[:, 0] + + xsi * (1 - eta) * F_final[:, 1] + + (1 - xsi) * eta * F_final[:, 2] + + xsi * eta * F_final[:, 3] + ) def UXPiecewiseConstantFace( From a0c05732d9931b8cffa016eb2c20f295f73504b8 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 12:17:35 +0200 Subject: [PATCH 20/58] Further improving XLinear Interpolaiton By only expanding time and depth if needed --- parcels/application_kernels/interpolation.py | 52 ++++++++++++-------- 1 file changed, 31 insertions(+), 21 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index ff27e0857..6d622381c 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -52,17 +52,26 @@ def XLinear( axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) data = field.data + lenT = 2 if any(tau > 0) else 1 + lenZ = 2 if any(zeta > 0) else 1 + # Time coordinates: 8 points at ti, then 8 points at ti+1 - ti = np.concatenate([np.repeat(ti, 8), np.repeat(ti + 1, 8)]) + if lenT == 1: + ti = np.repeat(ti, lenZ * 4) + else: + ti = np.concatenate([np.repeat(ti, lenZ * 4), np.repeat(ti + 1, lenZ * 4)]) # Depth coordinates: 4 points at zi, 4 at zi+1, repeated for both time levels - zi = np.tile(np.stack([zi, zi, zi, zi, zi + 1, zi + 1, zi + 1, zi + 1], axis=1).flatten(), 2) + if lenZ == 1: + zi = np.repeat(zi, lenT * 4) + else: + zi = np.tile(np.stack([zi, zi, zi, zi, zi + 1, zi + 1, zi + 1, zi + 1], axis=1).flatten(), lenT) # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated - yi = np.tile(np.stack([yi, yi, yi + 1, yi + 1], axis=1).flatten(), 4) + yi = np.tile(np.stack([yi, yi, yi + 1, yi + 1], axis=1).flatten(), (lenT) * (lenZ)) # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth - xi = np.tile(np.stack([xi, xi + 1], axis=1).flatten(), 8) + xi = np.tile(np.stack([xi, xi + 1], axis=1).flatten(), 2 * (lenT) * (lenZ)) # Clip indices to valid ranges ti = np.clip(ti, 0, data.shape[0] - 1) @@ -71,30 +80,31 @@ def XLinear( xi = np.clip(xi, 0, data.shape[3] - 1) # Create DataArrays for indexing - xi_da = xr.DataArray(xi, dims=("points")) - yi_da = xr.DataArray(yi, dims=("points")) ti_da = xr.DataArray(ti, dims=("points")) zi_da = xr.DataArray(zi, dims=("points")) + yi_da = xr.DataArray(yi, dims=("points")) + xi_da = xr.DataArray(xi, dims=("points")) F = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) - F = F.data.reshape(-1, 2, 2, 4) + F = F.data.reshape(-1, lenT, lenZ, 4) # TODO check if numpy can handle this more efficiently # F = data.values[tti, zii, yii, xii].reshape(-1, 4) - F_t0, F_t1 = F[:, 0, :, :], F[:, 1, :, :] - tau_expanded = tau[:, np.newaxis, np.newaxis] - F_time_interp = F_t0 * (1 - tau_expanded) + F_t1 * tau_expanded - - F_z0, F_z1 = F_time_interp[:, 0, :], F_time_interp[:, 1, :] - zeta_expanded = zeta[:, np.newaxis] - F_final = F_z0 * (1 - zeta_expanded) + F_z1 * zeta_expanded - - return ( - (1 - xsi) * (1 - eta) * F_final[:, 0] - + xsi * (1 - eta) * F_final[:, 1] - + (1 - xsi) * eta * F_final[:, 2] - + xsi * eta * F_final[:, 3] - ) + if lenT == 2: + F_t0, F_t1 = F[:, 0, :, :], F[:, 1, :, :] + tau_expanded = tau[:, np.newaxis, np.newaxis] + F = F_t0 * (1 - tau_expanded) + F_t1 * tau_expanded + else: + F = F[:, 0, :, :] + + if lenZ == 2: + F_z0, F_z1 = F[:, 0, :], F[:, 1, :] + zeta_expanded = zeta[:, np.newaxis] + F = F_z0 * (1 - zeta_expanded) + F_z1 * zeta_expanded + else: + F = F[:, 0, :] + + return (1 - xsi) * (1 - eta) * F[:, 0] + xsi * (1 - eta) * F[:, 1] + (1 - xsi) * eta * F[:, 2] + xsi * eta * F[:, 3] def UXPiecewiseConstantFace( From 21d7c737372034a4665f570e590a4b89ecf20b41 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 13:52:37 +0200 Subject: [PATCH 21/58] Further speeding up access pattern creation in XLinear --- parcels/application_kernels/interpolation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 6d622381c..cb92f410d 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -52,8 +52,8 @@ def XLinear( axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) data = field.data - lenT = 2 if any(tau > 0) else 1 - lenZ = 2 if any(zeta > 0) else 1 + lenT = 2 if np.any(tau > 0) else 1 + lenZ = 2 if np.any(zeta > 0) else 1 # Time coordinates: 8 points at ti, then 8 points at ti+1 if lenT == 1: @@ -68,10 +68,10 @@ def XLinear( zi = np.tile(np.stack([zi, zi, zi, zi, zi + 1, zi + 1, zi + 1, zi + 1], axis=1).flatten(), lenT) # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated - yi = np.tile(np.stack([yi, yi, yi + 1, yi + 1], axis=1).flatten(), (lenT) * (lenZ)) + yi = np.tile(np.repeat([yi, yi + 1], 2), (lenT) * (lenZ)) # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth - xi = np.tile(np.stack([xi, xi + 1], axis=1).flatten(), 2 * (lenT) * (lenZ)) + xi = np.repeat([xi, xi + 1], 2 * (lenT) * (lenZ)) # Clip indices to valid ranges ti = np.clip(ti, 0, data.shape[0] - 1) From 587bc6e662e6b85769655d2337cd67acf57057c0 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 13:56:37 +0200 Subject: [PATCH 22/58] speeding up XLinear by reducing clipping operation --- parcels/application_kernels/interpolation.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index cb92f410d..d35c677ff 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -59,25 +59,23 @@ def XLinear( if lenT == 1: ti = np.repeat(ti, lenZ * 4) else: - ti = np.concatenate([np.repeat(ti, lenZ * 4), np.repeat(ti + 1, lenZ * 4)]) + ti_1 = np.clip(ti + 1, 0, data.shape[0] - 1) + ti = np.concatenate([np.repeat(ti, lenZ * 4), np.repeat(ti_1, lenZ * 4)]) # Depth coordinates: 4 points at zi, 4 at zi+1, repeated for both time levels if lenZ == 1: zi = np.repeat(zi, lenT * 4) else: - zi = np.tile(np.stack([zi, zi, zi, zi, zi + 1, zi + 1, zi + 1, zi + 1], axis=1).flatten(), lenT) + zi_1 = np.clip(zi + 1, 0, data.shape[1] - 1) + zi = np.tile(np.stack([zi, zi, zi, zi, zi_1, zi_1, zi_1, zi_1], axis=1).flatten(), lenT) # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated - yi = np.tile(np.repeat([yi, yi + 1], 2), (lenT) * (lenZ)) + yi_1 = np.clip(yi + 1, 0, data.shape[2] - 1) + yi = np.tile(np.repeat([yi, yi_1], 2), (lenT) * (lenZ)) # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth - xi = np.repeat([xi, xi + 1], 2 * (lenT) * (lenZ)) - - # Clip indices to valid ranges - ti = np.clip(ti, 0, data.shape[0] - 1) - zi = np.clip(zi, 0, data.shape[1] - 1) - yi = np.clip(yi, 0, data.shape[2] - 1) - xi = np.clip(xi, 0, data.shape[3] - 1) + xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) + xi = np.repeat([xi, xi_1], 2 * (lenT) * (lenZ)) # Create DataArrays for indexing ti_da = xr.DataArray(ti, dims=("points")) From dd9bfd215b4acced76ff3e6b3e7d00e511781847 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 16:25:24 +0200 Subject: [PATCH 23/58] Calling dask.compute() at end of XInterpolation --- parcels/application_kernels/interpolation.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index d35c677ff..33b3ea0dc 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -4,6 +4,7 @@ from typing import TYPE_CHECKING +import dask.array as dask import numpy as np import xarray as xr @@ -102,7 +103,10 @@ def XLinear( else: F = F[:, 0, :] - return (1 - xsi) * (1 - eta) * F[:, 0] + xsi * (1 - eta) * F[:, 1] + (1 - xsi) * eta * F[:, 2] + xsi * eta * F[:, 3] + value = ( + (1 - xsi) * (1 - eta) * F[:, 0] + xsi * (1 - eta) * F[:, 1] + (1 - xsi) * eta * F[:, 2] + xsi * eta * F[:, 3] + ) + return value.compute() if isinstance(value, dask.Array) else value def UXPiecewiseConstantFace( From 92c3d820f952a3165df0209bbe4c59e735536dd6 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 17:28:51 +0200 Subject: [PATCH 24/58] Fixing bug in creation of xi and yi indices Speeding this up to be explored later... --- parcels/application_kernels/interpolation.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 33b3ea0dc..08c992dd9 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -71,12 +71,14 @@ def XLinear( zi = np.tile(np.stack([zi, zi, zi, zi, zi_1, zi_1, zi_1, zi_1], axis=1).flatten(), lenT) # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated + # TODO consider using np.repeat for better performance yi_1 = np.clip(yi + 1, 0, data.shape[2] - 1) - yi = np.tile(np.repeat([yi, yi_1], 2), (lenT) * (lenZ)) + yi = np.tile(np.stack([yi, yi, yi_1, yi_1], axis=1).flatten(), (lenT) * (lenZ)) # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth + # TODO consider using np.repeat for better performance xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) - xi = np.repeat([xi, xi_1], 2 * (lenT) * (lenZ)) + xi = np.tile(np.stack([xi, xi_1, xi, xi_1], axis=1).flatten(), (lenT) * (lenZ)) # Create DataArrays for indexing ti_da = xr.DataArray(ti, dims=("points")) @@ -87,7 +89,7 @@ def XLinear( F = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) F = F.data.reshape(-1, lenT, lenZ, 4) # TODO check if numpy can handle this more efficiently - # F = data.values[tti, zii, yii, xii].reshape(-1, 4) + # F = data.values[ti, zi, yi, xi].reshape(-1, lenT, lenZ, 4) if lenT == 2: F_t0, F_t1 = F[:, 0, :, :], F[:, 1, :, :] From 9bd07f70a22d60373aa868f79990507a5be8e682 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Mon, 4 Aug 2025 18:03:17 +0200 Subject: [PATCH 25/58] Fixing vectorized kernels from running particles that start after endtime --- parcels/kernel.py | 2 +- tests/v4/test_particleset_execute.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/parcels/kernel.py b/parcels/kernel.py index 04cbe39dc..a5bdedcd5 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -296,7 +296,7 @@ def evaluate_pset(self, pset, endtime): except KeyError: pset.dt = np.where( sign_dt * (endtime - pset.time_nextloop) <= pset.dt, - sign_dt * (endtime - pset.time_nextloop), + np.where(sign_dt * (endtime - pset.time_nextloop) < 0, 0, sign_dt * (endtime - pset.time_nextloop)), pset.dt, ) res = None diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index 5daefa098..4a327c7cb 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -85,6 +85,22 @@ def test_execution_endtime(fieldset, starttime, endtime, dt): assert abs(pset.time_nextloop - endtime) < np.timedelta64(1, "ms") +@pytest.mark.parametrize("direction", [-1, 1]) +def test_pset_starttime_outside_execute(fieldset, direction): + if direction == 1: + endtime = fieldset.time_interval.left + np.timedelta64(8, "s") + start_times = [fieldset.time_interval.left + np.timedelta64(t, "s") for t in [0, 2, 10]] + else: + endtime = fieldset.time_interval.right - np.timedelta64(8, "s") + start_times = [fieldset.time_interval.right - np.timedelta64(t, "s") for t in [0, 2, 10]] + + pset = ParticleSet(fieldset, lon=np.zeros(len(start_times)), lat=np.zeros(len(start_times)), time=start_times) + + pset.execute(DoNothing, dt=direction * np.timedelta64(1, "s"), endtime=endtime) + assert pset.time_nextloop[0:1] == endtime + assert pset.time_nextloop[2] == start_times[2] + + @pytest.mark.parametrize( "starttime, runtime, dt", [(0, 10, 1), (0, 10, 3), (2, 16, 3), (20, 10, -1), (20, 0, -2), (5, 15, None)], From 067d08210d2ad204817cc8b9371af52832554b19 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 5 Aug 2025 08:02:12 +0200 Subject: [PATCH 26/58] Fixing xi and yi calculations --- parcels/application_kernels/interpolation.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 08c992dd9..c0f8caab5 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -71,14 +71,12 @@ def XLinear( zi = np.tile(np.stack([zi, zi, zi, zi, zi_1, zi_1, zi_1, zi_1], axis=1).flatten(), lenT) # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated - # TODO consider using np.repeat for better performance yi_1 = np.clip(yi + 1, 0, data.shape[2] - 1) - yi = np.tile(np.stack([yi, yi, yi_1, yi_1], axis=1).flatten(), (lenT) * (lenZ)) + yi = np.tile(np.repeat(np.column_stack([yi, yi_1]), 2), (lenT) * (lenZ)) # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth - # TODO consider using np.repeat for better performance xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) - xi = np.tile(np.stack([xi, xi_1, xi, xi_1], axis=1).flatten(), (lenT) * (lenZ)) + xi = np.repeat(np.column_stack([xi, xi_1]), 2 * (lenT) * (lenZ)) # Create DataArrays for indexing ti_da = xr.DataArray(ti, dims=("points")) From 9cd1d8ed0544dfcd04ba71ed61cff7a68e9fa082 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 5 Aug 2025 08:12:16 +0200 Subject: [PATCH 27/58] Using numpy for field indexing (instead of slower xarray.isel) --- parcels/application_kernels/interpolation.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index c0f8caab5..f026c6459 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -6,7 +6,6 @@ import dask.array as dask import numpy as np -import xarray as xr if TYPE_CHECKING: from parcels.field import Field @@ -50,7 +49,6 @@ def XLinear( yi, eta = position["Y"] zi, zeta = position["Z"] - axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) data = field.data lenT = 2 if np.any(tau > 0) else 1 @@ -78,16 +76,7 @@ def XLinear( xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) xi = np.repeat(np.column_stack([xi, xi_1]), 2 * (lenT) * (lenZ)) - # Create DataArrays for indexing - ti_da = xr.DataArray(ti, dims=("points")) - zi_da = xr.DataArray(zi, dims=("points")) - yi_da = xr.DataArray(yi, dims=("points")) - xi_da = xr.DataArray(xi, dims=("points")) - - F = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) - F = F.data.reshape(-1, lenT, lenZ, 4) - # TODO check if numpy can handle this more efficiently - # F = data.values[ti, zi, yi, xi].reshape(-1, lenT, lenZ, 4) + F = data.values[ti, zi, yi, xi].reshape(-1, lenT, lenZ, 4) if lenT == 2: F_t0, F_t1 = F[:, 0, :, :], F[:, 1, :, :] From 26ec76fc650e0d60332213f0c788e7851eec10e5 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 5 Aug 2025 08:15:31 +0200 Subject: [PATCH 28/58] Reducing number of support variables As these may have a large memory overhead for big particlesets (since they sclae with `npart`) --- parcels/application_kernels/interpolation.py | 21 ++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index f026c6459..275b549e4 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -76,24 +76,25 @@ def XLinear( xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) xi = np.repeat(np.column_stack([xi, xi_1]), 2 * (lenT) * (lenZ)) - F = data.values[ti, zi, yi, xi].reshape(-1, lenT, lenZ, 4) + corner_data = data.values[ti, zi, yi, xi].reshape(-1, lenT, lenZ, 4) if lenT == 2: - F_t0, F_t1 = F[:, 0, :, :], F[:, 1, :, :] - tau_expanded = tau[:, np.newaxis, np.newaxis] - F = F_t0 * (1 - tau_expanded) + F_t1 * tau_expanded + tau = tau[:, np.newaxis, np.newaxis] + corner_data = corner_data[:, 0, :, :] * (1 - tau) + corner_data[:, 1, :, :] * tau else: - F = F[:, 0, :, :] + corner_data = corner_data[:, 0, :, :] if lenZ == 2: - F_z0, F_z1 = F[:, 0, :], F[:, 1, :] - zeta_expanded = zeta[:, np.newaxis] - F = F_z0 * (1 - zeta_expanded) + F_z1 * zeta_expanded + zeta = zeta[:, np.newaxis] + corner_data = corner_data[:, 0, :] * (1 - zeta) + corner_data[:, 1, :] * zeta else: - F = F[:, 0, :] + corner_data = corner_data[:, 0, :] value = ( - (1 - xsi) * (1 - eta) * F[:, 0] + xsi * (1 - eta) * F[:, 1] + (1 - xsi) * eta * F[:, 2] + xsi * eta * F[:, 3] + (1 - xsi) * (1 - eta) * corner_data[:, 0] + + xsi * (1 - eta) * corner_data[:, 1] + + (1 - xsi) * eta * corner_data[:, 2] + + xsi * eta * corner_data[:, 3] ) return value.compute() if isinstance(value, dask.Array) else value From ded4ebdf1a9d897a39f57ef9d0ae77c4717741e6 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 5 Aug 2025 09:51:50 +0200 Subject: [PATCH 29/58] Reverting back to xarray.isel, as dask only works this way(?) Doing direct numpy selection leads to an ndarray, and this is much slower on big fieldsets --- parcels/application_kernels/interpolation.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 275b549e4..17263849d 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -6,6 +6,7 @@ import dask.array as dask import numpy as np +import xarray as xr if TYPE_CHECKING: from parcels.field import Field @@ -49,6 +50,7 @@ def XLinear( yi, eta = position["Y"] zi, zeta = position["Z"] + axis_dim = field.grid.get_axis_dim_mapping(field.data.dims) data = field.data lenT = 2 if np.any(tau > 0) else 1 @@ -76,7 +78,14 @@ def XLinear( xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) xi = np.repeat(np.column_stack([xi, xi_1]), 2 * (lenT) * (lenZ)) - corner_data = data.values[ti, zi, yi, xi].reshape(-1, lenT, lenZ, 4) + # Create DataArrays for indexing + ti_da = xr.DataArray(ti, dims=("points")) + zi_da = xr.DataArray(zi, dims=("points")) + yi_da = xr.DataArray(yi, dims=("points")) + xi_da = xr.DataArray(xi, dims=("points")) + + corner_data = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) + corner_data = corner_data.data.reshape(-1, lenT, lenZ, 4) if lenT == 2: tau = tau[:, np.newaxis, np.newaxis] From a55db9178bc2856e630d83ddfdab4e3891fe5a27 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 5 Aug 2025 16:02:18 +0200 Subject: [PATCH 30/58] Another fix to interpolation xi vector generation --- parcels/application_kernels/interpolation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 17263849d..15f394d24 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -76,7 +76,7 @@ def XLinear( # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) - xi = np.repeat(np.column_stack([xi, xi_1]), 2 * (lenT) * (lenZ)) + xi = np.tile(np.column_stack([xi, xi_1, xi, xi_1]).flatten(), (lenT) * (lenZ)) # Create DataArrays for indexing ti_da = xr.DataArray(ti, dims=("points")) From 4c89533da79f5197006ee084e048da8a5279cc1d Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 5 Aug 2025 16:59:32 +0200 Subject: [PATCH 31/58] Extending tests in test_particleset_execute also to more particles --- tests/v4/test_particleset_execute.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index 4a327c7cb..c056d4f4e 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -43,8 +43,9 @@ def DeleteKernel(particle, fieldset, time): # pragma: no cover assert pset.size == 80 -def test_pset_stop_simulation(fieldset): - pset = ParticleSet(fieldset, lon=0, lat=0, pclass=Particle) +@pytest.mark.parametrize("npart", [1, 100]) +def test_pset_stop_simulation(fieldset, npart): + pset = ParticleSet(fieldset, lon=np.zeros(npart), lat=np.zeros(npart), pclass=Particle) def Delete(particle, fieldset, time): # pragma: no cover if time >= fieldset.time_interval.left + np.timedelta64(4, "s"): @@ -105,17 +106,19 @@ def test_pset_starttime_outside_execute(fieldset, direction): "starttime, runtime, dt", [(0, 10, 1), (0, 10, 3), (2, 16, 3), (20, 10, -1), (20, 0, -2), (5, 15, None)], ) -def test_execution_runtime(fieldset, starttime, runtime, dt): +@pytest.mark.parametrize("npart", [1, 10]) +def test_execution_runtime(fieldset, starttime, runtime, dt, npart): starttime = fieldset.time_interval.left + np.timedelta64(starttime, "s") runtime = np.timedelta64(runtime, "s") sign_dt = 1 if dt is None else np.sign(dt) dt = np.timedelta64(dt, "s") - pset = ParticleSet(fieldset, time=starttime, lon=0, lat=0) + pset = ParticleSet(fieldset, time=starttime, lon=np.zeros(npart), lat=np.zeros(npart)) pset.execute(DoNothing, runtime=runtime, dt=dt) - assert abs(pset.time_nextloop - starttime - runtime * sign_dt) < np.timedelta64(1, "ms") + assert all([abs(p.time_nextloop - starttime - runtime * sign_dt) < np.timedelta64(1, "ms") for p in pset]) -def test_execution_fail_python_exception(fieldset, npart=10): +@pytest.mark.parametrize("npart", [1, 100]) +def test_execution_fail_python_exception(fieldset, npart): pset = ParticleSet(fieldset, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)) def PythonFail(particle, fieldset, time): # pragma: no cover From 95f0726a9aec0666f20f4e17b9c601e9c01eeeb1 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 6 Aug 2025 07:13:54 +0200 Subject: [PATCH 32/58] Fixing bug in reshaping of corner_data in XLinear --- parcels/application_kernels/interpolation.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 15f394d24..03e28c1d4 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -85,19 +85,19 @@ def XLinear( xi_da = xr.DataArray(xi, dims=("points")) corner_data = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) - corner_data = corner_data.data.reshape(-1, lenT, lenZ, 4) + corner_data = corner_data.data.reshape(lenT, lenZ, -1, 4) if lenT == 2: - tau = tau[:, np.newaxis, np.newaxis] - corner_data = corner_data[:, 0, :, :] * (1 - tau) + corner_data[:, 1, :, :] * tau + tau = tau[np.newaxis, :, np.newaxis] + corner_data = corner_data[0, :, :, :] * (1 - tau) + corner_data[1, :, :, :] * tau else: - corner_data = corner_data[:, 0, :, :] + corner_data = corner_data[0, :, :, :] if lenZ == 2: zeta = zeta[:, np.newaxis] - corner_data = corner_data[:, 0, :] * (1 - zeta) + corner_data[:, 1, :] * zeta + corner_data = corner_data[0, :, :] * (1 - zeta) + corner_data[1, :, :] * zeta else: - corner_data = corner_data[:, 0, :] + corner_data = corner_data[0, :, :] value = ( (1 - xsi) * (1 - eta) * corner_data[:, 0] From e0faea6ae45062a51d0a83ca541b2fcd0b30b504 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 6 Aug 2025 11:47:12 +0200 Subject: [PATCH 33/58] Fix to how zi array is compiled in XLinear --- parcels/application_kernels/interpolation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 03e28c1d4..b52d5843e 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -68,7 +68,7 @@ def XLinear( zi = np.repeat(zi, lenT * 4) else: zi_1 = np.clip(zi + 1, 0, data.shape[1] - 1) - zi = np.tile(np.stack([zi, zi, zi, zi, zi_1, zi_1, zi_1, zi_1], axis=1).flatten(), lenT) + zi = np.tile(np.array([zi, zi, zi, zi, zi_1, zi_1, zi_1, zi_1]).flatten(), lenT) # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated yi_1 = np.clip(yi + 1, 0, data.shape[2] - 1) From 408bcf6b45ddb07a4f1346d5facfa0c41d60d2af Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 6 Aug 2025 17:38:06 +0200 Subject: [PATCH 34/58] Clarifying docstring in XLinear interpolation --- parcels/application_kernels/interpolation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index b52d5843e..161e5f2d0 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -70,11 +70,11 @@ def XLinear( zi_1 = np.clip(zi + 1, 0, data.shape[1] - 1) zi = np.tile(np.array([zi, zi, zi, zi, zi_1, zi_1, zi_1, zi_1]).flatten(), lenT) - # Y coordinates: [yi, yi, yi+1, yi+1] pattern repeated + # Y coordinates: [yi, yi, yi+1, yi+1] for each spatial point, repeated for time/depth yi_1 = np.clip(yi + 1, 0, data.shape[2] - 1) yi = np.tile(np.repeat(np.column_stack([yi, yi_1]), 2), (lenT) * (lenZ)) - # X coordinates: [xi, xi+1] for each spatial point, repeated for time/depth + # X coordinates: [xi, xi+1, xi, xi+1] for each spatial point, repeated for time/depth xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) xi = np.tile(np.column_stack([xi, xi_1, xi, xi_1]).flatten(), (lenT) * (lenZ)) From db776fb59333dd158a35a2cdaeb02cf76c299ff7 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 7 Aug 2025 08:41:36 +0200 Subject: [PATCH 35/58] fixing merge conflict errors --- parcels/_datasets/structured/generated.py | 4 ++-- parcels/application_kernels/advection.py | 14 ++++++++------ parcels/kernel.py | 7 +++++-- parcels/particleset.py | 2 +- tests/v4/test_advection.py | 12 +++--------- 5 files changed, 19 insertions(+), 20 deletions(-) diff --git a/parcels/_datasets/structured/generated.py b/parcels/_datasets/structured/generated.py index ac9d3fcad..0a91f6864 100644 --- a/parcels/_datasets/structured/generated.py +++ b/parcels/_datasets/structured/generated.py @@ -29,8 +29,8 @@ def radial_rotation_dataset(xdim=200, ydim=200): # Define 2D flat, square field x0 = 30.0 # Define the origin to be the centre of the Field. y0 = 30.0 - U = np.zeros((1, 1, ydim, xdim), dtype=np.float32) - V = np.zeros((1, 1, ydim, xdim), dtype=np.float32) + U = np.zeros((2, 1, ydim, xdim), dtype=np.float32) + V = np.zeros((2, 1, ydim, xdim), dtype=np.float32) omega = 2 * np.pi / 86400.0 # Define the rotational period as 1 day. diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index d21111d98..01d9a07e1 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -116,13 +116,15 @@ def AdvectionRK45(particle, fieldset, time): # pragma: no cover and doubled if error is smaller than 1/10th of tolerance. """ dt = particle.next_dt / np.timedelta64(1, "s") # TODO: improve API for converting dt to seconds - if dt > fieldset.RK45_max_dt: - dt = fieldset.RK45_max_dt - particle.next_dt = fieldset.RK45_max_dt * np.timedelta64(1, "s") - if dt < fieldset.RK45_min_dt: - particle.next_dt = fieldset.RK45_min_dt * np.timedelta64(1, "s") - return StatusCode.Repeat + particle.next_dt = np.where( + dt > fieldset.RK45_max_dt, fieldset.RK45_max_dt * np.timedelta64(1, "s"), particle.next_dt + ) + particle.next_dt = np.where( + dt < fieldset.RK45_min_dt, fieldset.RK45_min_dt * np.timedelta64(1, "s"), particle.next_dt + ) particle.dt = particle.next_dt + particle.state = np.where(dt < fieldset.RK45_min_dt, StatusCode.Repeat, particle.state) + dt = np.where(dt > fieldset.RK45_max_dt, fieldset.RK45_max_dt, dt) c = [1.0 / 4.0, 3.0 / 8.0, 12.0 / 13.0, 1.0, 1.0 / 2.0] A = [ diff --git a/parcels/kernel.py b/parcels/kernel.py index a7eb9edac..c7e4b9b98 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -291,8 +291,11 @@ def evaluate_pset(self, pset, endtime): pre_dt = pset.dt try: # Use next_dt from AdvectionRK45 if it is set - if abs(endtime - pset.time_nextloop) < abs(pset.next_dt) - np.timedelta64(1000, "ns"): - pset.next_dt = sign_dt * (endtime - pset.time_nextloop) + pset.next_dt = np.where( + sign_dt * (endtime - pset.time_nextloop) <= pset.next_dt, + np.where(sign_dt * (endtime - pset.time_nextloop) < 0, 0, sign_dt * (endtime - pset.time_nextloop)), + pset.next_dt, + ) except KeyError: pset.dt = np.where( sign_dt * (endtime - pset.time_nextloop) <= pset.dt, diff --git a/parcels/particleset.py b/parcels/particleset.py index 9da6383de..8d2e4b48f 100644 --- a/parcels/particleset.py +++ b/parcels/particleset.py @@ -150,7 +150,7 @@ def __init__( if isinstance(v.initial, attrgetter): initial = v.initial(self) else: - initial = [np.array(v.initial, dtype=v.dtype)] * len(trajectory_ids) + initial = np.repeat(v.initial, len(trajectory_ids)).astype(v.dtype) self._data[v.name] = initial # update initial values provided on ParticleSet creation diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index 1a3c873f1..a571397c7 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -79,9 +79,9 @@ def test_advection_zonal_periodic(): startlon = np.array([0.5, 0.4]) pset = ParticleSet(fieldset, pclass=PeriodicParticle, lon=startlon, lat=[0.5, 0.5]) pset.execute([AdvectionEE, periodicBC], runtime=np.timedelta64(40, "s"), dt=np.timedelta64(1, "s")) - np.testing.assertallclose(pset.total_dlon, 4, atol=1e-5) - np.testing.assertallclose(pset.lon_nextloop, startlon, atol=1e-5) - np.testing.assertallclose(pset.lat_nextloop, 0.5, atol=1e-5) + np.testing.assert_allclose(pset.total_dlon, 4, atol=1e-5) + np.testing.assert_allclose(pset.lon_nextloop, startlon, atol=1e-5) + np.testing.assert_allclose(pset.lat_nextloop, 0.5, atol=1e-5) def test_horizontal_advection_in_3D_flow(npart=10): @@ -268,12 +268,6 @@ def test_moving_eddy(method, rtol): # Use RK45Particles to set next_dt RK45Particles = Particle.add_variable(Variable("next_dt", initial=dt, dtype=np.timedelta64)) fieldset.add_constant("RK45_tol", 1e-6) - elif method in ["AdvDiffEM", "AdvDiffM1"]: - # Add zero diffusivity field for diffusion kernels - ds["Kh"] = (["time", "depth", "YG", "XG"], np.full((len(ds["time"]), 2, 2, 2), 0)) - fieldset.add_field(Field("Kh", ds["Kh"], grid, interp_method=XLinear), "Kh_zonal") - fieldset.add_field(Field("Kh", ds["Kh"], grid, interp_method=XLinear), "Kh_meridional") - fieldset.add_constant("dres", 0.1) pclass = RK45Particles if method == "RK45" else Particle pset = ParticleSet( From a8e4e6854f40502aaaa463984345369ebbd5c405 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 7 Aug 2025 11:23:54 +0200 Subject: [PATCH 36/58] fix CLinear interpolation for grids without depth or time dimension --- parcels/application_kernels/interpolation.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/parcels/application_kernels/interpolation.py b/parcels/application_kernels/interpolation.py index 161e5f2d0..1df817a2a 100644 --- a/parcels/application_kernels/interpolation.py +++ b/parcels/application_kernels/interpolation.py @@ -79,13 +79,16 @@ def XLinear( xi = np.tile(np.column_stack([xi, xi_1, xi, xi_1]).flatten(), (lenT) * (lenZ)) # Create DataArrays for indexing - ti_da = xr.DataArray(ti, dims=("points")) - zi_da = xr.DataArray(zi, dims=("points")) - yi_da = xr.DataArray(yi, dims=("points")) - xi_da = xr.DataArray(xi, dims=("points")) - - corner_data = data.isel({axis_dim["X"]: xi_da, axis_dim["Y"]: yi_da, axis_dim["Z"]: zi_da, "time": ti_da}) - corner_data = corner_data.data.reshape(lenT, lenZ, -1, 4) + selection_dict = { + axis_dim["X"]: xr.DataArray(xi, dims=("points")), + axis_dim["Y"]: xr.DataArray(yi, dims=("points")), + } + if "Z" in axis_dim: + selection_dict[axis_dim["Z"]] = xr.DataArray(zi, dims=("points")) + if "time" in data.dims: + selection_dict["time"] = xr.DataArray(ti, dims=("points")) + + corner_data = data.isel(selection_dict).data.reshape(lenT, lenZ, len(xsi), 4) if lenT == 2: tau = tau[np.newaxis, :, np.newaxis] From d4b1adc50de250f7e9ab904a8ca6b5afaa147b18 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 7 Aug 2025 11:56:22 +0200 Subject: [PATCH 37/58] Quick clean-up of RK45 in vectorized kernels --- parcels/application_kernels/advection.py | 27 +++++++++++++----------- tests/v4/test_advection.py | 18 ++++++++-------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index 01d9a07e1..e4d4ecce0 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -119,11 +119,10 @@ def AdvectionRK45(particle, fieldset, time): # pragma: no cover particle.next_dt = np.where( dt > fieldset.RK45_max_dt, fieldset.RK45_max_dt * np.timedelta64(1, "s"), particle.next_dt ) - particle.next_dt = np.where( - dt < fieldset.RK45_min_dt, fieldset.RK45_min_dt * np.timedelta64(1, "s"), particle.next_dt - ) - particle.dt = particle.next_dt + repeat_particles = dt < fieldset.RK45_min_dt + particle.next_dt = np.where(repeat_particles, fieldset.RK45_min_dt * np.timedelta64(1, "s"), particle.next_dt) particle.state = np.where(dt < fieldset.RK45_min_dt, StatusCode.Repeat, particle.state) + particle.dt = particle.next_dt dt = np.where(dt > fieldset.RK45_max_dt, fieldset.RK45_max_dt, dt) c = [1.0 / 4.0, 3.0 / 8.0, 12.0 / 13.0, 1.0, 1.0 / 2.0] @@ -166,14 +165,18 @@ def AdvectionRK45(particle, fieldset, time): # pragma: no cover lon_5th = (u1 * b5[0] + u2 * b5[1] + u3 * b5[2] + u4 * b5[3] + u5 * b5[4] + u6 * b5[5]) * dt lat_5th = (v1 * b5[0] + v2 * b5[1] + v3 * b5[2] + v4 * b5[3] + v5 * b5[4] + v6 * b5[5]) * dt - kappa = math.sqrt(math.pow(lon_5th - lon_4th, 2) + math.pow(lat_5th - lat_4th, 2)) - if (kappa <= fieldset.RK45_tol) or (math.fabs(dt) < math.fabs(fieldset.RK45_min_dt)): - particle.dlon += lon_4th - particle.dlat += lat_4th - if (kappa <= fieldset.RK45_tol / 10) and (math.fabs(dt * 2) <= math.fabs(fieldset.RK45_max_dt)): - particle.next_dt *= 2 - else: - particle.next_dt /= 2 + kappa = np.sqrt(np.pow(lon_5th - lon_4th, 2) + np.pow(lat_5th - lat_4th, 2)) + + good_particles = (kappa <= fieldset.RK45_tol / 10) & (np.fabs(dt * 2) <= np.fabs(fieldset.RK45_max_dt)) + repeat_particles = (kappa > fieldset.RK45_tol) & (np.fabs(dt) > np.fabs(fieldset.RK45_min_dt)) + + particle.next_dt = np.where(good_particles, particle.next_dt * 2, particle.next_dt) + particle.dlon += np.where(good_particles, lon_4th, 0) + particle.dlat += np.where(good_particles, lat_4th, 0) + + particle.next_dt = np.where(repeat_particles, particle.next_dt / 2, particle.next_dt) + particle.state = np.where(repeat_particles, StatusCode.Repeat, particle.state) + if np.any(repeat_particles): return StatusCode.Repeat diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index a571397c7..7e3efd89c 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -238,7 +238,7 @@ def test_radialrotation(npart=10): ("AdvDiffM1", 1e-2), ("RK4", 1e-5), ("RK4_3D", 1e-5), - ("RK45", 1e-5), + ("RK45", 1e-2), # TODO set this to 1e-5 when RK45 is fixed ], ) def test_moving_eddy(method, rtol): @@ -266,8 +266,8 @@ def test_moving_eddy(method, rtol): if method == "RK45": # Use RK45Particles to set next_dt - RK45Particles = Particle.add_variable(Variable("next_dt", initial=dt, dtype=np.timedelta64)) - fieldset.add_constant("RK45_tol", 1e-6) + RK45Particles = Particle.add_variable(Variable("next_dt", initial=dt, dtype="timedelta64[s]")) + fieldset.add_constant("RK45_tol", 1e-5) pclass = RK45Particles if method == "RK45" else Particle pset = ParticleSet( @@ -309,8 +309,8 @@ def test_decaying_moving_eddy(method, rtol): if method == "RK45": # Use RK45Particles to set next_dt - RK45Particles = Particle.add_variable(Variable("next_dt", initial=dt, dtype=np.timedelta64)) - fieldset.add_constant("RK45_tol", 1e-6) + RK45Particles = Particle.add_variable(Variable("next_dt", initial=dt, dtype="timedelta64[s]")) + fieldset.add_constant("RK45_tol", 1e-5) pclass = RK45Particles if method == "RK45" else Particle @@ -340,8 +340,8 @@ def truth_moving(x_0, y_0, t): @pytest.mark.parametrize( "method, atol", [ - ("RK4", 1), - ("RK45", 1), + pytest.param("RK4", 1), + pytest.param("RK45", 1, marks=pytest.mark.skip(reason="still needs to be fixed")), ], ) @pytest.mark.parametrize("grid_type", ["A"]) # TODO also implement C-grid once available @@ -373,10 +373,10 @@ def test_gyre_flowfields(method, grid_type, atol, flowfield): [ Variable("p", initial=0.0, dtype=np.float32), Variable("p_start", initial=0.0, dtype=np.float32), - Variable("next_dt", initial=dt, dtype=np.timedelta64), + Variable("next_dt", initial=dt, dtype="timedelta64[s]"), ] ) - fieldset.add_constant("RK45_tol", 1e-6) + fieldset.add_constant("RK45_tol", 1e-3) else: SampleParticle = Particle.add_variable( [Variable("p", initial=0.0, dtype=np.float32), Variable("p_start", initial=0.0, dtype=np.float32)] From 9ee900e99e91c95411d266961881575df830044e Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 7 Aug 2025 12:19:33 +0200 Subject: [PATCH 38/58] fixing merge issues --- parcels/field.py | 9 +++++---- tests/v4/test_advection.py | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/parcels/field.py b/parcels/field.py index 7c2170ac9..de9c3fa43 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -18,6 +18,7 @@ ) from parcels.application_kernels.interpolation import UXPiecewiseLinearNode, XLinear, ZeroInterpolator from parcels.particle import KernelParticle +from parcels.particleset import ParticleSet from parcels.tools.converters import ( UnitConverter, unitconverters_map, @@ -35,9 +36,9 @@ def _deal_with_errors(error, key, vector_type: VectorType): - if isinstance(key, KernelParticle): + if isinstance(key, ParticleSet): key.state = AllParcelsErrorCodes[type(error)] - elif isinstance(key[-1], KernelParticle): + elif isinstance(key[-1], ParticleSet): key[-1].state = AllParcelsErrorCodes[type(error)] else: raise RuntimeError(f"{error}. Error could not be handled because particle was not part of the Field Sampling.") @@ -255,7 +256,7 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): def __getitem__(self, key): self._check_velocitysampling() try: - if isinstance(key, KernelParticle): + if isinstance(key, (KernelParticle, ParticleSet)): return self.eval(key.time, key.depth, key.lat, key.lon, key) else: return self.eval(*key) @@ -347,7 +348,7 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): def __getitem__(self, key): try: - if isinstance(key, KernelParticle): + if isinstance(key, (KernelParticle, ParticleSet)): return self.eval(key.time, key.depth, key.lat, key.lon, key) else: return self.eval(*key) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index ab6142dd7..12a9af05d 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -238,7 +238,7 @@ def test_radialrotation(npart=10): ("AdvDiffM1", 1e-2), ("RK4", 1e-5), ("RK4_3D", 1e-5), - ("RK45", 1e-2), # TODO set this to 1e-5 when RK45 is fixed + pytest.param("RK45", 1, marks=pytest.mark.skip(reason="still needs to be fixed")), ], ) def test_moving_eddy(method, rtol): @@ -293,7 +293,7 @@ def truth_moving(x_0, y_0, t): [ ("EE", 1e-2), ("RK4", 1e-5), - ("RK45", 1e-3), + pytest.param("RK45", 1e-5, marks=pytest.mark.skip(reason="still needs to be fixed")), ], ) def test_decaying_moving_eddy(method, rtol): From 354b9bebc7cc28e0de6a100958e97dd638811ba2 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 08:52:45 +0200 Subject: [PATCH 39/58] Changing time_interval.__contains__ to is_all_time_in_interval() As suggested by @VeckoTheGecko in review of #2122 (https://github.com/OceanParcels/Parcels/pull/2122#discussion_r2263041338) --- parcels/_core/utils/time.py | 4 ++-- parcels/_index_search.py | 2 +- tests/v4/utils/test_time.py | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/parcels/_core/utils/time.py b/parcels/_core/utils/time.py index 5236987a2..94dff4671 100644 --- a/parcels/_core/utils/time.py +++ b/parcels/_core/utils/time.py @@ -45,8 +45,8 @@ def __init__(self, left: T, right: T) -> None: self.left = left self.right = right - def __contains__(self, item: T) -> bool: - item = np.atleast_1d(item) + def is_all_time_in_interval(self, time): + item = np.atleast_1d(time) return (self.left <= item).all() and (item <= self.right).all() def __repr__(self) -> str: diff --git a/parcels/_index_search.py b/parcels/_index_search.py index 600cfe724..845c0eb5e 100644 --- a/parcels/_index_search.py +++ b/parcels/_index_search.py @@ -41,7 +41,7 @@ def _search_time_index(field: Field, time: datetime): if field.time_interval is None: return np.zeros(shape=time.shape, dtype=np.float32), np.zeros(shape=time.shape, dtype=np.int32) - if time not in field.time_interval: + if not field.time_interval.is_all_time_in_interval(time): _raise_time_extrapolation_error(time, field=None) ti = np.searchsorted(field.data.time.data, time, side="right") - 1 diff --git a/tests/v4/utils/test_time.py b/tests/v4/utils/test_time.py index b5105ac3f..497a4eb29 100644 --- a/tests/v4/utils/test_time.py +++ b/tests/v4/utils/test_time.py @@ -84,9 +84,9 @@ def test_time_interval_contains(interval): right = interval.right middle = left + (right - left) / 2 - assert left in interval - assert right in interval - assert middle in interval + assert interval.is_all_time_in_interval(left) + assert interval.is_all_time_in_interval(right) + assert interval.is_all_time_in_interval(middle) @given(time_interval_strategy(calendar="365_day"), time_interval_strategy(calendar="365_day")) From d6677b2bf9088ed7c8c2931e244ee42a9b07b84a Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 09:00:28 +0200 Subject: [PATCH 40/58] Renaming test function to be more explicit --- tests/v4/test_interpolation.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/v4/test_interpolation.py b/tests/v4/test_interpolation.py index 0d76442d1..959066992 100644 --- a/tests/v4/test_interpolation.py +++ b/tests/v4/test_interpolation.py @@ -39,8 +39,7 @@ def test_interpolation_mesh_type(mesh_type, npart=10): assert U.eval(time, 0, lat, 0, applyConversion=False) == 1 -def test_default_interpolator(): - """Test that the default interpolator is set correctly.""" +def test_default_interpolator_set_correctly(): ds = simple_UV_dataset() grid = XGrid.from_dataset(ds) U = Field("U", ds["U"], grid) From 535371a82d9f0dc50c81aabf08b75c1a14ff6ce6 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 09:27:42 +0200 Subject: [PATCH 41/58] Expanding unit test to make its function clearer --- tests/v4/test_particleset_execute.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index c056d4f4e..12d5b423f 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -86,20 +86,26 @@ def test_execution_endtime(fieldset, starttime, endtime, dt): assert abs(pset.time_nextloop - endtime) < np.timedelta64(1, "ms") -@pytest.mark.parametrize("direction", [-1, 1]) -def test_pset_starttime_outside_execute(fieldset, direction): - if direction == 1: - endtime = fieldset.time_interval.left + np.timedelta64(8, "s") - start_times = [fieldset.time_interval.left + np.timedelta64(t, "s") for t in [0, 2, 10]] - else: - endtime = fieldset.time_interval.right - np.timedelta64(8, "s") - start_times = [fieldset.time_interval.right - np.timedelta64(t, "s") for t in [0, 2, 10]] +def test_dont_run_particles_outside_starttime(fieldset): + # Test forward in time (note third particle is outside endtime) + start_times = [fieldset.time_interval.left + np.timedelta64(t, "s") for t in [0, 2, 10]] + endtime = fieldset.time_interval.left + np.timedelta64(8, "s") + + pset = ParticleSet(fieldset, lon=np.zeros(len(start_times)), lat=np.zeros(len(start_times)), time=start_times) + + pset.execute(DoNothing, dt=np.timedelta64(1, "s"), endtime=endtime) + assert pset.time_nextloop[0:1] == endtime + assert pset.time_nextloop[2] == start_times[2] # this particle has not been executed + + # Test backward in time (note third particle is outside endtime) + start_times = [fieldset.time_interval.right - np.timedelta64(t, "s") for t in [0, 2, 10]] + endtime = fieldset.time_interval.right - np.timedelta64(8, "s") pset = ParticleSet(fieldset, lon=np.zeros(len(start_times)), lat=np.zeros(len(start_times)), time=start_times) - pset.execute(DoNothing, dt=direction * np.timedelta64(1, "s"), endtime=endtime) + pset.execute(DoNothing, dt=-np.timedelta64(1, "s"), endtime=endtime) assert pset.time_nextloop[0:1] == endtime - assert pset.time_nextloop[2] == start_times[2] + assert pset.time_nextloop[2] == start_times[2] # this particle has not been executed @pytest.mark.parametrize( From d0e6573b031f1eab4a9d65247a03cbe6ec0d47ff Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 09:32:30 +0200 Subject: [PATCH 42/58] Adding vectorised tests for _search_1d_array Including also a unit test with mixed outofbounds and valid locations --- tests/v4/test_xgrid.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/v4/test_xgrid.py b/tests/v4/test_xgrid.py index b23b981f1..d132e5900 100644 --- a/tests/v4/test_xgrid.py +++ b/tests/v4/test_xgrid.py @@ -156,7 +156,7 @@ def corner_to_cell_center_points(lat, lon): @pytest.mark.parametrize( "array, x, expected_xi, expected_xsi", [ - (np.array([1, 2, 3, 4, 5]), 1.1, 0, 0.1), + (np.array([1, 2, 3, 4, 5]), (1.1, 2.1), (0, 1), (0.1, 0.1)), (np.array([1, 2, 3, 4, 5]), 2.1, 1, 0.1), (np.array([1, 2, 3, 4, 5]), 3.1, 2, 0.1), (np.array([1, 2, 3, 4, 5]), 4.5, 3, 0.5), @@ -164,8 +164,8 @@ def corner_to_cell_center_points(lat, lon): ) def test_search_1d_array(array, x, expected_xi, expected_xsi): xi, xsi = _search_1d_array(array, x) - assert xi == expected_xi - assert np.isclose(xsi, expected_xsi) + np.testing.assert_array_equal(xi, expected_xi) + np.testing.assert_allclose(xsi, expected_xsi) @pytest.mark.parametrize( @@ -180,6 +180,18 @@ def test_search_1d_array_out_of_bounds(array, x, expected_xi): assert xi == expected_xi +@pytest.mark.parametrize( + "array, x, expected_xi", + [ + (np.array([1, 2, 3, 4, 5]), (-0.1, 2.5), (LEFT_OUT_OF_BOUNDS, 1)), + (np.array([1, 2, 3, 4, 5]), (6.5, 1), (RIGHT_OUT_OF_BOUNDS, 0)), + ], +) +def test_search_1d_array_some_out_of_bounds(array, x, expected_xi): + xi, _ = _search_1d_array(array, x) + np.testing.assert_array_equal(xi, expected_xi) + + @pytest.mark.parametrize( "ds, da_name, expected", [ From f375db0005b9f4750baeca7f70ac3c164badaf31 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 13:37:42 +0200 Subject: [PATCH 43/58] vectorized version of _search_indices_curvilinear_2d --- parcels/_index_search.py | 77 +++++++++++++++------------------------- 1 file changed, 29 insertions(+), 48 deletions(-) diff --git a/parcels/_index_search.py b/parcels/_index_search.py index 845c0eb5e..c3230797a 100644 --- a/parcels/_index_search.py +++ b/parcels/_index_search.py @@ -254,12 +254,12 @@ def _search_indices_curvilinear_2d( ): yi, xi = yi_guess, xi_guess if yi is None: - yi = int(grid.ydim / 2) - 1 + yi = (int(grid.ydim / 2) - 1) * np.ones(len(x), dtype=int) if xi is None: - xi = int(grid.xdim / 2) - 1 + xi = (int(grid.xdim / 2) - 1) * np.ones(len(x), dtype=int) - xsi = eta = -1.0 + xsi = eta = -1.0 * np.ones(len(x), dtype=float) invA = np.array( [ [1, 0, 0, 0], @@ -283,7 +283,7 @@ def _search_indices_curvilinear_2d( # if y < field.lonlat_minmax[2] or y > field.lonlat_minmax[3]: # _raise_field_out_of_bound_error(z, y, x) - while xsi < -tol or xsi > 1 + tol or eta < -tol or eta > 1 + tol: + while np.any(xsi < -tol) or np.any(xsi > 1 + tol) or np.any(eta < -tol) or np.any(eta > 1 + tol): px = np.array([grid.lon[yi, xi], grid.lon[yi, xi + 1], grid.lon[yi + 1, xi + 1], grid.lon[yi + 1, xi]]) py = np.array([grid.lat[yi, xi], grid.lat[yi, xi + 1], grid.lat[yi + 1, xi + 1], grid.lat[yi + 1, xi]]) @@ -293,40 +293,29 @@ def _search_indices_curvilinear_2d( aa = a[3] * b[2] - a[2] * b[3] bb = a[3] * b[0] - a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + x * b[3] - y * a[3] cc = a[1] * b[0] - a[0] * b[1] + x * b[1] - y * a[1] - if abs(aa) < 1e-12: # Rectilinear cell, or quasi - eta = -cc / bb - else: - det2 = bb * bb - 4 * aa * cc - if det2 > 0: # so, if det is nan we keep the xsi, eta from previous iter - det = np.sqrt(det2) - eta = (-bb + det) / (2 * aa) - if abs(a[1] + a[3] * eta) < 1e-12: # this happens when recti cell rotated of 90deg - xsi = ((y - py[0]) / (py[1] - py[0]) + (y - py[3]) / (py[2] - py[3])) * 0.5 - else: - xsi = (x - a[0] - a[2] * eta) / (a[1] + a[3] * eta) - if xsi < 0 and eta < 0 and xi == 0 and yi == 0: - _raise_field_out_of_bound_error(0, y, x) - if xsi > 1 and eta > 1 and xi == grid.xdim - 1 and yi == grid.ydim - 1: - _raise_field_out_of_bound_error(0, y, x) - if xsi < -tol: - xi -= 1 - elif xsi > 1 + tol: - xi += 1 - if eta < -tol: - yi -= 1 - elif eta > 1 + tol: - yi += 1 + + det2 = bb * bb - 4 * aa * cc + det = np.where(det2 > 0, np.sqrt(det2), eta) + eta = np.where(abs(aa) < 1e-12, -cc / bb, np.where(det2 > 0, (-bb + det) / (2 * aa), eta)) + + xsi = np.where( + abs(a[1] + a[3] * eta) < 1e-12, + ((y - py[0]) / (py[1] - py[0]) + (y - py[3]) / (py[2] - py[3])) * 0.5, + (x - a[0] - a[2] * eta) / (a[1] + a[3] * eta), + ) + + xi = np.where(xsi < -tol, xi - 1, np.where(xsi > 1 + tol, xi + 1, xi)) + yi = np.where(eta < -tol, yi - 1, np.where(eta > 1 + tol, yi + 1, yi)) + (yi, xi) = _reconnect_bnd_indices(yi, xi, grid.ydim, grid.xdim, grid.mesh) it += 1 if it > maxIterSearch: print(f"Correct cell not found after {maxIterSearch} iterations") _raise_field_out_of_bound_error(0, y, x) - xsi = max(0.0, xsi) - eta = max(0.0, eta) - xsi = min(1.0, xsi) - eta = min(1.0, eta) + xsi = np.where(xsi < 0.0, 0.0, np.where(xsi > 1.0, 1.0, xsi)) + eta = np.where(eta < 0.0, 0.0, np.where(eta > 1.0, 1.0, eta)) - if not ((0 <= xsi <= 1) and (0 <= eta <= 1)): + if np.any((xsi < 0) | (xsi > 1) | (eta < 0) | (eta > 1)): _raise_field_sampling_error(y, x) return (yi, eta, xi, xsi) @@ -423,20 +412,12 @@ def _search_indices_curvilinear(field, time, z, y, x, ti, particle=None, search2 def _reconnect_bnd_indices(yi: int, xi: int, ydim: int, xdim: int, sphere_mesh: bool): - if xi < 0: - if sphere_mesh: - xi = xdim - 2 - else: - xi = 0 - if xi > xdim - 2: - if sphere_mesh: - xi = 0 - else: - xi = xdim - 2 - if yi < 0: - yi = 0 - if yi > ydim - 2: - yi = ydim - 2 - if sphere_mesh: - xi = xdim - xi + xi = np.where(xi < 0, (xdim - 2) if sphere_mesh else 0, xi) + xi = np.where(xi > xdim - 2, 0 if sphere_mesh else (xdim - 2), xi) + + xi = np.where(yi > ydim - 2, xdim - xi if sphere_mesh else xi, xi) + + yi = np.where(yi < 0, 0, yi) + yi = np.where(yi > ydim - 2, ydim - 2, yi) + return yi, xi From af9aee0e921435c59aa0fd47f7ce7945dcd97d44 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 16:41:29 +0200 Subject: [PATCH 44/58] Fixing RK45 for vectorized kernels --- parcels/application_kernels/advection.py | 14 ++++++++------ tests/v4/test_advection.py | 8 ++++---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index e4d4ecce0..7ad5ce051 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -167,17 +167,19 @@ def AdvectionRK45(particle, fieldset, time): # pragma: no cover kappa = np.sqrt(np.pow(lon_5th - lon_4th, 2) + np.pow(lat_5th - lat_4th, 2)) - good_particles = (kappa <= fieldset.RK45_tol / 10) & (np.fabs(dt * 2) <= np.fabs(fieldset.RK45_max_dt)) - repeat_particles = (kappa > fieldset.RK45_tol) & (np.fabs(dt) > np.fabs(fieldset.RK45_min_dt)) - - particle.next_dt = np.where(good_particles, particle.next_dt * 2, particle.next_dt) + good_particles = (kappa <= fieldset.RK45_tol) | (np.fabs(dt) <= np.fabs(fieldset.RK45_min_dt)) particle.dlon += np.where(good_particles, lon_4th, 0) particle.dlat += np.where(good_particles, lat_4th, 0) + increase_dt_particles = ( + good_particles & (kappa <= fieldset.RK45_tol / 10) & (np.fabs(dt * 2) <= np.fabs(fieldset.RK45_max_dt)) + ) + particle.next_dt = np.where(increase_dt_particles, particle.next_dt * 2, particle.next_dt) + particle.state = np.where(good_particles, StatusCode.Success, particle.state) + + repeat_particles = np.invert(good_particles) particle.next_dt = np.where(repeat_particles, particle.next_dt / 2, particle.next_dt) particle.state = np.where(repeat_particles, StatusCode.Repeat, particle.state) - if np.any(repeat_particles): - return StatusCode.Repeat def AdvectionAnalytical(particle, fieldset, time): # pragma: no cover diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index 12a9af05d..ceca91eb6 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -238,7 +238,7 @@ def test_radialrotation(npart=10): ("AdvDiffM1", 1e-2), ("RK4", 1e-5), ("RK4_3D", 1e-5), - pytest.param("RK45", 1, marks=pytest.mark.skip(reason="still needs to be fixed")), + ("RK45", 1), ], ) def test_moving_eddy(method, rtol): @@ -293,7 +293,7 @@ def truth_moving(x_0, y_0, t): [ ("EE", 1e-2), ("RK4", 1e-5), - pytest.param("RK45", 1e-5, marks=pytest.mark.skip(reason="still needs to be fixed")), + ("RK45", 1e-5), ], ) def test_decaying_moving_eddy(method, rtol): @@ -340,8 +340,8 @@ def truth_moving(x_0, y_0, t): @pytest.mark.parametrize( "method, atol", [ - pytest.param("RK4", 1), - pytest.param("RK45", 1, marks=pytest.mark.skip(reason="still needs to be fixed")), + ("RK4", 1), + ("RK45", 1), ], ) @pytest.mark.parametrize("grid_type", ["A"]) # TODO also implement C-grid once available From 4c5112482410fed02108cf79e54588196126d7fb Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 16:48:23 +0200 Subject: [PATCH 45/58] Fixing breaking test now that _search_inidces_curvilinear_2d expects numpy arrays --- parcels/_index_search.py | 2 +- tests/v4/test_index_search.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/parcels/_index_search.py b/parcels/_index_search.py index c3230797a..3361ed6c2 100644 --- a/parcels/_index_search.py +++ b/parcels/_index_search.py @@ -251,7 +251,7 @@ def _search_indices_rectilinear( def _search_indices_curvilinear_2d( grid: XGrid, y: float, x: float, yi_guess: int | None = None, xi_guess: int | None = None -): +): # TODO fix typing instructions to make clear that y, x etc need to be ndarrays yi, xi = yi_guess, xi_guess if yi is None: yi = (int(grid.ydim / 2) - 1) * np.ones(len(x), dtype=int) diff --git a/tests/v4/test_index_search.py b/tests/v4/test_index_search.py index 5d5c602b9..5c1a47307 100644 --- a/tests/v4/test_index_search.py +++ b/tests/v4/test_index_search.py @@ -26,8 +26,8 @@ def test_grid_indexing_fpoints(field_cone): for yi_expected in range(grid.ydim - 1): for xi_expected in range(grid.xdim - 1): - x = grid.lon[yi_expected, xi_expected] + 0.00001 - y = grid.lat[yi_expected, xi_expected] + 0.00001 + x = np.array([grid.lon[yi_expected, xi_expected] + 0.00001]) + y = np.array([grid.lat[yi_expected, xi_expected] + 0.00001]) yi, eta, xi, xsi = _search_indices_curvilinear_2d(grid, y, x) if eta > 0.9: From 9335ddbed7b2288f93de71f1822c22367f24479e Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 16:53:18 +0200 Subject: [PATCH 46/58] Using particle.time in the advection kernels instead of the kernel-time; which is not well defined in vectorized form --- parcels/application_kernels/advection.py | 48 ++++++++++++------------ parcels/kernel.py | 2 +- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index 7ad5ce051..e3590dd0b 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -21,11 +21,11 @@ def AdvectionRK4(particle, fieldset, time): # pragma: no cover dt = particle.dt / np.timedelta64(1, "s") # TODO: improve API for converting dt to seconds (u1, v1) = fieldset.UV[particle] lon1, lat1 = (particle.lon + u1 * 0.5 * dt, particle.lat + v1 * 0.5 * dt) - (u2, v2) = fieldset.UV[time + 0.5 * particle.dt, particle.depth, lat1, lon1, particle] + (u2, v2) = fieldset.UV[particle.time + 0.5 * particle.dt, particle.depth, lat1, lon1, particle] lon2, lat2 = (particle.lon + u2 * 0.5 * dt, particle.lat + v2 * 0.5 * dt) - (u3, v3) = fieldset.UV[time + 0.5 * particle.dt, particle.depth, lat2, lon2, particle] + (u3, v3) = fieldset.UV[particle.time + 0.5 * particle.dt, particle.depth, lat2, lon2, particle] lon3, lat3 = (particle.lon + u3 * dt, particle.lat + v3 * dt) - (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3, particle] + (u4, v4) = fieldset.UV[particle.time + particle.dt, particle.depth, lat3, lon3, particle] particle.dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6.0 * dt particle.dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6.0 * dt @@ -37,15 +37,15 @@ def AdvectionRK4_3D(particle, fieldset, time): # pragma: no cover lon1 = particle.lon + u1 * 0.5 * dt lat1 = particle.lat + v1 * 0.5 * dt dep1 = particle.depth + w1 * 0.5 * dt - (u2, v2, w2) = fieldset.UVW[time + 0.5 * particle.dt, dep1, lat1, lon1, particle] + (u2, v2, w2) = fieldset.UVW[particle.time + 0.5 * particle.dt, dep1, lat1, lon1, particle] lon2 = particle.lon + u2 * 0.5 * dt lat2 = particle.lat + v2 * 0.5 * dt dep2 = particle.depth + w2 * 0.5 * dt - (u3, v3, w3) = fieldset.UVW[time + 0.5 * particle.dt, dep2, lat2, lon2, particle] + (u3, v3, w3) = fieldset.UVW[particle.time + 0.5 * particle.dt, dep2, lat2, lon2, particle] lon3 = particle.lon + u3 * dt lat3 = particle.lat + v3 * dt dep3 = particle.depth + w3 * dt - (u4, v4, w4) = fieldset.UVW[time + particle.dt, dep3, lat3, lon3, particle] + (u4, v4, w4) = fieldset.UVW[particle.time + particle.dt, dep3, lat3, lon3, particle] particle.dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6 * dt particle.dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6 * dt particle.ddepth += (w1 + 2 * w2 + 2 * w3 + w4) / 6 * dt @@ -56,35 +56,35 @@ def AdvectionRK4_3D_CROCO(particle, fieldset, time): # pragma: no cover This kernel assumes the vertical velocity is the 'w' field from CROCO output and works on sigma-layers. """ dt = particle.dt / np.timedelta64(1, "s") # TODO: improve API for converting dt to seconds - sig_dep = particle.depth / fieldset.H[time, 0, particle.lat, particle.lon] + sig_dep = particle.depth / fieldset.H[particle.time, 0, particle.lat, particle.lon] - (u1, v1, w1) = fieldset.UVW[time, particle.depth, particle.lat, particle.lon, particle] - w1 *= sig_dep / fieldset.H[time, 0, particle.lat, particle.lon] + (u1, v1, w1) = fieldset.UVW[particle.time, particle.depth, particle.lat, particle.lon, particle] + w1 *= sig_dep / fieldset.H[particle.time, 0, particle.lat, particle.lon] lon1 = particle.lon + u1 * 0.5 * dt lat1 = particle.lat + v1 * 0.5 * dt sig_dep1 = sig_dep + w1 * 0.5 * dt - dep1 = sig_dep1 * fieldset.H[time, 0, lat1, lon1] + dep1 = sig_dep1 * fieldset.H[particle.time, 0, lat1, lon1] - (u2, v2, w2) = fieldset.UVW[time + 0.5 * particle.dt, dep1, lat1, lon1, particle] - w2 *= sig_dep1 / fieldset.H[time, 0, lat1, lon1] + (u2, v2, w2) = fieldset.UVW[particle.time + 0.5 * particle.dt, dep1, lat1, lon1, particle] + w2 *= sig_dep1 / fieldset.H[particle.time, 0, lat1, lon1] lon2 = particle.lon + u2 * 0.5 * dt lat2 = particle.lat + v2 * 0.5 * dt sig_dep2 = sig_dep + w2 * 0.5 * dt - dep2 = sig_dep2 * fieldset.H[time, 0, lat2, lon2] + dep2 = sig_dep2 * fieldset.H[particle.time, 0, lat2, lon2] - (u3, v3, w3) = fieldset.UVW[time + 0.5 * particle.dt, dep2, lat2, lon2, particle] - w3 *= sig_dep2 / fieldset.H[time, 0, lat2, lon2] + (u3, v3, w3) = fieldset.UVW[particle.time + 0.5 * particle.dt, dep2, lat2, lon2, particle] + w3 *= sig_dep2 / fieldset.H[particle.time, 0, lat2, lon2] lon3 = particle.lon + u3 * dt lat3 = particle.lat + v3 * dt sig_dep3 = sig_dep + w3 * dt - dep3 = sig_dep3 * fieldset.H[time, 0, lat3, lon3] + dep3 = sig_dep3 * fieldset.H[particle.time, 0, lat3, lon3] - (u4, v4, w4) = fieldset.UVW[time + particle.dt, dep3, lat3, lon3, particle] - w4 *= sig_dep3 / fieldset.H[time, 0, lat3, lon3] + (u4, v4, w4) = fieldset.UVW[particle.time + particle.dt, dep3, lat3, lon3, particle] + w4 *= sig_dep3 / fieldset.H[particle.time, 0, lat3, lon3] lon4 = particle.lon + u4 * dt lat4 = particle.lat + v4 * dt sig_dep4 = sig_dep + w4 * dt - dep4 = sig_dep4 * fieldset.H[time, 0, lat4, lon4] + dep4 = sig_dep4 * fieldset.H[particle.time, 0, lat4, lon4] particle.dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6 * dt particle.dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6 * dt @@ -138,27 +138,27 @@ def AdvectionRK45(particle, fieldset, time): # pragma: no cover (u1, v1) = fieldset.UV[particle] lon1, lat1 = (particle.lon + u1 * A[0][0] * dt, particle.lat + v1 * A[0][0] * dt) - (u2, v2) = fieldset.UV[time + c[0] * particle.dt, particle.depth, lat1, lon1, particle] + (u2, v2) = fieldset.UV[particle.time + c[0] * particle.dt, particle.depth, lat1, lon1, particle] lon2, lat2 = ( particle.lon + (u1 * A[1][0] + u2 * A[1][1]) * dt, particle.lat + (v1 * A[1][0] + v2 * A[1][1]) * dt, ) - (u3, v3) = fieldset.UV[time + c[1] * particle.dt, particle.depth, lat2, lon2, particle] + (u3, v3) = fieldset.UV[particle.time + c[1] * particle.dt, particle.depth, lat2, lon2, particle] lon3, lat3 = ( particle.lon + (u1 * A[2][0] + u2 * A[2][1] + u3 * A[2][2]) * dt, particle.lat + (v1 * A[2][0] + v2 * A[2][1] + v3 * A[2][2]) * dt, ) - (u4, v4) = fieldset.UV[time + c[2] * particle.dt, particle.depth, lat3, lon3, particle] + (u4, v4) = fieldset.UV[particle.time + c[2] * particle.dt, particle.depth, lat3, lon3, particle] lon4, lat4 = ( particle.lon + (u1 * A[3][0] + u2 * A[3][1] + u3 * A[3][2] + u4 * A[3][3]) * dt, particle.lat + (v1 * A[3][0] + v2 * A[3][1] + v3 * A[3][2] + v4 * A[3][3]) * dt, ) - (u5, v5) = fieldset.UV[time + c[3] * particle.dt, particle.depth, lat4, lon4, particle] + (u5, v5) = fieldset.UV[particle.time + c[3] * particle.dt, particle.depth, lat4, lon4, particle] lon5, lat5 = ( particle.lon + (u1 * A[4][0] + u2 * A[4][1] + u3 * A[4][2] + u4 * A[4][3] + u5 * A[4][4]) * dt, particle.lat + (v1 * A[4][0] + v2 * A[4][1] + v3 * A[4][2] + v4 * A[4][3] + v5 * A[4][4]) * dt, ) - (u6, v6) = fieldset.UV[time + c[4] * particle.dt, particle.depth, lat5, lon5, particle] + (u6, v6) = fieldset.UV[particle.time + c[4] * particle.dt, particle.depth, lat5, lon5, particle] lon_4th = (u1 * b4[0] + u2 * b4[1] + u3 * b4[2] + u4 * b4[3] + u5 * b4[4]) * dt lat_4th = (v1 * b4[0] + v2 * b4[1] + v3 * b4[2] + v4 * b4[3] + v5 * b4[4]) * dt diff --git a/parcels/kernel.py b/parcels/kernel.py index 4a88343dd..eeca60584 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -305,7 +305,7 @@ def evaluate_pset(self, pset, endtime): res = None for f in self._pyfuncs: # TODO remove "time" from kernel signature in v4; because it doesn't make sense for vectorized particles - res_tmp = f(pset, self._fieldset, pset.time_nextloop[0]) + res_tmp = f(pset, self._fieldset, None) if res_tmp is not None: # TODO v4: Remove once all kernels return StatusCode res = res_tmp if res in [StatusCode.StopExecution, StatusCode.Repeat]: From 04946b7ded00542aabe61238d9a969c542993af1 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Wed, 13 Aug 2025 17:00:35 +0200 Subject: [PATCH 47/58] Fix unit test --- tests/v4/test_advection.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index ceca91eb6..6b6f322b1 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -383,10 +383,9 @@ def test_gyre_flowfields(method, grid_type, atol, flowfield): ) def UpdateP(particle, fieldset, time): # pragma: no cover - if time == 0: - particle.p_start = fieldset.P[particle.time, particle.depth, particle.lat, particle.lon] particle.p = fieldset.P[particle.time, particle.depth, particle.lat, particle.lon] + particle.p_start = np.where(particle.time == 0, particle.p, particle.p_start) pset = ParticleSet(fieldset, pclass=SampleParticle, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) pset.execute([kernel[method], UpdateP], dt=dt, runtime=runtime) - np.testing.assert_allclose(pset.p_start, pset.p, atol=atol) + np.testing.assert_allclose(pset.p, pset.p_start, atol=atol) From bee54642db18a0f4cbb08905bf470638527ed754 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 14 Aug 2025 10:58:16 +0200 Subject: [PATCH 48/58] Cleaning up kernel loop Also implementing better error handling --- parcels/kernel.py | 142 +++++++++++++++++----------------------------- 1 file changed, 51 insertions(+), 91 deletions(-) diff --git a/parcels/kernel.py b/parcels/kernel.py index eeca60584..017fc4237 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -213,67 +213,7 @@ def from_list(cls, fieldset, ptype, pyfunc_list): return cls(fieldset, ptype, pyfunc_list) def execute(self, pset, endtime, dt): - """Execute this Kernel over a ParticleSet for several timesteps.""" - pset._data["state"][:] = StatusCode.Evaluate - - if abs(dt) < np.timedelta64(1000, "ns"): # TODO still needed? - warnings.warn( - "'dt' is too small, causing numerical accuracy limit problems. Please chose a higher 'dt' and rather scale the 'time' axis of the field accordingly. (related issue #762)", - RuntimeWarning, - stacklevel=2, - ) - - if not self._positionupdate_kernels_added: - self.add_positionupdate_kernels() - self._positionupdate_kernels_added = True - - self.evaluate_pset(pset, endtime) - if any(pset.state == StatusCode.StopAllExecution): - return StatusCode.StopAllExecution - - # Remove all particles that signalled deletion - self.remove_deleted(pset) - - # Identify particles that threw errors - n_error = pset._num_error_particles - - while n_error > 0: - for i in pset._error_particles: - p = pset[i] - if p.state == StatusCode.StopExecution: - return - if p.state == StatusCode.StopAllExecution: - return StatusCode.StopAllExecution - if p.state == StatusCode.Repeat: - p.state = StatusCode.Evaluate - elif p.state == StatusCode.ErrorTimeExtrapolation: - raise TimeExtrapolationError(p.time) - elif p.state == StatusCode.ErrorOutOfBounds: - _raise_field_out_of_bound_error(p.depth, p.lat, p.lon) - elif p.state == StatusCode.ErrorThroughSurface: - _raise_field_out_of_bound_surface_error(p.depth, p.lat, p.lon) - elif p.state == StatusCode.Error: - _raise_field_sampling_error(p.depth, p.lat, p.lon) - elif p.state == StatusCode.Delete: - pass - else: - warnings.warn( - f"Deleting particle {p.trajectory} because of non-recoverable error", - RuntimeWarning, - stacklevel=2, - ) - p.delete() - - # Remove all particles that signalled deletion - self.remove_deleted(pset) # Generalizable version! - - # Re-execute Kernels to retry particles with StatusCode.Repeat - self.evaluate_pset(pset, endtime) - - n_error = pset._num_error_particles - - def evaluate_pset(self, pset, endtime): - """Execute the kernel evaluation of for the entire particle set. + """Execute this Kernel over a ParticleSet for several timesteps. Parameters ---------- @@ -282,43 +222,63 @@ def evaluate_pset(self, pset, endtime): endtime : endtime of this overall kernel evaluation step dt : - computational integration timestep + computational integration timestep from pset.execute """ - sign_dt = np.where(pset.dt >= 0, 1, -1) - while pset[0].state in [StatusCode.Evaluate, StatusCode.Repeat]: - if all(sign_dt * (endtime - pset.time_nextloop) <= 0): + compute_time_direction = 1 if dt > 0 else -1 + + pset._data["state"][:] = StatusCode.Evaluate + + if not self._positionupdate_kernels_added: + self.add_positionupdate_kernels() + self._positionupdate_kernels_added = True + + while (len(pset) > 0) and np.any(np.isin(pset.state, [StatusCode.Evaluate, StatusCode.Repeat])): + time_to_endtime = compute_time_direction * (endtime - pset.time_nextloop) + + if all(time_to_endtime <= 0): return pset - pre_dt = pset.dt + pre_dt = pset.dt # TODO check if needed + try: # Use next_dt from AdvectionRK45 if it is set - pset.next_dt = np.where( - sign_dt * (endtime - pset.time_nextloop) <= pset.next_dt, - np.where(sign_dt * (endtime - pset.time_nextloop) < 0, 0, sign_dt * (endtime - pset.time_nextloop)), - pset.next_dt, - ) + if compute_time_direction == 1: + pset.next_dt = np.maximum(np.minimum(pset.next_dt, time_to_endtime), 0) + else: + pset.next_dt = np.minimum(np.maximum(pset.next_dt, -time_to_endtime), 0) except KeyError: - pset.dt = np.where( - sign_dt * (endtime - pset.time_nextloop) <= pset.dt, - np.where(sign_dt * (endtime - pset.time_nextloop) < 0, 0, sign_dt * (endtime - pset.time_nextloop)), - pset.dt, - ) - res = None + if compute_time_direction == 1: + pset.dt = np.maximum(np.minimum(pset.dt, time_to_endtime), 0) + else: + pset.dt = np.minimum(np.maximum(pset.dt, -time_to_endtime), 0) + + inds = (np.isin(pset.state, [StatusCode.Evaluate, StatusCode.Repeat])) & (pset.dt != 0) for f in self._pyfuncs: # TODO remove "time" from kernel signature in v4; because it doesn't make sense for vectorized particles - res_tmp = f(pset, self._fieldset, None) - if res_tmp is not None: # TODO v4: Remove once all kernels return StatusCode - res = res_tmp - if res in [StatusCode.StopExecution, StatusCode.Repeat]: - break - - if res is None: - pset.state = np.where( - (pset.state == StatusCode.Success) & (sign_dt * (pset.time - endtime) > 0), - StatusCode.Evaluate, - pset.state, - ) - else: # TODO need to think how the kernel exitcode works on vectorized particleset - pset.state = res + f(pset[inds], self._fieldset, None) pset.dt = pre_dt + + # Reset particle state for particles that signalled success and have not reached endtime yet + particles_to_evaluate = (pset.state == StatusCode.Success) & (time_to_endtime > 0) + pset.state = np.where(particles_to_evaluate, StatusCode.Evaluate, pset.state) + + # delete particles that signalled deletion + self.remove_deleted(pset) + + # check and throw errors + if np.any(pset.state == StatusCode.ErrorTimeExtrapolation): + inds = pset.state == StatusCode.ErrorTimeExtrapolation + raise TimeExtrapolationError(pset[inds].time) + + errors_to_check = { + StatusCode.ErrorOutOfBounds: _raise_field_out_of_bound_error, + StatusCode.ErrorThroughSurface: _raise_field_out_of_bound_surface_error, + StatusCode.Error: _raise_field_sampling_error, + } + + for error_code, error_func in errors_to_check.items(): + if np.any(pset.state == error_code): + inds = pset.state == error_code + error_func(pset[inds].depth, pset[inds].lat, pset[inds].lon) + return pset From 7c6b21d0f6cc60d7d4dabb40619c6f07ad22363c Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 14 Aug 2025 10:58:35 +0200 Subject: [PATCH 49/58] Adding KernelParticle getitem --- parcels/particle.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/parcels/particle.py b/parcels/particle.py index 546f53cf6..8a51e3680 100644 --- a/parcels/particle.py +++ b/parcels/particle.py @@ -138,6 +138,13 @@ def __setattr__(self, name, value): else: self._data[name][self._index] = value + def __getitem__(self, index): + self._index = index + return self + + def __len__(self): + return len(self._index) + def _assert_no_duplicate_variable_names(*, existing_vars: list[Variable], new_vars: list[Variable]): existing_names = {var.name for var in existing_vars} From a2feb9e1c15b0d1d03e0745f11ea806af90646c3 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 14 Aug 2025 10:58:47 +0200 Subject: [PATCH 50/58] Fixing advection unit tests --- tests/v4/test_advection.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index 6b6f322b1..a35b83af7 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -128,11 +128,11 @@ def SubmergeParticle(particle, fieldset, time): # pragma: no cover return dt = particle.dt / np.timedelta64(1, "s") (u, v) = fieldset.UV[particle[inds]] - particle.dlon[inds] = u * dt - particle.dlat[inds] = v * dt - particle.ddepth[inds] = 0.0 - particle.depth[inds] = 0 - particle.state[inds] = StatusCode.Evaluate + particle[inds].dlon = u * dt + particle[inds].dlat = v * dt + particle[inds].ddepth = 0.0 + particle[inds].depth = 0 + particle[inds].state = StatusCode.Evaluate kernels = [AdvectionRK4_3D] if wErrorThroughSurface: @@ -238,7 +238,7 @@ def test_radialrotation(npart=10): ("AdvDiffM1", 1e-2), ("RK4", 1e-5), ("RK4_3D", 1e-5), - ("RK45", 1), + ("RK45", 1e-5), ], ) def test_moving_eddy(method, rtol): @@ -293,7 +293,7 @@ def truth_moving(x_0, y_0, t): [ ("EE", 1e-2), ("RK4", 1e-5), - ("RK45", 1e-5), + ("RK45", 2e-5), ], ) def test_decaying_moving_eddy(method, rtol): From 654ae1a6833735f7ec3a33b900004fbcd04f96df Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 14 Aug 2025 10:58:57 +0200 Subject: [PATCH 51/58] Fixing xgrid unit test --- tests/v4/test_xgrid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/v4/test_xgrid.py b/tests/v4/test_xgrid.py index d132e5900..f987cf798 100644 --- a/tests/v4/test_xgrid.py +++ b/tests/v4/test_xgrid.py @@ -131,7 +131,7 @@ def test_xgrid_search_cpoints(ds): axis_indices = {"Z": 0, "Y": yi, "X": xi} lat, lon = lat_array[yi, xi], lon_array[yi, xi] - axis_indices_bcoords = grid.search(0, lat, lon, ei=None) + axis_indices_bcoords = grid.search(0, np.atleast_1d(lat), np.atleast_1d(lon), ei=None) axis_indices_test = {k: v[0] for k, v in axis_indices_bcoords.items()} assert axis_indices == axis_indices_test From 316f15e6f2843e5a64d2227810e3302661f696a9 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 14 Aug 2025 10:59:25 +0200 Subject: [PATCH 52/58] Fixing particleset_execution unit tests --- tests/v4/test_particleset_execute.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index 12d5b423f..d72227347 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -48,8 +48,7 @@ def test_pset_stop_simulation(fieldset, npart): pset = ParticleSet(fieldset, lon=np.zeros(npart), lat=np.zeros(npart), pclass=Particle) def Delete(particle, fieldset, time): # pragma: no cover - if time >= fieldset.time_interval.left + np.timedelta64(4, "s"): - return StatusCode.StopExecution + particle[particle.time >= fieldset.time_interval.left + np.timedelta64(4, "s")].state = StatusCode.StopExecution pset.execute(Delete, dt=np.timedelta64(1, "s"), runtime=np.timedelta64(21, "s")) assert pset[0].time == fieldset.time_interval.left + np.timedelta64(4, "s") @@ -91,9 +90,13 @@ def test_dont_run_particles_outside_starttime(fieldset): start_times = [fieldset.time_interval.left + np.timedelta64(t, "s") for t in [0, 2, 10]] endtime = fieldset.time_interval.left + np.timedelta64(8, "s") + def AddLon(particle, fieldset, time): # pragma: no cover + particle.lon += 1 + pset = ParticleSet(fieldset, lon=np.zeros(len(start_times)), lat=np.zeros(len(start_times)), time=start_times) + pset.execute(AddLon, dt=np.timedelta64(1, "s"), endtime=endtime) - pset.execute(DoNothing, dt=np.timedelta64(1, "s"), endtime=endtime) + np.testing.assert_array_equal(pset.lon, [8, 6, 0]) assert pset.time_nextloop[0:1] == endtime assert pset.time_nextloop[2] == start_times[2] # this particle has not been executed @@ -102,8 +105,9 @@ def test_dont_run_particles_outside_starttime(fieldset): endtime = fieldset.time_interval.right - np.timedelta64(8, "s") pset = ParticleSet(fieldset, lon=np.zeros(len(start_times)), lat=np.zeros(len(start_times)), time=start_times) + pset.execute(AddLon, dt=-np.timedelta64(1, "s"), endtime=endtime) - pset.execute(DoNothing, dt=-np.timedelta64(1, "s"), endtime=endtime) + np.testing.assert_array_equal(pset.lon, [8, 6, 0]) assert pset.time_nextloop[0:1] == endtime assert pset.time_nextloop[2] == start_times[2] # this particle has not been executed From 9652bf70b98c969160f7caa1ef80e1139294effe Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 14 Aug 2025 13:06:55 +0200 Subject: [PATCH 53/58] Adding and extending unit tests on particle errors --- parcels/field.py | 4 +- parcels/kernel.py | 25 +++--- parcels/particleset.py | 4 +- tests/test_kernel_execution.py | 129 --------------------------- tests/v4/test_particleset_execute.py | 90 +++++++++++++++++++ 5 files changed, 108 insertions(+), 144 deletions(-) diff --git a/parcels/field.py b/parcels/field.py index de9c3fa43..8c2a55c7a 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -36,9 +36,9 @@ def _deal_with_errors(error, key, vector_type: VectorType): - if isinstance(key, ParticleSet): + if isinstance(key, (ParticleSet, KernelParticle)): key.state = AllParcelsErrorCodes[type(error)] - elif isinstance(key[-1], ParticleSet): + elif isinstance(key[-1], (ParticleSet, KernelParticle)): key[-1].state = AllParcelsErrorCodes[type(error)] else: raise RuntimeError(f"{error}. Error could not be handled because particle was not part of the Field Sampling.") diff --git a/parcels/kernel.py b/parcels/kernel.py index 017fc4237..6ded63299 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -15,10 +15,10 @@ from parcels.basegrid import GridType from parcels.tools.statuscodes import ( StatusCode, - TimeExtrapolationError, _raise_field_out_of_bound_error, _raise_field_out_of_bound_surface_error, _raise_field_sampling_error, + _raise_time_extrapolation_error, ) from parcels.tools.warnings import KernelWarning @@ -236,9 +236,10 @@ def execute(self, pset, endtime, dt): time_to_endtime = compute_time_direction * (endtime - pset.time_nextloop) if all(time_to_endtime <= 0): - return pset + return StatusCode.Success - pre_dt = pset.dt # TODO check if needed + # keep a copy of dt in case it's changed below + pre_dt = pset.dt.copy() try: # Use next_dt from AdvectionRK45 if it is set if compute_time_direction == 1: @@ -256,29 +257,33 @@ def execute(self, pset, endtime, dt): # TODO remove "time" from kernel signature in v4; because it doesn't make sense for vectorized particles f(pset[inds], self._fieldset, None) + # revert to old dt pset.dt = pre_dt # Reset particle state for particles that signalled success and have not reached endtime yet particles_to_evaluate = (pset.state == StatusCode.Success) & (time_to_endtime > 0) - pset.state = np.where(particles_to_evaluate, StatusCode.Evaluate, pset.state) + pset[particles_to_evaluate].state = StatusCode.Evaluate # delete particles that signalled deletion self.remove_deleted(pset) # check and throw errors - if np.any(pset.state == StatusCode.ErrorTimeExtrapolation): - inds = pset.state == StatusCode.ErrorTimeExtrapolation - raise TimeExtrapolationError(pset[inds].time) + if np.any(pset.state == StatusCode.StopAllExecution): + return StatusCode.StopAllExecution - errors_to_check = { + errors_to_throw = { + StatusCode.ErrorTimeExtrapolation: _raise_time_extrapolation_error, StatusCode.ErrorOutOfBounds: _raise_field_out_of_bound_error, StatusCode.ErrorThroughSurface: _raise_field_out_of_bound_surface_error, StatusCode.Error: _raise_field_sampling_error, } - for error_code, error_func in errors_to_check.items(): + for error_code, error_func in errors_to_throw.items(): if np.any(pset.state == error_code): inds = pset.state == error_code - error_func(pset[inds].depth, pset[inds].lat, pset[inds].lon) + if error_code == StatusCode.ErrorTimeExtrapolation: + error_func(pset[inds].time) + else: + error_func(pset[inds].depth, pset[inds].lat, pset[inds].lon) return pset diff --git a/parcels/particleset.py b/parcels/particleset.py index ff7f06d66..871d151b3 100644 --- a/parcels/particleset.py +++ b/parcels/particleset.py @@ -580,9 +580,7 @@ def execute( next_time = end_time # TODO update to min(next_output, end_time) when ParticleFile works else: next_time = end_time # TODO update to max(next_output, end_time) when ParticleFile works - res = self._kernel.execute(self, endtime=next_time, dt=dt) - if res == StatusCode.StopAllExecution: - return StatusCode.StopAllExecution + self._kernel.execute(self, endtime=next_time, dt=dt) # TODO: Handle IO timing based of timedelta or datetime objects if next_output: diff --git a/tests/test_kernel_execution.py b/tests/test_kernel_execution.py index e9e72e62c..6a61f2986 100644 --- a/tests/test_kernel_execution.py +++ b/tests/test_kernel_execution.py @@ -2,13 +2,10 @@ import pytest from parcels import ( - FieldOutOfBoundError, FieldSet, Particle, ParticleSet, - StatusCode, ) -from tests.common_kernels import DeleteParticle, DoNothing, MoveEast, MoveNorth from tests.utils import create_fieldset_unit_mesh @@ -54,132 +51,6 @@ def SampleP(particle, fieldset, time): # pragma: no cover assert np.allclose(lons[0], 0.2) -@pytest.mark.parametrize( - "start, end, substeps, dt", - [ - (0.0, 10.0, 1, 1.0), - (0.0, 10.0, 4, 1.0), - (0.0, 10.0, 1, 3.0), - (2.0, 16.0, 5, 3.0), - (20.0, 10.0, 4, -1.0), - (20.0, -10.0, 7, -2.0), - ], -) -def test_execution_endtime(fieldset_unit_mesh, start, end, substeps, dt): - npart = 10 - pset = ParticleSet( - fieldset_unit_mesh, pclass=Particle, time=start, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart) - ) - pset.execute(DoNothing, endtime=end, dt=dt) - assert np.allclose(pset.time_nextloop, end) - - -@pytest.mark.parametrize( - "start, end, substeps, dt", - [ - (0.0, 10.0, 1, 1.0), - (0.0, 10.0, 4, 1.0), - (0.0, 10.0, 1, 3.0), - (2.0, 16.0, 5, 3.0), - (20.0, 10.0, 4, -1.0), - (20.0, -10.0, 7, -2.0), - ], -) -def test_execution_runtime(fieldset_unit_mesh, start, end, substeps, dt): - npart = 10 - pset = ParticleSet( - fieldset_unit_mesh, pclass=Particle, time=start, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart) - ) - t_step = abs(end - start) / substeps - for _ in range(substeps): - pset.execute(DoNothing, runtime=t_step, dt=dt) - assert np.allclose(pset.time_nextloop, end) - - -def test_execution_fail_out_of_bounds(fieldset_unit_mesh): - npart = 10 - - def MoveRight(particle, fieldset, time): # pragma: no cover - tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon + 0.1, particle] - particle.dlon += 0.1 - - pset = ParticleSet(fieldset_unit_mesh, pclass=Particle, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)) - with pytest.raises(FieldOutOfBoundError): - pset.execute(MoveRight, endtime=10.0, dt=1.0) - assert len(pset) == npart - assert (pset.lon - 1.0 > -1.0e12).all() - - -def test_execution_recover_out_of_bounds(fieldset_unit_mesh): - npart = 2 - - def MoveRight(particle, fieldset, time): # pragma: no cover - tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon + 0.1, particle] - particle.dlon += 0.1 - - def MoveLeft(particle, fieldset, time): # pragma: no cover - if particle.state == StatusCode.ErrorOutOfBounds: - particle.dlon -= 1.0 - particle.state = StatusCode.Success - - lon = np.linspace(0.05, 0.95, npart) - lat = np.linspace(1, 0, npart) - pset = ParticleSet(fieldset_unit_mesh, pclass=Particle, lon=lon, lat=lat) - pset.execute([MoveRight, MoveLeft], endtime=11.0, dt=1.0) - assert len(pset) == npart - assert np.allclose(pset.lon, lon, rtol=1e-5) - assert np.allclose(pset.lat, lat, rtol=1e-5) - - -def test_execution_check_all_errors(fieldset_unit_mesh): - def MoveRight(particle, fieldset, time): # pragma: no cover - tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon, particle] - - def RecoverAllErrors(particle, fieldset, time): # pragma: no cover - if particle.state > 4: - particle.state = StatusCode.Delete - - pset = ParticleSet(fieldset_unit_mesh, pclass=Particle, lon=10, lat=0) - pset.execute([MoveRight, RecoverAllErrors], endtime=11.0, dt=1.0) - assert len(pset) == 0 - - -def test_execution_check_stopallexecution(fieldset_unit_mesh): - def addoneLon(particle, fieldset, time): # pragma: no cover - particle.dlon += 1 - - if particle.lon + particle.dlon >= 10: - particle.state = StatusCode.StopAllExecution - - pset = ParticleSet(fieldset_unit_mesh, pclass=Particle, lon=[0, 1], lat=[0, 0]) - pset.execute(addoneLon, endtime=20.0, dt=1.0) - assert pset[0].lon == 9 - assert pset[0].time == 9 - assert pset[1].lon == 1 - assert pset[1].time == 0 - - -def test_execution_delete_out_of_bounds(fieldset_unit_mesh): - npart = 10 - - def MoveRight(particle, fieldset, time): # pragma: no cover - tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon + 0.1, particle] - particle.dlon += 0.1 - - lon = np.linspace(0.05, 0.95, npart) - lat = np.linspace(1, 0, npart) - pset = ParticleSet(fieldset_unit_mesh, pclass=Particle, lon=lon, lat=lat) - pset.execute([MoveRight, DeleteParticle], endtime=10.0, dt=1.0) - assert len(pset) == 0 - - -def test_kernel_add_no_new_variables(fieldset_unit_mesh): - pset = ParticleSet(fieldset_unit_mesh, pclass=Particle, lon=[0.5], lat=[0.5]) - pset.execute(pset.Kernel(MoveEast) + pset.Kernel(MoveNorth), endtime=2.0, dt=1.0) - assert np.allclose(pset.lon, 0.6, rtol=1e-5) - assert np.allclose(pset.lat, 0.6, rtol=1e-5) - - def test_multi_kernel_duplicate_varnames(fieldset_unit_mesh): # Testing for merging of two Kernels with the same variable declared # Should throw a warning, but go ahead regardless diff --git a/tests/v4/test_particleset_execute.py b/tests/v4/test_particleset_execute.py index d72227347..7cf6aa21a 100644 --- a/tests/v4/test_particleset_execute.py +++ b/tests/v4/test_particleset_execute.py @@ -11,8 +11,10 @@ UXPiecewiseConstantFace, VectorField, ) +from parcels._datasets.structured.generated import simple_UV_dataset from parcels._datasets.structured.generic import datasets as datasets_structured from parcels._datasets.unstructured.generic import datasets as datasets_unstructured +from parcels.tools.statuscodes import FieldOutOfBoundError, TimeExtrapolationError from parcels.uxgrid import UxGrid from parcels.xgrid import XGrid from tests import utils @@ -28,6 +30,17 @@ def fieldset() -> FieldSet: return FieldSet([U, V]) +@pytest.fixture +def zonal_flow_fieldset() -> FieldSet: + ds = simple_UV_dataset(mesh_type="flat") + ds["U"].data[:] = 1.0 + grid = XGrid.from_dataset(ds) + U = Field("U", ds["U"], grid, mesh_type="flat") + V = Field("V", ds["V"], grid, mesh_type="flat") + UV = VectorField("UV", U, V) + return FieldSet([U, V, UV]) + + def test_pset_remove_particle_in_kernel(fieldset): npart = 100 pset = ParticleSet(fieldset, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)) @@ -112,6 +125,72 @@ def AddLon(particle, fieldset, time): # pragma: no cover assert pset.time_nextloop[2] == start_times[2] # this particle has not been executed +def test_some_particles_throw_outofbounds(zonal_flow_fieldset): + npart = 100 + lon = np.linspace(0, 9e5, npart) + pset = ParticleSet(zonal_flow_fieldset, lon=lon, lat=np.zeros_like(lon)) + + with pytest.raises(FieldOutOfBoundError): + pset.execute(AdvectionEE, runtime=np.timedelta64(1_000_000, "s"), dt=np.timedelta64(10_000, "s")) + + +def test_delete_on_all_errors(fieldset): + def MoveRight(particle, fieldset, time): # pragma: no cover + particle.dlon += 1 + fieldset.U[particle.time, particle.depth, particle.lat, particle.lon, particle] + + def DeleteAllErrorParticles(particle, fieldset, time): # pragma: no cover + particle[particle.state > 20].state = StatusCode.Delete + + pset = ParticleSet(fieldset, lon=[1e5, 2], lat=[0, 0]) + pset.execute([MoveRight, DeleteAllErrorParticles], runtime=np.timedelta64(10, "s"), dt=np.timedelta64(1, "s")) + assert len(pset) == 0 + + +def test_some_particles_throw_outoftime(fieldset): + time = [fieldset.time_interval.left + np.timedelta64(t, "D") for t in [0, 350]] + pset = ParticleSet(fieldset, lon=np.zeros_like(time), lat=np.zeros_like(time), time=time) + + def FieldAccessOutsideTime(particle, fieldset, time): # pragma: no cover + fieldset.U[particle.time + np.timedelta64(1, "D"), particle.depth, particle.lat, particle.lon, particle] + + with pytest.raises(TimeExtrapolationError): + pset.execute(FieldAccessOutsideTime, runtime=np.timedelta64(400, "D"), dt=np.timedelta64(10, "D")) + + +def test_execution_check_stopallexecution(fieldset): + def addoneLon(particle, fieldset, time): # pragma: no cover + particle.dlon += 1 + particle[particle.lon + particle.dlon >= 10].state = StatusCode.StopAllExecution + + pset = ParticleSet(fieldset, lon=[0, 0], lat=[0, 0]) + pset.execute(addoneLon, runtime=np.timedelta64(20, "s"), dt=np.timedelta64(1, "s")) + np.testing.assert_allclose(pset.lon, 9) + np.testing.assert_allclose(pset.time - fieldset.time_interval.left, np.timedelta64(9, "s")) + + +def test_execution_recover_out_of_bounds(fieldset): + npart = 2 + + def MoveRight(particle, fieldset, time): # pragma: no cover + fieldset.U[particle.time, particle.depth, particle.lat, particle.lon + 0.1, particle] + particle.dlon += 0.1 + + def MoveLeft(particle, fieldset, time): # pragma: no cover + inds = np.where(particle.state == StatusCode.ErrorOutOfBounds) + print(inds, particle.state) + particle[inds].dlon -= 1.0 + particle[inds].state = StatusCode.Success + + lon = np.linspace(0.05, 6.95, npart) + lat = np.linspace(1, 0, npart) + pset = ParticleSet(fieldset, lon=lon, lat=lat) + pset.execute([MoveRight, MoveLeft], runtime=np.timedelta64(61, "s"), dt=np.timedelta64(1, "s")) + assert len(pset) == npart + np.testing.assert_allclose(pset.lon, [6.05, 5.95], rtol=1e-5) + np.testing.assert_allclose(pset.lat, lat, rtol=1e-5) + + @pytest.mark.parametrize( "starttime, runtime, dt", [(0, 10, 1), (0, 10, 3), (2, 16, 3), (20, 10, -1), (20, 0, -2), (5, 15, None)], @@ -127,6 +206,17 @@ def test_execution_runtime(fieldset, starttime, runtime, dt, npart): assert all([abs(p.time_nextloop - starttime - runtime * sign_dt) < np.timedelta64(1, "ms") for p in pset]) +def test_changing_dt_in_kernel(fieldset): + def KernelCounter(particle, fieldset, time): # pragma: no cover + particle.lon += 1 + + pset = ParticleSet(fieldset, lon=np.zeros(1), lat=np.zeros(1)) + pset.execute(KernelCounter, dt=np.timedelta64(2, "s"), runtime=np.timedelta64(5, "s")) + assert pset.lon == 3 + print(pset.dt) + assert pset.dt == np.timedelta64(2, "s") + + @pytest.mark.parametrize("npart", [1, 100]) def test_execution_fail_python_exception(fieldset, npart): pset = ParticleSet(fieldset, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)) From 4c90bbc79661f2f7be2c4217c7f1072fc1e151fe Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Thu, 14 Aug 2025 20:44:40 +0200 Subject: [PATCH 54/58] Cleaning up Kernel for RK45 --- parcels/application_kernels/advection.py | 28 +++++++------ parcels/kernel.py | 37 +++++++++--------- tests/v4/test_advection.py | 50 ++++++++---------------- 3 files changed, 50 insertions(+), 65 deletions(-) diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index e3590dd0b..928da7d53 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -115,15 +115,7 @@ def AdvectionRK45(particle, fieldset, time): # pragma: no cover Time-step dt is halved if error is larger than fieldset.RK45_tol, and doubled if error is smaller than 1/10th of tolerance. """ - dt = particle.next_dt / np.timedelta64(1, "s") # TODO: improve API for converting dt to seconds - particle.next_dt = np.where( - dt > fieldset.RK45_max_dt, fieldset.RK45_max_dt * np.timedelta64(1, "s"), particle.next_dt - ) - repeat_particles = dt < fieldset.RK45_min_dt - particle.next_dt = np.where(repeat_particles, fieldset.RK45_min_dt * np.timedelta64(1, "s"), particle.next_dt) - particle.state = np.where(dt < fieldset.RK45_min_dt, StatusCode.Repeat, particle.state) - particle.dt = particle.next_dt - dt = np.where(dt > fieldset.RK45_max_dt, fieldset.RK45_max_dt, dt) + dt = particle.dt / np.timedelta64(1, "s") # TODO: improve API for converting dt to seconds c = [1.0 / 4.0, 3.0 / 8.0, 12.0 / 13.0, 1.0, 1.0 / 2.0] A = [ @@ -168,17 +160,27 @@ def AdvectionRK45(particle, fieldset, time): # pragma: no cover kappa = np.sqrt(np.pow(lon_5th - lon_4th, 2) + np.pow(lat_5th - lat_4th, 2)) good_particles = (kappa <= fieldset.RK45_tol) | (np.fabs(dt) <= np.fabs(fieldset.RK45_min_dt)) - particle.dlon += np.where(good_particles, lon_4th, 0) - particle.dlat += np.where(good_particles, lat_4th, 0) + particle.dlon += np.where(good_particles, lon_5th, 0) + particle.dlat += np.where(good_particles, lat_5th, 0) increase_dt_particles = ( good_particles & (kappa <= fieldset.RK45_tol / 10) & (np.fabs(dt * 2) <= np.fabs(fieldset.RK45_max_dt)) ) - particle.next_dt = np.where(increase_dt_particles, particle.next_dt * 2, particle.next_dt) + particle.dt = np.where(increase_dt_particles, particle.dt * 2, particle.dt) + particle.dt = np.where( + particle.dt > fieldset.RK45_max_dt * np.timedelta64(1, "s"), + fieldset.RK45_max_dt * np.timedelta64(1, "s"), + particle.dt, + ) particle.state = np.where(good_particles, StatusCode.Success, particle.state) repeat_particles = np.invert(good_particles) - particle.next_dt = np.where(repeat_particles, particle.next_dt / 2, particle.next_dt) + particle.dt = np.where(repeat_particles, particle.dt / 2, particle.dt) + particle.dt = np.where( + particle.dt < fieldset.RK45_min_dt * np.timedelta64(1, "s"), + fieldset.RK45_min_dt * np.timedelta64(1, "s"), + particle.dt, + ) particle.state = np.where(repeat_particles, StatusCode.Repeat, particle.state) diff --git a/parcels/kernel.py b/parcels/kernel.py index 6ded63299..4cd8f3777 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -238,27 +238,26 @@ def execute(self, pset, endtime, dt): if all(time_to_endtime <= 0): return StatusCode.Success - # keep a copy of dt in case it's changed below - pre_dt = pset.dt.copy() - - try: # Use next_dt from AdvectionRK45 if it is set - if compute_time_direction == 1: - pset.next_dt = np.maximum(np.minimum(pset.next_dt, time_to_endtime), 0) - else: - pset.next_dt = np.minimum(np.maximum(pset.next_dt, -time_to_endtime), 0) - except KeyError: - if compute_time_direction == 1: - pset.dt = np.maximum(np.minimum(pset.dt, time_to_endtime), 0) - else: - pset.dt = np.minimum(np.maximum(pset.dt, -time_to_endtime), 0) - - inds = (np.isin(pset.state, [StatusCode.Evaluate, StatusCode.Repeat])) & (pset.dt != 0) + # adapt dt to end exactly on endtime + if compute_time_direction == 1: + pset.dt = np.maximum(np.minimum(pset.dt, time_to_endtime), 0) + else: + pset.dt = np.minimum(np.maximum(pset.dt, -time_to_endtime), 0) + + # run kernels for all particles that need to be evaluated + evaluate_particles = (pset.state == StatusCode.Evaluate) & (pset.dt != 0) for f in self._pyfuncs: - # TODO remove "time" from kernel signature in v4; because it doesn't make sense for vectorized particles - f(pset[inds], self._fieldset, None) + f(pset[evaluate_particles], self._fieldset, None) - # revert to old dt - pset.dt = pre_dt + # check for particles that have to be repeated + repeat_particles = pset.state == StatusCode.Repeat + while np.any(repeat_particles): + f(pset[repeat_particles], self._fieldset, None) + repeat_particles = pset.state == StatusCode.Repeat + + # revert to original dt (unless in RK45 mode) + if not hasattr(self.fieldset, "RK45_tol"): + pset._data["dt"][:] = dt # Reset particle state for particles that signalled success and have not reached endtime yet particles_to_evaluate = (pset.state == StatusCode.Success) & (time_to_endtime > 0) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index a35b83af7..c83c46318 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -238,7 +238,7 @@ def test_radialrotation(npart=10): ("AdvDiffM1", 1e-2), ("RK4", 1e-5), ("RK4_3D", 1e-5), - ("RK45", 1e-5), + ("RK45", 1e-4), ], ) def test_moving_eddy(method, rtol): @@ -262,18 +262,13 @@ def test_moving_eddy(method, rtol): fieldset.add_constant("dres", 0.1) start_lon, start_lat, start_depth = 12000, 12500, 12500 - dt = np.timedelta64(3, "m") + dt = np.timedelta64(30, "m") if method == "RK45": - # Use RK45Particles to set next_dt - RK45Particles = Particle.add_variable(Variable("next_dt", initial=dt, dtype="timedelta64[s]")) - fieldset.add_constant("RK45_tol", 1e-3) + fieldset.add_constant("RK45_tol", rtol) - pclass = RK45Particles if method == "RK45" else Particle - pset = ParticleSet( - fieldset, pclass=pclass, lon=start_lon, lat=start_lat, depth=start_depth, time=np.timedelta64(0, "s") - ) - pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(6, "h")) + pset = ParticleSet(fieldset, lon=start_lon, lat=start_lat, depth=start_depth, time=np.timedelta64(0, "s")) + pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(1, "h")) def truth_moving(x_0, y_0, t): t /= np.timedelta64(1, "s") @@ -291,9 +286,9 @@ def truth_moving(x_0, y_0, t): @pytest.mark.parametrize( "method, rtol", [ - ("EE", 1e-2), + ("EE", 1e-1), ("RK4", 1e-5), - ("RK45", 2e-5), + ("RK45", 1e-4), ], ) def test_decaying_moving_eddy(method, rtol): @@ -305,16 +300,13 @@ def test_decaying_moving_eddy(method, rtol): fieldset = FieldSet([U, V, UV]) start_lon, start_lat = 10000, 10000 - dt = np.timedelta64(2, "m") + dt = np.timedelta64(30, "m") if method == "RK45": - # Use RK45Particles to set next_dt - RK45Particles = Particle.add_variable(Variable("next_dt", initial=dt, dtype="timedelta64[s]")) - fieldset.add_constant("RK45_tol", 1e-3) + fieldset.add_constant("RK45_tol", rtol) + fieldset.add_constant("RK45_min_dt", 60) - pclass = RK45Particles if method == "RK45" else Particle - - pset = ParticleSet(fieldset, pclass=pclass, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) + pset = ParticleSet(fieldset, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(1, "D")) def truth_moving(x_0, y_0, t): @@ -341,7 +333,7 @@ def truth_moving(x_0, y_0, t): "method, atol", [ ("RK4", 1), - ("RK45", 1), + ("RK45", 10), ], ) @pytest.mark.parametrize("grid_type", ["A"]) # TODO also implement C-grid once available @@ -368,19 +360,11 @@ def test_gyre_flowfields(method, grid_type, atol, flowfield): dt = np.timedelta64(1, "m") # TODO check these settings (and possibly increase) if method == "RK45": - # Use RK45Particles to set next_dt - SampleParticle = Particle.add_variable( - [ - Variable("p", initial=0.0, dtype=np.float32), - Variable("p_start", initial=0.0, dtype=np.float32), - Variable("next_dt", initial=dt, dtype="timedelta64[s]"), - ] - ) - fieldset.add_constant("RK45_tol", 1e-3) - else: - SampleParticle = Particle.add_variable( - [Variable("p", initial=0.0, dtype=np.float32), Variable("p_start", initial=0.0, dtype=np.float32)] - ) + fieldset.add_constant("RK45_tol", atol) + + SampleParticle = Particle.add_variable( + [Variable("p", initial=0.0, dtype=np.float32), Variable("p_start", initial=0.0, dtype=np.float32)] + ) def UpdateP(particle, fieldset, time): # pragma: no cover particle.p = fieldset.P[particle.time, particle.depth, particle.lat, particle.lon] From 161ad9ce5d906be8904edf4df7e4c320e4b96053 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Fri, 15 Aug 2025 07:03:41 +0200 Subject: [PATCH 55/58] updating advection tests for faster runtime --- tests/v4/test_advection.py | 73 +++++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/tests/v4/test_advection.py b/tests/v4/test_advection.py index c83c46318..7b209ab30 100644 --- a/tests/v4/test_advection.py +++ b/tests/v4/test_advection.py @@ -300,11 +300,11 @@ def test_decaying_moving_eddy(method, rtol): fieldset = FieldSet([U, V, UV]) start_lon, start_lat = 10000, 10000 - dt = np.timedelta64(30, "m") + dt = np.timedelta64(60, "m") if method == "RK45": fieldset.add_constant("RK45_tol", rtol) - fieldset.add_constant("RK45_min_dt", 60) + fieldset.add_constant("RK45_min_dt", 10 * 60) pset = ParticleSet(fieldset, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(1, "D")) @@ -328,28 +328,17 @@ def truth_moving(x_0, y_0, t): np.testing.assert_allclose(pset.lat_nextloop, exp_lat, rtol=rtol) -# TODO decrease atol for these tests once the C-grid is implemented @pytest.mark.parametrize( - "method, atol", + "method, rtol", [ - ("RK4", 1), - ("RK45", 10), + ("RK4", 0.1), + ("RK45", 0.1), ], ) @pytest.mark.parametrize("grid_type", ["A"]) # TODO also implement C-grid once available -@pytest.mark.parametrize("flowfield", ["stommel_gyre", "peninsula"]) -def test_gyre_flowfields(method, grid_type, atol, flowfield): +def test_stommelgyre_fieldset(method, rtol, grid_type): npart = 2 - if flowfield == "peninsula": - ds = peninsula_dataset(grid_type=grid_type) - start_lat = np.linspace(3e3, 47e3, npart) - start_lon = 3e3 * np.ones_like(start_lat) - runtime = np.timedelta64(1, "D") - else: - ds = stommel_gyre_dataset(grid_type=grid_type) - start_lon = np.linspace(10e3, 100e3, npart) - start_lat = np.ones_like(start_lon) * 5000e3 - runtime = np.timedelta64(2, "D") + ds = stommel_gyre_dataset(grid_type=grid_type) grid = XGrid.from_dataset(ds) U = Field("U", ds["U"], grid, interp_method=XLinear) V = Field("V", ds["V"], grid, interp_method=XLinear) @@ -357,10 +346,52 @@ def test_gyre_flowfields(method, grid_type, atol, flowfield): UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, P, UV]) - dt = np.timedelta64(1, "m") # TODO check these settings (and possibly increase) + dt = np.timedelta64(30, "m") + runtime = np.timedelta64(1, "D") + start_lon = np.linspace(10e3, 100e3, npart) + start_lat = np.ones_like(start_lon) * 5000e3 if method == "RK45": - fieldset.add_constant("RK45_tol", atol) + fieldset.add_constant("RK45_tol", rtol) + + SampleParticle = Particle.add_variable( + [Variable("p", initial=0.0, dtype=np.float32), Variable("p_start", initial=0.0, dtype=np.float32)] + ) + + def UpdateP(particle, fieldset, time): # pragma: no cover + particle.p = fieldset.P[particle.time, particle.depth, particle.lat, particle.lon] + particle.p_start = np.where(particle.time == 0, particle.p, particle.p_start) + + pset = ParticleSet(fieldset, pclass=SampleParticle, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) + pset.execute([kernel[method], UpdateP], dt=dt, runtime=runtime) + np.testing.assert_allclose(pset.p, pset.p_start, rtol=rtol) + + +@pytest.mark.parametrize( + "method, rtol", + [ + ("RK4", 5e-3), + ("RK45", 1e-4), + ], +) +@pytest.mark.parametrize("grid_type", ["A"]) # TODO also implement C-grid once available +def test_peninsula_fieldset(method, rtol, grid_type): + npart = 2 + ds = peninsula_dataset(grid_type=grid_type) + grid = XGrid.from_dataset(ds) + U = Field("U", ds["U"], grid, interp_method=XLinear) + V = Field("V", ds["V"], grid, interp_method=XLinear) + P = Field("P", ds["P"], grid, interp_method=XLinear) + UV = VectorField("UV", U, V) + fieldset = FieldSet([U, V, P, UV]) + + dt = np.timedelta64(30, "m") + runtime = np.timedelta64(1, "D") + start_lat = np.linspace(3e3, 47e3, npart) + start_lon = 3e3 * np.ones_like(start_lat) + + if method == "RK45": + fieldset.add_constant("RK45_tol", rtol) SampleParticle = Particle.add_variable( [Variable("p", initial=0.0, dtype=np.float32), Variable("p_start", initial=0.0, dtype=np.float32)] @@ -372,4 +403,4 @@ def UpdateP(particle, fieldset, time): # pragma: no cover pset = ParticleSet(fieldset, pclass=SampleParticle, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) pset.execute([kernel[method], UpdateP], dt=dt, runtime=runtime) - np.testing.assert_allclose(pset.p, pset.p_start, atol=atol) + np.testing.assert_allclose(pset.p, pset.p_start, rtol=rtol) From 558b46684dd0440a7b83538abd302eaf01205775 Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 19 Aug 2025 20:05:56 +0200 Subject: [PATCH 56/58] Update parcels/_core/utils/time.py Co-authored-by: Nick Hodgskin <36369090+VeckoTheGecko@users.noreply.github.com> --- parcels/_core/utils/time.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/parcels/_core/utils/time.py b/parcels/_core/utils/time.py index 94dff4671..97d0a861a 100644 --- a/parcels/_core/utils/time.py +++ b/parcels/_core/utils/time.py @@ -45,6 +45,9 @@ def __init__(self, left: T, right: T) -> None: self.left = left self.right = right + def __contains__(self, item: T) -> bool: + return self.left <= item <= self.right + def is_all_time_in_interval(self, time): item = np.atleast_1d(time) return (self.left <= item).all() and (item <= self.right).all() From d4fdc01e9913db51dd18e4eb1afcf2e6c8250b02 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 18:06:20 +0000 Subject: [PATCH 57/58] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- parcels/_core/utils/time.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parcels/_core/utils/time.py b/parcels/_core/utils/time.py index 97d0a861a..4473e87e4 100644 --- a/parcels/_core/utils/time.py +++ b/parcels/_core/utils/time.py @@ -47,7 +47,7 @@ def __init__(self, left: T, right: T) -> None: def __contains__(self, item: T) -> bool: return self.left <= item <= self.right - + def is_all_time_in_interval(self, time): item = np.atleast_1d(time) return (self.left <= item).all() and (item <= self.right).all() From 5d2bf9a25443193f5a5897bcdba2a72d7a6712de Mon Sep 17 00:00:00 2001 From: Erik van Sebille Date: Tue, 19 Aug 2025 20:14:04 +0200 Subject: [PATCH 58/58] Reverting that key in eval can be a ParticleSet As suggested in https://github.com/OceanParcels/Parcels/pull/2122#discussion_r2284992600 --- parcels/field.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/parcels/field.py b/parcels/field.py index 8c2a55c7a..7c2170ac9 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -18,7 +18,6 @@ ) from parcels.application_kernels.interpolation import UXPiecewiseLinearNode, XLinear, ZeroInterpolator from parcels.particle import KernelParticle -from parcels.particleset import ParticleSet from parcels.tools.converters import ( UnitConverter, unitconverters_map, @@ -36,9 +35,9 @@ def _deal_with_errors(error, key, vector_type: VectorType): - if isinstance(key, (ParticleSet, KernelParticle)): + if isinstance(key, KernelParticle): key.state = AllParcelsErrorCodes[type(error)] - elif isinstance(key[-1], (ParticleSet, KernelParticle)): + elif isinstance(key[-1], KernelParticle): key[-1].state = AllParcelsErrorCodes[type(error)] else: raise RuntimeError(f"{error}. Error could not be handled because particle was not part of the Field Sampling.") @@ -256,7 +255,7 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): def __getitem__(self, key): self._check_velocitysampling() try: - if isinstance(key, (KernelParticle, ParticleSet)): + if isinstance(key, KernelParticle): return self.eval(key.time, key.depth, key.lat, key.lon, key) else: return self.eval(*key) @@ -348,7 +347,7 @@ def eval(self, time: datetime, z, y, x, particle=None, applyConversion=True): def __getitem__(self, key): try: - if isinstance(key, (KernelParticle, ParticleSet)): + if isinstance(key, KernelParticle): return self.eval(key.time, key.depth, key.lat, key.lon, key) else: return self.eval(*key)