diff --git a/Dockerfile b/Dockerfile index e3f3548c5..ceed0e0d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -71,7 +71,6 @@ RUN pip install -e .[all] # check formatting RUN ruff trimesh -RUN black --check trimesh # run pytest wrapped with xvfb for simple viewer tests RUN xvfb-run pytest --cov=trimesh \ diff --git a/docs/content/contributing.md b/docs/content/contributing.md index 6eafd6d21..f8d48bdeb 100644 --- a/docs/content/contributing.md +++ b/docs/content/contributing.md @@ -71,9 +71,10 @@ if __name__ == '__main__': When you remove the embed and see the profile result you can then tweak the lines that are slow before finishing the function. ### Automatic Formatting -The only check in that's required to pass in CI is `ruff`, which I usually run with: +Trimesh uses `ruff` and `black`, configured in `pyproject.toml`, which you can run with: ``` ruff . --fix +black . ``` It can fix a lot of formatting issues automatically. We also periodically run `black` to autoformat the codebase. @@ -82,6 +83,7 @@ It can fix a lot of formatting issues automatically. We also periodically run `b Trimesh uses the [Sphinx Numpy-style](https://www.sphinx-doc.org/en/master/usage/extensions/example_numpy.html#example-numpy) docstrings which get parsed into the API reference page. + ## General Tips Python can be fast but only when you use it as little as possible. In general, if you ever have a block which loops through faces and vertices it will be basically unusable with even moderately sized meshes. All operations on face or vertex arrays should be vectorized numpy operations unless absolutely unavoidable. Profiling helps figure out what is slow, but some general advice: diff --git a/docs/content/install.md b/docs/content/install.md index 9a7697227..18c3a3cc4 100644 --- a/docs/content/install.md +++ b/docs/content/install.md @@ -24,8 +24,7 @@ pip install trimesh[all] ``` -Conda Packages -------------- +## Conda Packages If you prefer a `conda` environment, `trimesh` is available on `conda-forge` ([trimesh-feedstock repo](https://github.com/conda-forge/trimesh-feedstock)) @@ -34,20 +33,11 @@ If you install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/) ``` conda install -c conda-forge trimesh ``` - -Ubuntu-Debian Notes ------------------- -Blender and openSCAD are soft dependencies used for boolean operations with subprocess, you can get them with `apt`: - ``` -sudo apt-get install blender -``` - -Dependency Overview +## Dependency Overview -------------------- -Trimesh has a lot of soft-required upstream packages. We try to make sure they're active and big-ish. Here's a quick summary of what they're used for. +Trimesh has a lot of soft-required upstream packages, and we try to make sure they're actively maintained. Here's a quick summary of what they're used for: | Package | Description | Alternatives | Level | @@ -81,3 +71,16 @@ Trimesh has a lot of soft-required upstream packages. We try to make sure they'r |`pytest-cov`| A plugin to calculate test coverage. | | `test`| |`pyinstrument`| A sampling based profiler for performance tweaking. | | `test`| |`vhacdx`| A binding for VHACD which provides convex decompositions | | `recommend`| + +## Adding A Dependency + +If there's no way to implement something reasonably in vectorized Python, or there is a mature, minimal C++ or Rust implementation of something useful and complicated, we may add a dependency. If it's a major, active project with few dependencies (e.g. `jinja2`) that's probably fine. 
Otherwise, it's a lot more of a commitment than just implementing the function in Python. An example of this is `embree`, Intel's ray check engine: it is a super complicated thing to do well and is 50-100x faster than Python ray checks. + +There are a few projects that we've forked into the [`trimesh`](https://github.com/trimesh/) GitHub organization which you can take a look at. The general requirements for a new compiled dependency are: + +- is actively maintained and has an MIT/BSD compatible license. +- has all source code in the repository or as a submodule, i.e. no mysterious binary blobs. +- binding preferably uses [pybind11](https://pybind11.readthedocs.io/en/stable/index.html), [nanobind](https://github.com/wjakob/nanobind) or [maturin/PyO3](https://github.com/PyO3/maturin) for Rust projects. Cython is also OK but other options are preferable if possible. +- uses `cibuildwheel` to publish releases configured in `pyproject.toml`. +- has unit tests which run in CI. +- has minimal dependencies: ideally only `numpy`. \ No newline at end of file diff --git a/examples/offscreen_render.py b/examples/offscreen_render.py index 797ce8b91..a8db740a0 100644 --- a/examples/offscreen_render.py +++ b/examples/offscreen_render.py @@ -17,11 +17,11 @@ # a 45 degree homogeneous rotation matrix around # the Y axis at the scene centroid rotate = trimesh.transformations.rotation_matrix( - angle=np.radians(10.0), direction=[0, 1, 0], point=scene.centroid + angle=np.radians(30.0), direction=[1, 0, 0], point=scene.centroid ) - for i in range(4): - trimesh.constants.log.info("Saving image %d", i) + for i in range(10): + trimesh.constants.log.info(f"Saving image {i}") # rotate the camera view transform camera_old, _geometry = scene.graph[scene.camera.name] diff --git a/pyproject.toml b/pyproject.toml index 07a1979c6..2b4aa8c33 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = ["setuptools >= 61.0", "wheel"] [project] name = "trimesh" requires-python = ">=3.7" -version = "4.0.0" +version = "4.0.1" authors = [{name = "Michael Dawson-Haggerty", email = "mikedh@kerfed.com"}] license = {file = "LICENSE.md"} description = "Import, export, process, analyze and view triangular meshes." 
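The `General Tips` section of `docs/content/contributing.md` above argues that per-face Python loops should be replaced with vectorized numpy operations. A minimal sketch of the difference it describes; the icosphere and the centroid computation here are illustrative assumptions, not part of this patch:

```
import numpy as np
import trimesh

# any small example mesh will do here
mesh = trimesh.creation.icosphere()

# slow: a pure-Python loop visiting every face
centroids_loop = np.array(
    [mesh.vertices[face].mean(axis=0) for face in mesh.faces]
)

# fast: one fancy-index into an (n, 3, 3) array and a single reduction
centroids_vectorized = mesh.vertices[mesh.faces].mean(axis=1)

assert np.allclose(centroids_loop, centroids_vectorized)
```

On even a moderately sized mesh the vectorized version is typically orders of magnitude faster, which is the point the contributing guide is making.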
diff --git a/tests/regression.py b/tests/regression.py index e554dec99..587a24362 100644 --- a/tests/regression.py +++ b/tests/regression.py @@ -31,7 +31,7 @@ def typical_application(): faces = mesh.facets[mesh.facets_area.argmax()] outline = mesh.outline(faces) # NOQA - smoothed = mesh.smoothed() # NOQA + smoothed = mesh.smooth_shaded # NOQA assert mesh.volume > 0.0 diff --git a/tests/test_cache.py b/tests/test_cache.py index 6e515c5ba..d4bc47154 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -356,6 +356,24 @@ def test_validate(self): m.process(validate=True) assert m.triangles.shape == (1, 3, 3) + def test_smooth_shade(self, count=10): + # test to make sure the smooth shaded copy is cached correctly + mesh = g.trimesh.creation.cylinder(radius=1, height=10) + scene = g.trimesh.Scene({"mesh": mesh}) + + initial = scene.camera_transform.copy() + + hashes = set() + for n in range(count): + angle = g.np.pi * n / count + matrix = g.trimesh.transformations.rotation_matrix(angle, [1, 0, 0]) + scene.geometry["mesh"].apply_transform(matrix) + hashes.add(hash(scene.geometry["mesh"].smooth_shaded)) + scene.camera_transform = initial + + # the smooth shade should be unique for every transform + assert len(hashes) == count + if __name__ == "__main__": g.trimesh.util.attach_to_log() diff --git a/tests/test_color.py b/tests/test_color.py index 88f29387e..83e95baad 100644 --- a/tests/test_color.py +++ b/tests/test_color.py @@ -123,7 +123,7 @@ def test_smooth(self): m = g.get_mesh("featuretype.STL") # will put smoothed mesh into visuals cache - s = m.smoothed() + s = m.smooth_shaded # every color should be default color assert s.visual.face_colors.ptp(axis=0).max() == 0 @@ -131,16 +131,16 @@ def test_smooth(self): m.visual.face_colors[0] = [255, 0, 0, 255] # cache should be dumped yo - s1 = m.smoothed() + s1 = m.smooth_shaded assert s1.visual.face_colors.ptp(axis=0).max() != 0 # do the same check on vertex color m = g.get_mesh("featuretype.STL") - s = m.smoothed() + s = m.smooth_shaded # every color should be default color assert s.visual.vertex_colors.ptp(axis=0).max() == 0 m.visual.vertex_colors[g.np.arange(10)] = [255, 0, 0, 255] - s1 = m.smoothed() + s1 = m.smooth_shaded assert s1.visual.face_colors.ptp(axis=0).max() != 0 def test_vertex(self): diff --git a/tests/test_graph.py b/tests/test_graph.py index a389e2383..035e3f10f 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -113,7 +113,7 @@ def test_smoothed(self): for name in ["ADIS16480.STL", "featuretype.STL"]: mesh = g.get_mesh(name) - assert len(mesh.faces) == len(mesh.smoothed().faces) + assert len(mesh.faces) == len(mesh.smooth_shaded.faces) def test_engines(self): edges = g.np.arange(10).reshape((-1, 2)) diff --git a/tests/test_grouping.py b/tests/test_grouping.py index 583d4bde0..52a75d160 100644 --- a/tests/test_grouping.py +++ b/tests/test_grouping.py @@ -78,6 +78,22 @@ def test_blocks(self): assert set(result[0]) == {1} assert all(a[i].all() for i in result) + # make sure wrapping works if all values are True + arr = g.np.ones(10, dtype=bool) + result = blocks(arr, min_len=1, wrap=True, only_nonzero=True) + assert len(result) == 1 + assert set(result[0]) == set(range(10)) + + # and all false + arr = g.np.zeros(10, dtype=bool) + result = blocks(arr, min_len=1, wrap=True, only_nonzero=True) + assert len(result) == 0 + + arr = g.np.zeros(10, dtype=bool) + result = blocks(arr, min_len=1, wrap=True, only_nonzero=False) + assert len(result) == 1 + assert set(result[0]) == set(range(10)) + def test_block_wrap(self): """ 
Test blocks with wrapping diff --git a/tests/test_interval.py b/tests/test_interval.py index 245c13919..8e942d11f 100644 --- a/tests/test_interval.py +++ b/tests/test_interval.py @@ -15,25 +15,42 @@ def test_intersection(self): [[5, 15], [7, 10]], [[5, 10], [10, 9]], [[0, 1], [0.9, 10]], - ] + [[1000, 1001], [2000, 2001]], + ], + dtype=g.np.float64, ) - tru_hit = [False, False, False, True, True, True, True] - tru_int = g.np.array( - [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [10, 20], [7, 10], [9, 10], [0.9, 1.0]] + + # true intersection ranges + truth = g.np.array( + [ + [0.0, 0.0], + [0.0, 0.0], + [0.0, 0.0], + [10, 20], + [7, 10], + [9, 10], + [0.9, 1.0], + [0, 0], + ], + dtype=g.np.float64, ) - func = g.trimesh.interval.intersection + intersection = g.trimesh.interval.intersection + union = g.trimesh.interval.union # check the single- interval results - for ab, h, i in zip(pairs, tru_hit, tru_int): - r_h, r_i = func(*ab) - assert g.np.allclose(r_i, i) - assert r_h == h + for ab, tru in zip(pairs, truth): + result = intersection(*ab) + assert g.np.allclose(result, tru) # check the vectorized multiple interval results - r_h, r_i = func(pairs[:, 0, :], pairs[:, 1, :]) - assert g.np.allclose(r_h, tru_hit) - assert g.np.allclose(r_i, tru_int) + inter = intersection(pairs[:, 0, :], pairs[:, 1, :]) + + assert g.np.allclose(truth, inter) + + # now just run a union on these for the fun of it + u = union(pairs.reshape((-1, 2))) + assert g.np.allclose(u, [[0.0, 21.0], [1000.0, 1001.0], [2000.0, 2001.0]]) if __name__ == "__main__": diff --git a/tests/test_mesh.py b/tests/test_mesh.py index 33bcf927f..3aac4d58f 100644 --- a/tests/test_mesh.py +++ b/tests/test_mesh.py @@ -78,7 +78,7 @@ def test_meshes(self): # on a Path3D object test = outline.paths # NOQA - smoothed = mesh.smoothed() # NOQA + smoothed = mesh.smooth_shaded # NOQA assert abs(mesh.volume) > 0.0 diff --git a/tests/test_segments.py b/tests/test_segments.py index 45daa87b1..7a023155c 100644 --- a/tests/test_segments.py +++ b/tests/test_segments.py @@ -113,6 +113,31 @@ def test_resample(self): # make sure overall length hasn't changed assert g.np.isclose(length(res), length(seg)) + def test_clean(self): + from trimesh.path.segments import clean, resample + + seg = g.np.array( + [[[0, 0], [1, 0]], [[1, 0], [1, 1]], [[1, 1], [0, 1]], [[0, 1], [0, 0]]], + dtype=g.np.float64, + ) + + c = clean(seg) + assert len(seg) == len(c) + # bounding box should be the same + assert g.np.allclose( + c.reshape((-1, 2)).min(axis=0), seg.reshape((-1, 2)).min(axis=0) + ) + assert g.np.allclose( + c.reshape((-1, 2)).max(axis=0), seg.reshape((-1, 2)).max(axis=0) + ) + + # resample to shorten + r = resample(seg, maxlen=0.3) + assert r.shape == (16, 2, 2) + # after cleaning should be back to 4 segments + rc = clean(r) + assert rc.shape == (4, 2, 2) + def test_svg(self): from trimesh.path.segments import to_svg diff --git a/tests/test_smooth.py b/tests/test_smooth.py index 6b0144e5a..2f81e1384 100644 --- a/tests/test_smooth.py +++ b/tests/test_smooth.py @@ -7,7 +7,7 @@ class SmoothTest(g.unittest.TestCase): def test_smooth(self): m = g.get_mesh("chair_model.obj", force="mesh") - s = m.smoothed() + s = m.smooth_shaded ori = g.np.hstack((m.visual.uv, m.vertices)) check = g.np.hstack((s.visual.uv, s.vertices)) diff --git a/trimesh/base.py b/trimesh/base.py index 8ef060d1f..f8f9ca2e7 100644 --- a/trimesh/base.py +++ b/trimesh/base.py @@ -37,7 +37,7 @@ units, util, ) -from .constants import log, log_time, tol +from .constants import log, tol from .exceptions import 
ExceptionWrapper from .exchange.export import export_mesh from .parent import Geometry3D @@ -277,7 +277,7 @@ def mutable(self) -> bool: return self._data.mutable @mutable.setter - def mutable(self, value: bool): + def mutable(self, value: bool) -> None: """ Set the mutability of the current mesh. @@ -342,7 +342,7 @@ def faces_sparse(self) -> coo_matrix: return sparse @property - def face_normals(self): + def face_normals(self) -> NDArray[float64]: """ Return the unit normal vector for each face. @@ -394,7 +394,7 @@ def face_normals(self): return padded @face_normals.setter - def face_normals(self, values): + def face_normals(self, values: NDArray[float64]) -> None: """ Assign values to face normals. @@ -434,7 +434,7 @@ def face_normals(self, values): self._cache["face_normals"] = values @property - def vertices(self): + def vertices(self) -> NDArray[float64]: """ The vertices of the mesh. @@ -451,7 +451,7 @@ def vertices(self): return self._data.get("vertices", np.empty(shape=(0, 3), dtype=float64)) @vertices.setter - def vertices(self, values): + def vertices(self, values: NDArray[float64]): """ Assign vertex values to the mesh. @@ -465,7 +465,7 @@ def vertices(self, values): self._data["vertices"] = np.asanyarray(values, order="C", dtype=float64) @caching.cache_decorator - def vertex_normals(self): + def vertex_normals(self) -> NDArray[float64]: """ The vertex normals of the mesh. If the normals were loaded we check to make sure we have the same number of vertex @@ -491,7 +491,7 @@ def vertex_normals(self): return vertex_normals @vertex_normals.setter - def vertex_normals(self, values: NDArray[float64]): + def vertex_normals(self, values: NDArray[float64]) -> None: """ Assign values to vertex normals. @@ -608,7 +608,7 @@ def centroid(self) -> NDArray[float64]: return centroid @property - def center_mass(self): + def center_mass(self) -> NDArray[float64]: """ The point in space which is the center of mass/volume. @@ -620,7 +620,7 @@ def center_mass(self): return self.mass_properties.center_mass @center_mass.setter - def center_mass(self, value): + def center_mass(self, value: NDArray[float64]) -> None: """ Override the point in space which is the center of mass and volume. @@ -648,7 +648,7 @@ def density(self) -> float: return self.mass_properties.density @density.setter - def density(self, value: float): + def density(self, value: float) -> None: """ Set the density of the primitive. @@ -1138,9 +1138,9 @@ def merge_vertices( self, merge_tex: Optional[bool] = None, merge_norm: Optional[bool] = None, - digits_vertex: None = None, - digits_norm: None = None, - digits_uv: None = None, + digits_vertex: Optional[bool] = None, + digits_norm: Optional[bool] = None, + digits_uv: Optional[bool] = None, ) -> None: """ Removes duplicate vertices grouped by position and @@ -1336,7 +1336,7 @@ def remove_duplicate_faces(self) -> None: ) self.update_faces(self.unique_faces()) - def rezero(self): + def rezero(self) -> None: """ Translate the mesh so that all vertex vertices are positive. @@ -1709,7 +1709,7 @@ def remove_degenerate_faces(self, height: float = tol.merge) -> None: ) self.update_faces(self.nondegenerate_faces(height=height)) - def nondegenerate_faces(self, height=tol.merge) -> NDArray[bool]: + def nondegenerate_faces(self, height: float = tol.merge) -> NDArray[bool]: """ Remove degenerate faces (faces without 3 unique vertex indices) from the current mesh. 
@@ -1855,7 +1855,7 @@ def facets_on_hull(self) -> NDArray[bool]: return on_hull - def fix_normals(self, multibody: Optional[bool] = None): + def fix_normals(self, multibody: Optional[bool] = None) -> None: """ Find and fix problems with self.face_normals and self.faces winding direction. @@ -1885,7 +1885,7 @@ def fill_holes(self) -> bool: """ return repair.fill_holes(self) - def register(self, other, **kwargs): + def register(self, other: Geometry3D, **kwargs): """ Align a mesh with another mesh or a PointCloud using the principal axes of inertia as a starting point which @@ -1917,7 +1917,11 @@ return mesh_to_other, cost def compute_stable_poses( - self, center_mass=None, sigma=0.0, n_samples=1, threshold=0.0 + self, + center_mass: Optional[NDArray[float64]] = None, + sigma: float = 0.0, + n_samples: int = 1, + threshold: float = 0.0, ): """ Computes stable orientations of a mesh and their quasi-static probabilities. @@ -2112,38 +2116,48 @@ def subdivide_loop(self, iterations=None): result = Trimesh(vertices=new_vertices, faces=new_faces, process=False) return result - @log_time def smoothed(self, **kwargs): """ - Return a version of the current mesh which will render - nicely, without changing source mesh. + DEPRECATED: use `mesh.smooth_shaded` or `trimesh.graph.smooth_shade(mesh)` + """ + warnings.warn( + "`mesh.smoothed()` is deprecated and will be removed in March 2024: " + + "use `mesh.smooth_shaded` or `trimesh.graph.smooth_shade(mesh)`", + category=DeprecationWarning, + stacklevel=2, + ) + # run smoothing + return self.smooth_shaded - Parameters - ------------- - angle : float or None - Angle in radians face pairs with angles - smaller than this will appear smoothed - facet_minarea : float or None - Minimum area fraction to consider - IE for `facets_minarea=25` only facets larger - than `mesh.area / 25` will be considered. + @property + def smooth_shaded(self): + """ + Smooth shading in OpenGL relies on which vertices are shared; + this property will disconnect regions above an angle threshold + and return a non-watertight version which will look better + in an OpenGL rendering context. + + If you would like to use non-default arguments, see `graph.smooth_shade`. Returns --------- - smoothed : trimesh.Trimesh - Non watertight version of current mesh - which will render nicely with smooth shading + smooth_shaded : trimesh.Trimesh + Non watertight version of current mesh. """ - - # smooth should be recomputed if visuals change + # key this also by the visual properties + # but store it in the mesh cache self.visual._verify_hash() - cached = self.visual._cache["smoothed"] - if cached is not None: - return cached - # run smoothing - smoothed = graph.smoothed(self, **kwargs) - self.visual._cache["smoothed"] = smoothed - return smoothed + + cache = self.visual._cache + # needs to be dumped whenever visual or mesh changes + key = f"smooth_shaded_{hash(self.visual)}_{hash(self)}" + if key in cache: + return cache[key] + smooth = graph.smooth_shade(self) + + # store it in the mesh cache which dumps when vertices change + cache[key] = smooth + return smooth @property def visual(self): @@ -2216,7 +2230,12 @@ def section( return path - def section_multiplane(self, plane_origin, plane_normal, heights): + def section_multiplane( + self, + plane_origin: NDArray[float64], + plane_normal: NDArray[float64], + heights: NDArray[float64], + ): """ Return multiple parallel cross sections of the current mesh in 2D.
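Since `Trimesh.smoothed()` above now only warns and delegates to the new cached `smooth_shaded` property, a short migration sketch may be useful; the cylinder and the 20-degree angle are illustrative values, not defaults taken from this patch:

```
import numpy as np
import trimesh

mesh = trimesh.creation.cylinder(radius=1.0, height=10.0)

# old spelling: still works, but now emits a DeprecationWarning
# shaded = mesh.smoothed()

# new spelling: a cached property using the default arguments
shaded = mesh.smooth_shaded

# for non-default arguments call the graph function directly
shaded_sharp = trimesh.graph.smooth_shade(mesh, angle=np.radians(20.0))
```

The property is keyed on both the visual and mesh hashes, so repeated accesses reuse the cached copy until either one changes, which is what the new `test_smooth_shade` case in `tests/test_cache.py` exercises.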
diff --git a/trimesh/graph.py b/trimesh/graph.py index a816402d6..a9160d73b 100644 --- a/trimesh/graph.py +++ b/trimesh/graph.py @@ -9,12 +9,14 @@ """ import collections +import warnings import numpy as np from . import exceptions, grouping, util from .constants import log, tol from .geometry import faces_to_edges +from .typed import Optional try: from scipy.sparse import coo_matrix, csgraph @@ -741,9 +743,24 @@ def neighbors(edges, max_index=None, directed=False): return array -def smoothed(mesh, angle=None, facet_minarea=10): +def smoothed(*args, **kwargs): """ - Return a non- watertight version of the mesh which + DEPRECATED: use `trimesh.graph.smooth_shade(mesh, ...)` + """ + warnings.warn( + "`trimesh.graph.smoothed` is deprecated and will be removed in March 2024: " + + "use `trimesh.graph.smooth_shade(mesh, ...)`", + category=DeprecationWarning, + stacklevel=2, + ) + return smooth_shade(*args, **kwargs) + + +def smooth_shade( + mesh, angle: Optional[float] = None, facet_minarea: Optional[float] = 10.0 +): + """ + Return a non-watertight version of the mesh which will render nicely with smooth shading by disconnecting faces at sharp angles to each other. diff --git a/trimesh/grouping.py b/trimesh/grouping.py index 44a6c12c1..36abcfca2 100644 --- a/trimesh/grouping.py +++ b/trimesh/grouping.py @@ -781,6 +781,10 @@ def blocks(data, min_len=2, max_len=np.inf, wrap=False, digits=None, only_nonzer if only_nonzero and not bool(data[0]): return blocks + # if all values are True or False we can exit + if len(blocks) == 1 and len(blocks[0]) == len(data): + return blocks + # so now first point equals last point, so the cases are: # - first and last point are in a block: combine two blocks # - first OR last point are in block: add other point to block diff --git a/trimesh/interval.py b/trimesh/interval.py index 34fa856e3..73782d5bf 100644 --- a/trimesh/interval.py +++ b/trimesh/interval.py @@ -8,114 +8,100 @@ import numpy as np +from .typed import NDArray, float64 -def check(a, b, digits): + +def intersection(a: NDArray[float64], b: NDArray[float64]) -> NDArray[float64]: """ - Check input ranges, convert them to vector form, - and get a fixed precision integer version of them. + Given pairs of ranges, return the range + where each pair overlaps. Parameters -------------- - a : (2, ) or (2, n) float + a : (2, ) or (n, 2) Start and end of a 1D interval - b : (2, ) or (2, n) float + b : (2, ) or (n, 2) Start and end of a 1D interval - digits : int - How many digits to consider Returns -------------- - a : (2, n) float - Ranges as vector - b : (2, n) float - Ranges as vector - a_int : (2, n) int64 - Ranges rounded to digits, as vector - b_int : (2, n) int64 - Ranges rounded to digits, as vector - is_1D : bool - If True, input was single pair of ranges + inter : (2, ) or (n, 2) float + The intersection of each pair of ranges; + if a pair does not overlap `inter.ptp(axis=1)` will be zero.
""" a = np.array(a, dtype=np.float64) b = np.array(b, dtype=np.float64) - if a.shape != b.shape or a.shape[-1] != 2: - raise ValueError("ranges must be identical and (2,)!") - - # if input was single interval reshape it here - is_1D = False - if len(a.shape) == 1: - a = a.reshape((-1, 2)) - b = b.reshape((-1, 2)) - is_1D = True + # convert to vectorized form + is_1D = a.shape == (2,) + a = a.reshape((-1, 2)) + b = b.reshape((-1, 2)) - # make sure ranges are sorted + # make sure they're min-max a.sort(axis=1) b.sort(axis=1) + a_low, a_high = a.T + b_low, b_high = b.T - # compare in fixed point as integers - a_int = (a * 10**digits).round().astype(np.int64) - b_int = (b * 10**digits).round().astype(np.int64) - - return a, b, a_int, b_int, is_1D + # do the checks + check = np.logical_not(np.logical_or(b_low >= a_high, a_low >= b_high)) + overlap = np.zeros(a.shape, dtype=np.float64) + overlap[check] = np.column_stack( + ( + np.array([a_low[check], b_low[check]]).max(axis=0), + np.array([a_high[check], b_high[check]]).min(axis=0), + ) + ) + if is_1D: + return overlap[0] -def intersection(a, b, digits=8): - """ - Given a pair of ranges, merge them in to - one range if they overlap at all + return overlap - Parameters - -------------- - a : (2, ) float - Start and end of a 1D interval - b : (2, ) float - Start and end of a 1D interval - digits : int - How many digits to consider - Returns - -------------- - intersects : bool or (n,) bool - Indicates if the ranges overlap at all - new_range : (2, ) or (2, 2) float - The unioned range from the two inputs, - or both of the original ranges if not overlapping +def union(intervals: NDArray[float64], sort: bool = True) -> NDArray[float64]: """ - # check shape and convert - a, b, a_int, b_int, is_1D = check(a, b, digits) - - # what are the starting and ending points of the overlap - overlap = np.zeros(a.shape, dtype=np.float64) - - # A fully overlaps B - current = np.logical_and(a_int[:, 0] <= b_int[:, 0], a_int[:, 1] >= b_int[:, 1]) - overlap[current] = b[current] - - # B fully overlaps A - current = np.logical_and(a_int[:, 0] >= b_int[:, 0], a_int[:, 1] <= b_int[:, 1]) - overlap[current] = a[current] + For array of multiple intervals union them all into + the subset of intervals. - # A starts B ends - # A:, 0 B:, 0 A:, 1 B:, 1 - current = np.logical_and( - np.logical_and(a_int[:, 0] <= b_int[:, 0], b_int[:, 0] < a_int[:, 1]), - a_int[:, 1] < b_int[:, 1], - ) - overlap[current] = np.column_stack([b[current][:, 0], a[current][:, 1]]) - - # B starts A ends - # B:, 0 A:, 0 B:, 1 A:, 1 - current = np.logical_and( - np.logical_and(b_int[:, 0] <= a_int[:, 0], a_int[:, 0] < b_int[:, 1]), - b_int[:, 1] < a_int[:, 1], - ) - overlap[current] = np.column_stack([a[current][:, 0], b[current][:, 1]]) + For example: + `intervals = [[1,2], [2,3]] -> [[1, 3]]` + `intervals = [[1,2], [2.5,3]] -> [[1, 2], [2.5, 3]]` - # is range overlapping at all - intersects = overlap.ptp(axis=1) > 10**-digits - if is_1D: - return intersects[0], overlap[0] + Parameters + ------------ + intervals : (n, 2) + Pairs of `(min, max)` values. + sort + If the array is already ordered into (min, max) pairs + and then pairs sorted by minimum value you can skip the + sorting in this function. 
- return intersects, overlap + Returns + ---------- + unioned : (m, 2) + New intervals where `m <= n` + """ + if len(intervals) == 0: + return np.empty(0) + + # if the intervals have not been pre-sorted we should apply our sorting logic + # you would only skip this if you are subsetting a larger list elsewhere. + if sort: + # copy inputs and make sure they are (min, max) pairs + intervals = np.sort(intervals, axis=1) + # order them by lowest starting point + intervals = intervals[intervals[:, 0].argsort()] + + # we know we will have at least one interval + unions = [intervals[0]] + + for begin, end in intervals[1:]: + if unions[-1][1] >= begin: + # + unions[-1][1] = max(unions[-1][1], end) + else: + unions.append([begin, end]) + + return np.array(unions) diff --git a/trimesh/path/segments.py b/trimesh/path/segments.py index 7ab3ebdee..566be34ab 100644 --- a/trimesh/path/segments.py +++ b/trimesh/path/segments.py @@ -7,11 +7,13 @@ import numpy as np -from .. import geometry, grouping, interval, transformations, util +from .. import geometry, interval, transformations, util from ..constants import tol +from ..grouping import group_rows, unique_rows +from ..typed import NDArray, float64 -def segments_to_parameters(segments): +def segments_to_parameters(segments: NDArray[float64]): """ For 3D line segments defined by two points, turn them in to an origin defined as the closest point along @@ -56,7 +58,9 @@ def segments_to_parameters(segments): return origins, vectors, parameters -def parameters_to_segments(origins, vectors, parameters): +def parameters_to_segments( + origins: NDArray[float64], vectors: NDArray[float64], parameters: NDArray[float64] +): """ Convert a parametric line segment representation to a two point line segment representation @@ -146,6 +150,44 @@ def colinear_pairs(segments, radius=0.01, angle=0.01, length=None): return colinear +def clean(segments: NDArray[float64], digits: int = 10) -> NDArray[float64]: + """ + Clean up line segments by unioning the ranges of colinear segments. + + Parameters + ------------ + segments : (n, 2, 2) or (n, 2, 3) + Line segments in space. + digits + How many digits to consider. 
+ + Returns + ----------- + cleaned : (m, 2, 2) or (m, 2, 3) + Where `m <= n` + """ + # convert segments to parameterized origins + # which are the closest point on the line to + # the actual zero- origin + origins, vectors, param = segments_to_parameters(segments) + + # make sure parameters are in min-max order + param.sort(axis=1) + + # collect new unified paramameters + p, o, v = [], [], [] + for g in group_rows(np.column_stack((origins, vectors)), digits=digits): + # union the intervals sorting ourselves to skip the `sort(axis=1)` we did above + group = param[g] + u = interval.union(group[group[:, 0].argsort()], sort=False) + p.extend(u) + # use the origins for the subsetted union + o.extend(origins[g[: len(u)]]) + v.extend(vectors[g[: len(u)]]) + + return parameters_to_segments(o, v, p) + + def split(segments, points, atol=1e-5): """ Find any points that lie on a segment (not an endpoint) @@ -231,15 +273,15 @@ def unique(segments, digits=5): segments = np.asanyarray(segments, dtype=np.float64) # find segments as unique indexes so we can find duplicates - inverse = grouping.unique_rows( - segments.reshape((-1, segments.shape[2])), digits=digits - )[1].reshape((-1, 2)) + inverse = unique_rows(segments.reshape((-1, segments.shape[2])), digits=digits)[ + 1 + ].reshape((-1, 2)) # make sure rows are sorted inverse.sort(axis=1) # remove segments where both indexes are the same mask = np.zeros(len(segments), dtype=bool) # only include the first occurrence of a segment - mask[grouping.unique_rows(inverse)[0]] = True + mask[unique_rows(inverse)[0]] = True # remove segments that are zero-length mask[inverse[:, 0] == inverse[:, 1]] = False # apply the unique mask @@ -248,68 +290,6 @@ def unique(segments, digits=5): return unique -def overlap(origins, vectors, params): - """ - Find the overlap of two parallel line segments. - - Parameters - ------------ - origins : (2, 3) float - Origin points of lines in space - vectors : (2, 3) float - Unit direction vectors of lines - params : (2, 2) float - Two (start, end) distance pairs - - Returns - ------------ - length : float - Overlapping length - overlap : (n, 2, 3) float - Line segments for overlapping distance - """ - # copy inputs and make sure shape is correct - origins = np.array(origins).reshape((2, 3)) - vectors = np.array(vectors).reshape((2, 3)) - params = np.array(params).reshape((2, 2)) - - if tol.strict: - # convert input to parameters before flipping - # to make sure we didn't screw it up - truth = parameters_to_segments(origins, vectors, params) - - # this function only works on parallel lines - dot = np.dot(*vectors) - assert np.isclose(np.abs(dot), 1.0, atol=0.01) - - # if two vectors are reversed - if dot < 0.0: - # reverse direction vector - vectors[1] *= -1.0 - # negate parameters - params[1] *= -1.0 - - if tol.strict: - # do a check to make sure our reversal didn't - # inadvertently give us incorrect segments - assert np.allclose(truth, parameters_to_segments(origins, vectors, params)) - - # merge the parameter ranges - ok, new_range = interval.intersection(*params) - - if not ok: - return 0.0, np.array([]) - - # create the overlapping segment pairs (2, 2, 3) - segments = np.array( - [o + v * new_range.reshape((-1, 1)) for o, v in zip(origins, vectors)] - ) - # get the length of the new range - length = new_range.ptp() - - return length, segments - - def extrude(segments, height, double_sided=False): """ Extrude 2D line segments into 3D triangles. 
@@ -336,11 +316,16 @@ def extrude(segments, height, double_sided=False): # we are creating two vertices triangles for every 2D line segment # on the segments of the 2D triangulation - vertices = np.tile(segments.reshape((-1, 2)), 2).reshape((-1, 2)) - vertices = np.column_stack((vertices, np.tile([0, height, 0, height], len(segments)))) - faces = np.tile([3, 1, 2, 2, 1, 0], (len(segments), 1)) - faces += np.arange(len(segments)).reshape((-1, 1)) * 4 - faces = faces.reshape((-1, 3)) + vertices = np.column_stack( + ( + np.tile(segments.reshape((-1, 2)), 2).reshape((-1, 2)), + np.tile([0, height, 0, height], len(segments)), + ) + ) + faces = ( + np.tile([3, 1, 2, 2, 1, 0], (len(segments), 1)) + + np.arange(len(segments)).reshape((-1, 1)) * 4 + ).reshape((-1, 3)) if double_sided: # stack so they will render from the back @@ -383,7 +368,7 @@ def resample(segments, maxlen, return_index=False, return_count=False): Parameters ------------- - segments : (n, 2, 2) float + segments : (n, 2, 2|3) float 2D line segments maxlen : float The maximum length of a line segment @@ -394,7 +379,7 @@ def resample(segments, maxlen, return_index=False, return_count=False): Returns ------------- - resampled : (m, 2, 3) float + resampled : (m, 2, 2|3) float Line segments where no segment is longer than maxlen index : (m,) int [OPTIONAL] The index of segments resampled came from @@ -404,6 +389,10 @@ def resample(segments, maxlen, return_index=False, return_count=False): # check arguments maxlen = float(maxlen) segments = np.array(segments, dtype=np.float64) + if len(segments.shape) != 3: + raise ValueError(f"{segments.shape} != (n, 2, 2|3)") + + dimension = segments.shape[2] # shortcut for endpoints pt1 = segments[:, 0] @@ -429,13 +418,13 @@ def resample(segments, maxlen, return_index=False, return_count=False): # the vector for each incremental length increment = vec[mask] / split # stack the increment vector into the shape needed - v = tile(increment, split + 1).reshape((-1, 3)) * tile( + v = tile(increment, split + 1).reshape((-1, dimension)) * tile( np.arange(split + 1), len(increment) ).reshape((-1, 1)) # stack the origin points correctly - o = tile(pt1[mask], split + 1).reshape((-1, 3)) + o = tile(pt1[mask], split + 1).reshape((-1, dimension)) # now get each segment as an (split, 3) polyline - poly = (o + v).reshape((-1, split + 1, 3)) + poly = (o + v).reshape((-1, split + 1, dimension)) # save the resulting segments # magical slicing is equivalent to: # > [p[stack] for p in poly] diff --git a/trimesh/rendering.py b/trimesh/rendering.py index 8aea52f97..1f81f882e 100644 --- a/trimesh/rendering.py +++ b/trimesh/rendering.py @@ -69,6 +69,7 @@ def mesh_to_vertexlist(mesh, group=None, smooth=True, smooth_threshold=60000): -------------- args : (7,) tuple Args for vertex list constructor + """ if hasattr(mesh.visual, "uv"): @@ -107,7 +108,7 @@ def mesh_to_vertexlist(mesh, group=None, smooth=True, smooth_threshold=60000): # if we have a small number of faces and colors defined # smooth the mesh by merging vertices of faces below # the threshold angle - mesh = mesh.smoothed() + mesh = mesh.smooth_shaded vertex_count = len(mesh.vertices) normals = mesh.vertex_normals.reshape(-1).tolist() faces = mesh.faces.reshape(-1).tolist() @@ -134,6 +135,7 @@ def mesh_to_vertexlist(mesh, group=None, smooth=True, smooth_threshold=60000): ("n3f/static", normals), color_gl, ) + return args diff --git a/trimesh/scene/scene.py b/trimesh/scene/scene.py index d9326763e..1fb80d998 100644 --- a/trimesh/scene/scene.py +++ 
b/trimesh/scene/scene.py @@ -252,6 +252,7 @@ def __hash__(self): """ # avoid accessing attribute in tight loop geometry = self.geometry + # hash of geometry and transforms # start with the last modified time of the scene graph hashable = [hex(self.graph.transforms.__hash__())] # take the re-hex string of the hash diff --git a/trimesh/viewer/windowed.py b/trimesh/viewer/windowed.py index 7a360f8fb..327044501 100644 --- a/trimesh/viewer/windowed.py +++ b/trimesh/viewer/windowed.py @@ -229,7 +229,7 @@ def _update_vertex_list(self): for name, geom in self.scene.geometry.items(): if geom.is_empty: continue - if geometry_hash(geom) == self.vertex_list_hash.get(name): + if _geometry_hash(geom) == self.vertex_list_hash.get(name): continue self.add_geometry(name=name, geometry=geom, smooth=bool(self._smooth)) @@ -263,7 +263,7 @@ def add_geometry(self, name, geometry, **kwargs): # create the indexed vertex list self.vertex_list[name] = self.batch.add_indexed(*args) # save the hash of the geometry - self.vertex_list_hash[name] = geometry_hash(geometry) + self.vertex_list_hash[name] = _geometry_hash(geometry) # save the rendering mode from the constructor args self.vertex_list_mode[name] = args[1] @@ -731,7 +731,6 @@ def on_draw(self): # get the transform from world to geometry and mesh name transform, geometry_name = graph.get(current_node) - # if no geometry at this frame continue without rendering if geometry_name is None or geometry_name not in self.vertex_list_mode: continue @@ -749,7 +748,6 @@ def on_draw(self): mesh = geometry[geometry_name] if mesh.is_empty: continue - # get the GL mode of the current geometry mode = self.vertex_list_mode[geometry_name] @@ -842,7 +840,7 @@ def save_image(self, file_obj): return file_obj -def geometry_hash(geometry): +def _geometry_hash(geometry): """ Get a hash for a geometry object