From ed668f37d923b912dc5fc7c1d59591645a800b1d Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Wed, 24 Sep 2025 14:39:35 -0400 Subject: [PATCH 01/28] add raycaster sensor --- examples/keyboard_teleop.py | 3 +- examples/sensors/lidar_teleop.py | 222 +++++++ .../entities/rigid_entity/rigid_geom.py | 6 +- .../solvers/rigid/rigid_solver_decomp.py | 46 +- genesis/recorders/plotters.py | 44 +- genesis/sensors/__init__.py | 3 + genesis/sensors/contact_force.py | 6 + genesis/sensors/raycaster/__init__.py | 10 + genesis/sensors/raycaster/base_pattern.py | 91 +++ genesis/sensors/raycaster/depth_camera.py | 64 ++ genesis/sensors/raycaster/lidar.py | 48 ++ genesis/sensors/raycaster/patterns.py | 297 +++++++++ genesis/sensors/raycaster/raycaster.py | 589 ++++++++++++++++++ genesis/utils/array_class.py | 53 +- genesis/utils/geom.py | 25 + genesis/utils/keyboard.py | 28 + genesis/utils/misc.py | 21 +- tests/test_sensors.py | 60 ++ 18 files changed, 1529 insertions(+), 87 deletions(-) create mode 100644 examples/sensors/lidar_teleop.py create mode 100644 genesis/sensors/raycaster/__init__.py create mode 100644 genesis/sensors/raycaster/base_pattern.py create mode 100644 genesis/sensors/raycaster/depth_camera.py create mode 100644 genesis/sensors/raycaster/lidar.py create mode 100644 genesis/sensors/raycaster/patterns.py create mode 100644 genesis/sensors/raycaster/raycaster.py create mode 100644 genesis/utils/keyboard.py diff --git a/examples/keyboard_teleop.py b/examples/keyboard_teleop.py index a8c5c05a2..6e9ae25dd 100644 --- a/examples/keyboard_teleop.py +++ b/examples/keyboard_teleop.py @@ -17,11 +17,12 @@ import random import threading -import genesis as gs import numpy as np from pynput import keyboard from scipy.spatial.transform import Rotation as R +import genesis as gs + class KeyboardDevice: def __init__(self): diff --git a/examples/sensors/lidar_teleop.py b/examples/sensors/lidar_teleop.py new file mode 100644 index 000000000..468af7c51 --- /dev/null +++ b/examples/sensors/lidar_teleop.py @@ -0,0 +1,222 @@ +import argparse + +import numpy as np +from pynput import keyboard + +import genesis as gs +from genesis.sensors.raycaster.patterns import DepthCameraPattern, GridPattern, SphericalPattern +from genesis.utils.geom import euler_to_quat +from genesis.utils.keyboard import KeyboardDevice + +KEY_DPOS = 0.1 +KEY_DANGLE = 0.1 + + +def build_scene(show_viewer: bool, is_free: bool) -> gs.Scene: + scene = gs.Scene( + viewer_options=gs.options.ViewerOptions( + camera_pos=(6.0, 6.0, 4.0), + camera_lookat=(0.0, 0.0, 0.5), + camera_fov=60, + max_FPS=60, + ), + show_viewer=show_viewer, + show_FPS=False, + ) + + scene.add_entity(gs.morphs.Plane(is_free=is_free)) + + # create ring of obstacles to visualize raycaster sensor hits + inner_radius = 3.0 + for i in range(8): + angle = 2 * np.pi * i / 8 + x = inner_radius * np.cos(angle) + y = inner_radius * np.sin(angle) + scene.add_entity(gs.morphs.Cylinder(height=1.5, radius=0.3, pos=(x, y, 0.75), is_free=is_free)) + + outer_radius = 5.0 + for i in range(6): + angle = 2 * np.pi * i / 6 + np.pi / 6 + x = outer_radius * np.cos(angle) + y = outer_radius * np.sin(angle) + scene.add_entity(gs.morphs.Box(size=(0.5, 0.5, 2.0), pos=(x, y, 1.0), is_free=is_free)) + + return scene + + +def create_robot_with_lidar(scene, args): + """ + Create fixed-base robot with a LiDAR or Depth Camera sensor attached. + + Parameters + ---------- + scene : gs.Scene + The scene to create the robot in. + args : argparse.Namespace + The arguments to create the robot with. 
+ + Returns + ------- + robot : gs.engine.entities.RigidEntity + The robot entity. + sensor : gs.sensors.Raycaster + The LiDAR or Depth Camera sensor. + """ + + robot_kwargs = dict( + pos=(0.0, 0.0, 0.35), + quat=(1.0, 0.0, 0.0, 0.0), + fixed=True, + ) + + if args.use_box: + robot = scene.add_entity(gs.morphs.Box(size=(0.1, 0.1, 0.1), **robot_kwargs)) + pos_offset = (0.0, 0.0, 0.2) + else: + robot = scene.add_entity(gs.morphs.URDF(file="urdf/go2/urdf/go2.urdf", **robot_kwargs)) + pos_offset = (0.3, 0.0, 0.1) + + sensor_kwargs = dict( + entity_idx=robot.idx, + pos_offset=pos_offset, + euler_offset=(0.0, 0.0, 0.0), + return_world_frame=True, + only_cast_fixed=args.fixed, + draw_debug=True, + ) + + if args.pattern == "depth": + sensor = scene.add_sensor(gs.sensors.DepthCamera(pattern=DepthCameraPattern(), **sensor_kwargs)) + return robot, sensor + elif args.pattern == "grid": + pattern_cfg = GridPattern() + else: + if args.pattern != "spherical": + gs.logger.warning(f"Unrecognized raycaster pattern: {args.pattern}. Using 'spherical' instead.") + pattern_cfg = SphericalPattern() + + sensor = scene.add_sensor(gs.sensors.Lidar(pattern=pattern_cfg, **sensor_kwargs)) + return robot, sensor + + +def run(scene: gs.Scene, robot, sensor: gs.sensors.Lidar, n_envs: int, kb: KeyboardDevice, is_depth: bool = False): + if is_depth: + scene.start_recording( + data_func=(lambda: sensor.read_image()[0]) if n_envs > 0 else sensor.read_image, + rec_options=gs.recorders.MPLImagePlot(), + ) + + scene.build(n_envs=n_envs) + + print("Keyboard Controls:") + # Avoid using same keys as interactive viewer keyboard controls + print("[↑/↓/←/→]: Move XY") + print("[j/k]: Down/Up") + print("[n/m]: Roll CCW/CW") + print("[,/.]: Pitch Up/Down") + print("[o/p]: Yaw CCW/CW") + print("[\\]: Reset") + print("[esc]: Quit") + + init_pos = np.array([0.0, 0.0, 0.35], dtype=np.float32) + init_euler = np.array([0.0, 0.0, 0.0], dtype=np.float32) + + target_pos = init_pos.copy() + target_euler = init_euler.copy() + + def apply_pose_to_all_envs(pos_np: np.ndarray, quat_np: np.ndarray): + if n_envs > 0: + pos_np = np.expand_dims(pos_np, axis=0).repeat(n_envs, axis=0) + quat_np = np.expand_dims(quat_np, axis=0).repeat(n_envs, axis=0) + robot.set_pos(pos_np, zero_velocity=False) + robot.set_quat(quat_np, zero_velocity=False) + + apply_pose_to_all_envs(target_pos, euler_to_quat(target_euler)) + + try: + while True: + pressed = kb.pressed_keys.copy() + if keyboard.Key.esc in pressed: + break + if keyboard.KeyCode.from_char("\\") in pressed: + target_pos[:] = init_pos + target_euler[:] = init_euler + + if keyboard.Key.up in pressed: + target_pos[0] += KEY_DPOS + if keyboard.Key.down in pressed: + target_pos[0] -= KEY_DPOS + if keyboard.Key.right in pressed: + target_pos[1] -= KEY_DPOS + if keyboard.Key.left in pressed: + target_pos[1] += KEY_DPOS + if keyboard.KeyCode.from_char("j") in pressed: + target_pos[2] -= KEY_DPOS + if keyboard.KeyCode.from_char("k") in pressed: + target_pos[2] += KEY_DPOS + + if keyboard.KeyCode.from_char("n") in pressed: + target_euler[0] += KEY_DANGLE # roll CCW around +X + if keyboard.KeyCode.from_char("m") in pressed: + target_euler[0] -= KEY_DANGLE # roll CW around +X + if keyboard.KeyCode.from_char(",") in pressed: + target_euler[1] += KEY_DANGLE # pitch up around +Y + if keyboard.KeyCode.from_char(".") in pressed: + target_euler[1] -= KEY_DANGLE # pitch down around +Y + if keyboard.KeyCode.from_char("o") in pressed: + target_euler[2] += KEY_DANGLE # yaw CCW around +Z + if keyboard.KeyCode.from_char("p") in 
pressed: + target_euler[2] -= KEY_DANGLE # yaw CW around +Z + + apply_pose_to_all_envs(target_pos, euler_to_quat(target_euler)) + + scene.step() + + except KeyboardInterrupt: + gs.logger.info("Simulation interrupted, exiting.") + finally: + gs.logger.info("Simulation finished.") + + +def main(): + parser = argparse.ArgumentParser(description="Genesis LiDAR/Depth Camera Visualization with Keyboard Teleop") + parser.add_argument("-B", "--n_envs", type=int, default=0, help="Number of environments to replicate") + parser.add_argument("--cpu", action="store_true", help="Run on CPU instead of GPU") + parser.add_argument("--use-box", action="store_true", help="Use Box as robot instead of Go2") + parser.add_argument( + "-f", + "--fixed", + action="store_true", + help="Load obstacles as fixed and cast only against fixed objects (is_free=False)", + default=True, + ) + parser.add_argument( + "-nf", + "--no-fixed", + action="store_false", + dest="fixed", + help="Load obstacles as dynamic (is_free=True), raycaster will update BVH every step", + ) + parser.add_argument( + "--pattern", + type=str, + default="spherical", + choices=["spherical", "depth", "grid"], + help="Sensor pattern type", + ) + + args = parser.parse_args() + + gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32", logging_level="info") + + kb = KeyboardDevice() + kb.start() + + scene = build_scene(show_viewer=True, is_free=not args.fixed) + robot, lidar = create_robot_with_lidar(scene, args) + + run(scene, robot, lidar, n_envs=args.n_envs, kb=kb, is_depth=args.pattern == "depth") + + +if __name__ == "__main__": + main() diff --git a/genesis/engine/entities/rigid_entity/rigid_geom.py b/genesis/engine/entities/rigid_entity/rigid_geom.py index ae15913e7..edb375e4c 100644 --- a/genesis/engine/entities/rigid_entity/rigid_geom.py +++ b/genesis/engine/entities/rigid_entity/rigid_geom.py @@ -18,9 +18,9 @@ from genesis.utils.misc import tensor_to_array, DeprecationError if TYPE_CHECKING: - from genesis.engine.solvers.rigid.rigid_solver_decomp import RigidSolver from genesis.engine.materials.rigid import Rigid as RigidMaterial from genesis.engine.mesh import Mesh + from genesis.engine.solvers.rigid.rigid_solver_decomp import RigidSolver from .rigid_entity import RigidEntity from .rigid_link import RigidLink @@ -1091,7 +1091,7 @@ def _kernel_get_vgeoms_quat(tensor: ti.types.ndarray(), vgeom_idx: ti.i32, vgeom @ti.kernel def _kernel_get_free_verts( - tensor: ti.types.ndarray(), verts_state_start: ti.i32, n_verts: ti.i32, free_verts_state: array_class.FreeVertsState + tensor: ti.types.ndarray(), verts_state_start: ti.i32, n_verts: ti.i32, free_verts_state: array_class.VertsState ): _B = free_verts_state.pos.shape[1] for i_v_, i, i_b in ti.ndrange(n_verts, 3, _B): @@ -1104,7 +1104,7 @@ def _kernel_get_fixed_verts( tensor: ti.types.ndarray(), verts_state_start: ti.i32, n_verts: ti.i32, - fixed_verts_state: array_class.FixedVertsState, + fixed_verts_state: array_class.VertsState, ): for i_v_, i in ti.ndrange(n_verts, 3): i_v = i_v_ + verts_state_start diff --git a/genesis/engine/solvers/rigid/rigid_solver_decomp.py b/genesis/engine/solvers/rigid/rigid_solver_decomp.py index 054851ea8..370bc10c1 100644 --- a/genesis/engine/solvers/rigid/rigid_solver_decomp.py +++ b/genesis/engine/solvers/rigid/rigid_solver_decomp.py @@ -1,5 +1,4 @@ -from dataclasses import dataclass -from typing import Literal, TYPE_CHECKING +from typing import TYPE_CHECKING, Literal import gstaichi as ti import numpy as np @@ -7,28 +6,24 @@ import torch import genesis as gs 
-import genesis.utils.geom as gu import genesis.utils.array_class as array_class - +import genesis.utils.geom as gu from genesis.engine.entities import AvatarEntity, DroneEntity, RigidEntity from genesis.engine.entities.base_entity import Entity -from genesis.engine.solvers.rigid.contact_island import ContactIsland from genesis.engine.states.solvers import RigidSolverState from genesis.options.solvers import RigidOptions -from genesis.styles import colors, formats from genesis.utils import linalg as lu -from genesis.utils.misc import ti_to_torch, DeprecationError, ALLOCATE_TENSOR_WARNING +from genesis.utils.misc import ALLOCATE_TENSOR_WARNING, DeprecationError, ti_to_torch +from genesis.utils.sdf_decomp import SDF -from ....utils.sdf_decomp import SDF from ..base_solver import Solver +from .collider_decomp import Collider from .constraint_solver_decomp import ConstraintSolver from .constraint_solver_decomp_island import ConstraintSolverIsland -from .collider_decomp import Collider from .rigid_solver_decomp_util import func_wakeup_entity_and_its_temp_island if TYPE_CHECKING: import genesis.engine.solvers.rigid.array_class - from genesis.engine.scene import Scene from genesis.engine.simulator import Simulator @@ -5069,8 +5064,8 @@ def kernel_update_verts_for_geoms( geoms_state: array_class.GeomsState, geoms_info: array_class.GeomsInfo, verts_info: array_class.VertsInfo, - free_verts_state: array_class.FreeVertsState, - fixed_verts_state: array_class.FixedVertsState, + free_verts_state: array_class.VertsState, + fixed_verts_state: array_class.VertsState, ): n_geoms = geoms_idx.shape[0] _B = geoms_state.verts_updated.shape[1] @@ -5086,8 +5081,8 @@ def func_update_verts_for_geom( geoms_state: array_class.GeomsState, geoms_info: array_class.GeomsInfo, verts_info: array_class.VertsInfo, - free_verts_state: array_class.FreeVertsState, - fixed_verts_state: array_class.FixedVertsState, + free_verts_state: array_class.VertsState, + fixed_verts_state: array_class.VertsState, ): if not geoms_state.verts_updated[i_g, i_b]: if geoms_info.is_fixed[i_g]: @@ -5133,6 +5128,29 @@ def func_update_all_verts( @ti.kernel(pure=gs.use_pure) +def kernel_update_all_verts( + geoms_state: array_class.GeomsState, + verts_info: array_class.VertsInfo, + free_verts_state: array_class.VertsState, + fixed_verts_state: array_class.VertsState, +): + n_verts = verts_info.geom_idx.shape[0] + _B = geoms_state.pos.shape[1] + for i_v, i_b in ti.ndrange(n_verts, _B): + g_pos = geoms_state.pos[verts_info.geom_idx[i_v], i_b] + g_quat = geoms_state.quat[verts_info.geom_idx[i_v], i_b] + verts_state_idx = verts_info.verts_state_idx[i_v] + if verts_info.is_free[i_v]: + free_verts_state.pos[verts_state_idx, i_b] = gu.ti_transform_by_trans_quat( + verts_info.init_pos[i_v], g_pos, g_quat + ) + elif i_b == 0: + fixed_verts_state.pos[verts_state_idx] = gu.ti_transform_by_trans_quat( + verts_info.init_pos[i_v], g_pos, g_quat + ) + + +@ti.kernel def kernel_update_geom_aabbs( geoms_state: array_class.GeomsState, geoms_init_AABB: array_class.GeomsInitAABB, diff --git a/genesis/recorders/plotters.py b/genesis/recorders/plotters.py index 01a2a7081..a18b046a3 100644 --- a/genesis/recorders/plotters.py +++ b/genesis/recorders/plotters.py @@ -446,12 +446,9 @@ def cleanup(self): """Clean up matplotlib resources.""" super().cleanup() - # Logger may not be available anymore - logger_exists = hasattr(gs, "logger") + self.lines.clear() + self.backgrounds.clear() - if self.fig is not None: - try: - import matplotlib.pyplot as plt plt.close(self.fig) if 
logger_exists: @@ -466,12 +463,6 @@ def get_image_array(self): """ Capture the plot image as a video frame. - Returns - ------- - image_array : np.ndarray - The RGB image as a numpy array. - """ - from matplotlib.backends.backend_agg import FigureCanvasAgg self._lock.acquire() if isinstance(self.fig.canvas, FigureCanvasAgg): @@ -480,21 +471,24 @@ def get_image_array(self): rgba_array_flat = np.frombuffer(self.fig.canvas.buffer_rgba(), dtype=np.uint8) rgb_array = rgba_array_flat.reshape((height, width, 4))[..., :3] - # Rescale image if necessary - if (width, height) != tuple(self._options.window_size): - img = Image.fromarray(rgb_array) - img = img.resize(self._options.window_size, resample=Image.BILINEAR) - rgb_array = np.asarray(img) - else: - rgb_array = rgb_array.copy() + import matplotlib.pyplot as plt + + self.image_plot = None + self.background = None + + self.fig, self.ax = plt.subplots(figsize=self.figsize) + self.fig.tight_layout(pad=0) + self.ax.set_axis_off() + self.fig.subplots_adjust(left=0, right=1, top=1, bottom=0) + self.image_plot = self.ax.imshow(np.zeros((1, 1)), cmap="plasma", origin="upper", aspect="auto") + self._show_fig() + + def process(self, data, cur_time): + """Process new image data and update display.""" + if isinstance(data, torch.Tensor): + img_data = tensor_to_array(data) else: - # Slower but more generic fallback only if necessary - buffer = io.BytesIO() - self.fig.canvas.print_figure(buffer, format="png", dpi="figure") - buffer.seek(0) - img = Image.open(buffer) - rgb_array = np.asarray(img.convert("RGB")) - self._lock.release() + img_data = np.asarray(data) return rgb_array diff --git a/genesis/sensors/__init__.py b/genesis/sensors/__init__.py index 8c43ba5ca..ff71125c6 100644 --- a/genesis/sensors/__init__.py +++ b/genesis/sensors/__init__.py @@ -2,4 +2,7 @@ from .contact_force import ContactForceSensorOptions as ContactForce from .contact_force import ContactSensorOptions as Contact from .imu import IMUOptions as IMU +from .raycaster import DepthCameraOptions as DepthCamera +from .raycaster import LidarOptions as Lidar +from .raycaster import RaycasterOptions as Raycaster from .sensor_manager import SensorManager diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py index 255819dbb..c86825034 100644 --- a/genesis/sensors/contact_force.py +++ b/genesis/sensors/contact_force.py @@ -131,6 +131,9 @@ def build(self): self._shared_metadata.expanded_links_idx, link_idx, expand=(1,), dim=0 ) + if self._options.draw_debug: + self.debug_object = None + def _get_return_format(self) -> tuple[int, ...]: return (1,) @@ -298,6 +301,9 @@ def build(self): _to_tuple(self._options.max_force, length_per_value=3), ) + if self._options.draw_debug: + self.debug_object = None + def _get_return_format(self) -> tuple[int, ...]: return (3,) diff --git a/genesis/sensors/raycaster/__init__.py b/genesis/sensors/raycaster/__init__.py new file mode 100644 index 000000000..445a7f67e --- /dev/null +++ b/genesis/sensors/raycaster/__init__.py @@ -0,0 +1,10 @@ +from .base_pattern import DynamicPatternGenerator, RaycastPattern, RaycastPatternGenerator +from .depth_camera import DepthCameraOptions +from .lidar import LidarOptions +from .patterns import ( + DepthCameraPattern, + GridPattern, + RaycastPattern, + SphericalPattern, +) +from .raycaster import RaycasterOptions diff --git a/genesis/sensors/raycaster/base_pattern.py b/genesis/sensors/raycaster/base_pattern.py new file mode 100644 index 000000000..3e709b41c --- /dev/null +++ 
b/genesis/sensors/raycaster/base_pattern.py
@@ -0,0 +1,91 @@
+from dataclasses import dataclass
+
+import torch
+
+import genesis as gs
+
+
+@dataclass
+class RaycastPattern:
+    """
+    Base class for raycast pattern options.
+    """
+
+    def validate(self):
+        """Validate the pattern options."""
+        pass
+
+    def get_return_shape(self) -> tuple[int, ...]:
+        """Get the shape of the ray vectors, e.g. (n_scan_lines, n_points_per_line) or (n_rays,)"""
+        raise NotImplementedError(f"{type(self).__name__} must implement `get_return_shape()`.")
+
+
+class RaycastPatternGenerator:
+    """
+    Base class for raycast pattern generators.
+
+    Implementations should be registered using the @register_pattern decorator.
+    """
+
+    def __init__(self, options: RaycastPattern):
+        self._options: RaycastPattern = options
+        self._return_shape: tuple[int, ...] = self._options.get_return_shape()
+
+    def get_ray_directions(self) -> torch.Tensor:
+        """
+        Get the local direction vectors of the rays.
+        """
+        raise NotImplementedError(f"{type(self).__name__} must implement `get_ray_directions()`.")
+
+    def get_ray_starts(self) -> torch.Tensor:
+        """
+        Get the local start positions of the rays.
+
+        As a default, return zeros which means all rays start at the local origin.
+        """
+        return torch.zeros((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device)
+
+
+class DynamicPatternGenerator(RaycastPatternGenerator):
+    """Base class for dynamic raycast pattern generators."""
+
+    def __init__(self, options: RaycastPattern):
+        super().__init__(options)
+        self._cached_ray_directions: torch.Tensor = torch.full((*self._return_shape, 3), torch.nan, dtype=gs.tc_float, device=gs.device)
+
+    def update_ray_directions(self, cur_t: float):
+        """
+        Compute the local direction vectors of the rays and save them to `self._cached_ray_directions`.
+
+        Parameters
+        ----------
+        cur_t : float
+            Current simulation time in seconds.
+        """
+        raise NotImplementedError(f"{type(self).__name__} must implement `update_ray_directions()`.")
+
+    def get_ray_directions(self) -> torch.Tensor:
+        """
+        Get the local direction vectors of the rays.
+        """
+        if self._cached_ray_directions.isnan().any():
+            self.update_ray_directions(0.0)
+        return self._cached_ray_directions
+
+
+RAYCAST_PATTERN_NAME_TO_OPTIONS_MAP: dict[str, type[RaycastPattern]] = {}
+RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP: dict[type[RaycastPattern], type[RaycastPatternGenerator]] = {}
+
+
+def register_pattern(pattern_type: type[RaycastPattern], name: str):
+    def _impl(generator_type: type[RaycastPatternGenerator]):
+        RAYCAST_PATTERN_NAME_TO_OPTIONS_MAP[name] = pattern_type
+        RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP[pattern_type] = generator_type
+        return generator_type
+
+    return _impl
+
+
+def create_pattern_generator(pattern: RaycastPattern) -> RaycastPatternGenerator:
+    generator_cls = RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP[type(pattern)]
+    return generator_cls(pattern)
diff --git a/genesis/sensors/raycaster/depth_camera.py b/genesis/sensors/raycaster/depth_camera.py
new file mode 100644
index 000000000..fc9c752db
--- /dev/null
+++ b/genesis/sensors/raycaster/depth_camera.py
@@ -0,0 +1,64 @@
+import torch
+
+import genesis as gs
+from genesis.sensors.sensor_manager import register_sensor
+
+from .patterns import DepthCameraPattern
+from .raycaster import RaycasterOptions, RaycasterSensor, RaycasterSharedMetadata
+
+
+class DepthCameraOptions(RaycasterOptions):
+    """
+    Depth camera that uses ray casting to obtain depth images.
+ + Parameters + ---------- + entity_idx : int + The global entity index of the RigidEntity to which this sensor is attached. + link_idx_local : int, optional + The local index of the RigidLink of the RigidEntity to which this sensor is attached. + pos_offset : tuple[float, float, float], optional + The mounting offset position of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). + euler_offset : tuple[float, float, float], optional + The mounting offset quaternion of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). + pattern: DepthCameraPattern + The raycasting pattern configuration for the sensor. + min_range : float, optional + The minimum sensing range in meters. Defaults to 0.0. + max_range : float, optional + The maximum sensing range in meters. Defaults to 20.0. + return_world_frame : bool, optional + Whether to return points in the world frame. Defaults to False (local frame). + only_cast_fixed : bool, optional + Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this + option for the **first** lidar sensor will be the behavior for **all** Lidar sensors. + delay : float, optional + The delay in seconds before the sensor data is read. + update_ground_truth_only : bool, optional + If True, the sensor will only update the ground truth cache, and not the measured cache. + draw_debug : bool, optional + If True and the rasterizer visualization is active, spheres will be drawn at every hit point. + debug_sphere_radius: float, optional + The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. + """ + + def validate(self, scene): + super().validate(scene) + if not isinstance(self.pattern, DepthCameraPattern): + gs.raise_exception("DepthCamera pattern must be an instance of DepthCameraPattern") + + +@register_sensor(DepthCameraOptions, RaycasterSharedMetadata) +class DepthCameraSensor(RaycasterSensor): + def read_image(self) -> torch.Tensor: + """ + Read the depth image from the sensor. + + This method uses the hit distances from the underlying RaycasterSensor.read() method and reshapes into image. + + Returns + ------- + torch.Tensor + The depth image with shape (height, width). + """ + return self.read()["hit_ranges"].reshape(self._options.pattern.height, self._options.pattern.width) diff --git a/genesis/sensors/raycaster/lidar.py b/genesis/sensors/raycaster/lidar.py new file mode 100644 index 000000000..d3d41f97c --- /dev/null +++ b/genesis/sensors/raycaster/lidar.py @@ -0,0 +1,48 @@ +from genesis.sensors.sensor_manager import register_sensor + +from .raycaster import RaycasterOptions, RaycasterSensor, RaycasterSharedMetadata + + +class LidarOptions(RaycasterOptions): + """ + Lidar sensor that performs ray casting to get distance measurements and point clouds. + + Parameters + ---------- + entity_idx : int + The global entity index of the RigidEntity to which this sensor is attached. + link_idx_local : int, optional + The local index of the RigidLink of the RigidEntity to which this sensor is attached. + pos_offset : tuple[float, float, float], optional + The mounting offset position of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). + euler_offset : tuple[float, float, float], optional + The mounting offset quaternion of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). + pattern: RaycasterPattern + The raycasting pattern configuration for the sensor. + min_range : float, optional + The minimum sensing range in meters. Defaults to 0.0. 
+    max_range : float, optional
+        The maximum sensing range in meters. Defaults to 20.0.
+    no_hit_value : float, optional
+        The value to return for no hit. Defaults to max_range if not specified.
+    return_world_frame : bool, optional
+        Whether to return points in the world frame. Defaults to False (local frame).
+    only_cast_fixed : bool, optional
+        Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this
+        option for the **first** lidar sensor will be the behavior for **all** Lidar sensors.
+    delay : float, optional
+        The delay in seconds before the sensor data is read.
+    update_ground_truth_only : bool, optional
+        If True, the sensor will only update the ground truth cache, and not the measured cache.
+    draw_debug : bool, optional
+        If True and the rasterizer visualization is active, spheres will be drawn at every hit point.
+    debug_sphere_radius: float, optional
+        The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02.
+    """
+
+    pass
+
+
+@register_sensor(LidarOptions, RaycasterSharedMetadata)
+class LidarSensor(RaycasterSensor):
+    pass
diff --git a/genesis/sensors/raycaster/patterns.py b/genesis/sensors/raycaster/patterns.py
new file mode 100644
index 000000000..4abec7945
--- /dev/null
+++ b/genesis/sensors/raycaster/patterns.py
@@ -0,0 +1,297 @@
+import math
+from dataclasses import dataclass
+from typing import Sequence
+
+import torch
+
+import genesis as gs
+from genesis.utils.geom import spherical_to_cartesian
+
+from .base_pattern import RaycastPattern, RaycastPatternGenerator, register_pattern
+
+
+def _generate_uniform_angles(
+    n: int | None = None,
+    fov: float | tuple[float, float] | None = None,
+    res: float | None = None,
+    angles: Sequence[float] | None = None,
+    use_degrees: bool = True,
+) -> torch.Tensor:
+
+    if angles is None:
+        assert fov is not None, "FOV should be provided if angles not given."
+
+        if res is not None:
+            if isinstance(fov, tuple):
+                f_min, f_max = fov
+            else:
+                f_max = fov / 2.0
+                f_min = -f_max
+            n = math.ceil((f_max - f_min) / res) + 1
+
+        assert n is not None
+
+        if isinstance(fov, tuple):
+            f_min, f_max = fov
+            fov_size = f_max - f_min
+        else:
+            f_max = fov / 2.0
+            f_min = -f_max
+            fov_size = fov
+
+        full_rotation = 360.0 if use_degrees else 2.0 * math.pi
+        assert fov_size <= full_rotation + gs.EPS, "FOV should not be larger than a full rotation."
+
+        # avoid duplicate angle at 0/360 degrees
+        if fov_size >= full_rotation - gs.EPS:
+            f_max -= fov_size / (n - 1) * 0.5
+
+        angles = torch.linspace(f_min, f_max, n, dtype=gs.tc_float, device=gs.device)
+
+    if use_degrees:
+        angles = torch.deg2rad(angles)
+
+    return angles
+
+
+def _compute_focal_lengths(
+    width: int, height: int, fov_horizontal: float | None, fov_vertical: float | None
+) -> tuple[float, float]:
+    if fov_horizontal is not None and fov_vertical is None:
+        fh_rad = math.radians(fov_horizontal)
+        fv_rad = 2.0 * math.atan((height / width) * math.tan(fh_rad / 2.0))
+    elif fov_vertical is not None and fov_horizontal is None:
+        fv_rad = math.radians(fov_vertical)
+        fh_rad = 2.0 * math.atan((width / height) * math.tan(fv_rad / 2.0))
+    else:
+        fh_rad = math.radians(fov_horizontal)
+        fv_rad = math.radians(fov_vertical)
+
+    fx = width / (2.0 * math.tan(fh_rad / 2.0))
+    fy = height / (2.0 * math.tan(fv_rad / 2.0))
+
+    return fx, fy
+
+
+# ============================== Generic Patterns ==============================
+@dataclass
+class GridPattern(RaycastPattern):
+    """
+    Configuration for grid-based ray casting.
+ + Defines a 2D grid of rays in the sensor coordinate system. + + Parameters + ---------- + resolution : float + Grid spacing in meters. + size : tuple[float, float] + Grid dimensions (length, width) in meters. + direction : tuple[float, float, float] + Ray direction vector. + ordering : str + Point ordering, either "xy" or "yx". + """ + + resolution: float = 0.1 + size: tuple[float, float] = (2.0, 2.0) + direction: tuple[float, float, float] = (0.0, 0.0, -1.0) + ordering: str = "xy" + + def validate(self): + if self.ordering not in ["xy", "yx"]: + raise ValueError(f"Ordering must be 'xy' or 'yx'. Received: '{self.ordering}'.") + if self.resolution <= 0: + raise ValueError(f"Resolution must be greater than 0. Received: '{self.resolution}'.") + + def get_return_shape(self) -> tuple[int, ...]: + num_x = math.ceil(self.size[0] / self.resolution) + 1 + num_y = math.ceil(self.size[1] / self.resolution) + 1 + return (num_x, num_y) + + +@register_pattern(GridPattern, "grid") +class GridPatternGenerator(RaycastPatternGenerator): + """Generator for 2D grid ray patterns.""" + + def __init__(self, options: GridPattern): + super().__init__(options) + self.x_coords = torch.arange( + -options.size[0] / 2, options.size[0] / 2 + gs.EPS, options.resolution, dtype=gs.tc_float, device=gs.device + ) + self.y_coords = torch.arange( + -options.size[1] / 2, options.size[1] / 2 + gs.EPS, options.resolution, dtype=gs.tc_float, device=gs.device + ) + self.direction = torch.tensor(options.direction, dtype=gs.tc_float, device=gs.device) + + def get_ray_directions(self) -> torch.Tensor: + return self.direction.expand((*self._return_shape, 3)) + + def get_ray_starts(self) -> torch.Tensor: + if self._options.ordering == "xy": + grid_x, grid_y = torch.meshgrid(self.x_coords, self.y_coords, indexing="xy") + else: + grid_x, grid_y = torch.meshgrid(self.x_coords, self.y_coords, indexing="ij") + + starts = torch.empty((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) + starts[..., 0] = grid_x + starts[..., 1] = grid_y + starts[..., 2] = 0.0 + + return starts + + +@dataclass +class SphericalPattern(RaycastPattern): + """ + Configuration for spherical ray pattern. + + Either specify: + - (n_vertical, n_horizontal, fov_vertical, fov_horizontal) for uniform spacing by count. + - (vertical_res, horizontal_res, fov_vertical, fov_horizontal) for uniform spacing by resolution. + - (vertical_angles, horizontal_angles) for custom angles. + + + Parameters + ---------- + fov_vertical: float | tuple[float, float] + Vertical field of view. + fov_horizontal: float | tuple[float, float] + Horizontal field of view. + n_vertical : int + Number of vertical/elevation scan lines. + n_horizontal : int + Number of horizontal/azimuth points per scan line. + res_vertical : float, optional + Vertical angular resolution in degrees. Overrides n_vertical if provided. + res_horizontal : float, optional + Horizontal angular resolution in degrees. Overrides n_horizontal if provided. + angles_vertical : Sequence[float], optional + Array of elevation angles. Overrides n_vertical/res_vertical/fov_vertical if provided. + angles_horizontal: Sequence[float], optional + Array of azimuth angles. Overrides n_horizontal/res_horizontal/fov_horizontal if provided. + use_degrees : bool, optional + Whether the provided angles are in degrees or radians. Defaults to True. 
+ """ + + fov_vertical: float | tuple[float, float] = 30.0 + fov_horizontal: float | tuple[float, float] = 360.0 + n_vertical: int = 64 + n_horizontal: int = 128 + res_vertical: float | None = None + res_horizontal: float | None = None + vertical_angles: Sequence[float] | None = None + horizontal_angles: Sequence[float] | None = None + use_degrees: bool = True + + def validate(self): + full_rotation = 360.0 if self.use_degrees else math.pi * 2.0 + for fov in (self.fov_vertical, self.fov_horizontal): + if (isinstance(fov, float) and (fov < 0 or fov > full_rotation + gs.EPS)) or ( + isinstance(fov, tuple) and (fov[1] - fov[0] > full_rotation + gs.EPS) + ): + gs.raise_exception(f"[{type(self).__class__}] FOV should not be <0 or >{full_rotation}. Got: {fov}.") + + def get_return_shape(self) -> tuple[int, ...]: + if self.vertical_angles is not None and self.horizontal_angles is not None: + return (len(self.vertical_angles), len(self.horizontal_angles)) + else: + return (self.n_vertical, self.n_horizontal) + + +@register_pattern(SphericalPattern, "spherical") +class SphericalPatternGenerator(RaycastPatternGenerator): + """Generator for uniform or custom spherical ray patterns.""" + + def get_ray_directions(self) -> torch.Tensor: + v_angles = _generate_uniform_angles( + n=self._options.n_vertical, + fov=self._options.fov_vertical, + res=self._options.res_vertical, + angles=self._options.vertical_angles, + use_degrees=self._options.use_degrees, + ) + h_angles = _generate_uniform_angles( + n=self._options.n_horizontal, + fov=self._options.fov_horizontal, + res=self._options.res_horizontal, + angles=self._options.horizontal_angles, + use_degrees=self._options.use_degrees, + ) + + h_grid, v_grid = torch.meshgrid(h_angles, v_angles, indexing="ij") + return spherical_to_cartesian(h_grid, v_grid) + + +# ============================== Camera Patterns ============================== + + +@dataclass +class DepthCameraPattern(RaycastPattern): + """Configuration for pinhole depth camera ray casting. + + Parameters + ---------- + width : int + Image width in pixels. + height : int + Image height in pixels. + fx : float | None + Focal length in x direction (pixels). Computed from FOV if None. + fy : float | None + Focal length in y direction (pixels). Computed from FOV if None. + cx : float | None + Principal point x coordinate (pixels). Defaults to image center if None. + cy : float | None + Principal point y coordinate (pixels). Defaults to image center if None. + fov_horizontal : float + Horizontal field of view in degrees. Used to compute fx if fx is None. + fov_vertical : float | None + Vertical field of view in degrees. Used to compute fy if fy is None. 
+ """ + + width: int = 128 + height: int = 96 + fx: float | None = None + fy: float | None = None + cx: float | None = None + cy: float | None = None + fov_horizontal: float = 90.0 + fov_vertical: float | None = None + + def get_return_shape(self) -> tuple[int, ...]: + return (self.height, self.width) + + +@register_pattern(DepthCameraPattern, "depth_camera") +class DepthCameraPatternGenerator(RaycastPatternGenerator): + """Generator for pinhole depth camera ray patterns.""" + + def get_ray_directions(self) -> torch.Tensor: + W, H = int(self._options.width), int(self._options.height) + + if W <= 0 or H <= 0: + raise ValueError("Image dimensions must be positive") + + fx, fy, cx, cy = self._options.fx, self._options.fy, self._options.cx, self._options.cy + if fx is None or fy is None: + fx, fy = _compute_focal_lengths(W, H, self._options.fov_horizontal, self._options.fov_vertical) + if cx is None: + cx = W * 0.5 + if cy is None: + cy = H * 0.5 + + u = torch.arange(0, W, dtype=gs.tc_float, device=gs.device) + 0.5 + v = torch.arange(0, H, dtype=gs.tc_float, device=gs.device) + 0.5 + uu, vv = torch.meshgrid(u, v, indexing="xy") + + # standard camera frame coordinates + x_c = (uu - cx) / fx + y_c = (vv - cy) / fy + z_c = torch.ones_like(x_c, dtype=gs.tc_float, device=gs.device) + + # transform to robotics camera frame + dirs = torch.stack([z_c, -x_c, -y_c], dim=-1) + dirs /= torch.linalg.norm(dirs, dim=-1, keepdim=True) + + return dirs diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py new file mode 100644 index 000000000..bc0170979 --- /dev/null +++ b/genesis/sensors/raycaster/raycaster.py @@ -0,0 +1,589 @@ +import itertools +import math +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +import gstaichi as ti +import numpy as np +import torch + +import genesis as gs +import genesis.engine.solvers.rigid.rigid_solver_decomp as rigid_solver_decomp +import genesis.utils.array_class as array_class +from genesis.engine.bvh import AABB, LBVH +from genesis.sensors.sensor_manager import register_sensor +from genesis.utils.geom import ( + ti_normalize, + ti_transform_by_quat, + ti_transform_by_trans_quat, + transform_by_quat, + transform_by_trans_quat, +) +from genesis.utils.misc import concat_with_tensor, make_tensor_field +from genesis.vis.rasterizer_context import RasterizerContext + +from ..base_sensor import ( + RigidSensorMetadataMixin, + RigidSensorMixin, + RigidSensorOptionsMixin, + Sensor, + SensorOptions, + SharedSensorMetadata, +) +from .base_pattern import RaycastPattern, RaycastPatternGenerator, create_pattern_generator + +if TYPE_CHECKING: + from genesis.utils.ring_buffer import TensorRingBuffer + + +DEBUG_COLORS = ( + (1.0, 0.2, 0.2, 1.0), + (0.2, 1.0, 0.2, 1.0), + (0.2, 0.6, 1.0, 1.0), + (1.0, 1.0, 0.2, 1.0), +) + + +@ti.func +def ray_triangle_intersection(ray_start, ray_dir, v0, v1, v2): + """ + Möller-Trumbore ray-triangle intersection. 
+ + Returns: vec4(t, u, v, hit) where hit=1.0 if intersection found, 0.0 otherwise + """ + result = ti.math.vec4(0.0, 0.0, 0.0, 0.0) + + edge1 = v1 - v0 + edge2 = v2 - v0 + + # Begin calculating determinant - also used to calculate u parameter + h = ray_dir.cross(edge2) + a = edge1.dot(h) + + # Check all conditions in sequence without early returns + valid = True + + t = 0.0 + u = 0.0 + v = 0.0 + f = 0.0 + s = ti.math.vec3(0.0, 0.0, 0.0) + q = ti.math.vec3(0.0, 0.0, 0.0) + + # If determinant is near zero, ray lies in plane of triangle + if ti.abs(a) < gs.EPS: + valid = False + + if valid: + f = 1.0 / a + s = ray_start - v0 + u = f * s.dot(h) + + if u < 0.0 or u > 1.0: + valid = False + + if valid: + q = s.cross(edge1) + v = f * ray_dir.dot(q) + + if v < 0.0 or u + v > 1.0: + valid = False + + if valid: + # At this stage we can compute t to find out where the intersection point is on the line + t = f * edge2.dot(q) + + # Ray intersection + if t <= gs.EPS: + valid = False + + if valid: + result = ti.math.vec4(t, u, v, 1.0) + + return result + + +@ti.func +def ray_aabb_intersection(ray_start, ray_dir, aabb_min, aabb_max): + """ + Fast ray-AABB intersection test. + Returns the t value of intersection, or -1.0 if no intersection. + """ + result = -1.0 + + # Use the slab method for ray-AABB intersection + inv_dir = 1.0 / ray_dir + + # Handle potential division by zero with large values + large_value = ti.static(1 / gs.EPS) + if ti.abs(ray_dir.x) < gs.EPS: + inv_dir.x = large_value if ray_dir.x >= 0.0 else -large_value + if ti.abs(ray_dir.y) < gs.EPS: + inv_dir.y = large_value if ray_dir.y >= 0.0 else -large_value + if ti.abs(ray_dir.z) < gs.EPS: + inv_dir.z = large_value if ray_dir.z >= 0.0 else -large_value + + t1 = (aabb_min - ray_start) * inv_dir + t2 = (aabb_max - ray_start) * inv_dir + + tmin = ti.min(t1, t2) + tmax = ti.max(t1, t2) + + t_near = ti.max(ti.max(tmin.x, tmin.y), tmin.z) + t_far = ti.min(ti.min(tmax.x, tmax.y), tmax.z) + + # Check if ray intersects AABB + if t_near <= t_far and t_far >= 0.0: + result = ti.max(t_near, 0.0) + + return result + + +@ti.kernel +def kernel_update_aabbs( + map_lidar_faces: ti.template(), + free_verts_state: array_class.VertsState, + fixed_verts_state: array_class.VertsState, + verts_info: array_class.VertsInfo, + faces_info: array_class.FacesInfo, + aabb_state: array_class.AABBState, +): + for i_b, i_f_ in ti.ndrange(free_verts_state.pos.shape[1], map_lidar_faces.shape[0]): + i_f = map_lidar_faces[i_f_] + aabb_state.aabbs[i_b, i_f].min.fill(ti.math.inf) + aabb_state.aabbs[i_b, i_f].max.fill(-ti.math.inf) + + for i in ti.static(range(3)): + i_v = verts_info.verts_state_idx[faces_info.verts_idx[i_f][i]] + if verts_info.is_free[faces_info.verts_idx[i_f][i]]: + pos_v = free_verts_state.pos[i_v, i_b] + aabb_state.aabbs[i_b, i_f].min = ti.min(aabb_state.aabbs[i_b, i_f].min, pos_v) + aabb_state.aabbs[i_b, i_f].max = ti.max(aabb_state.aabbs[i_b, i_f].max, pos_v) + else: + pos_v = fixed_verts_state.pos[i_v] + aabb_state.aabbs[i_b, i_f].min = ti.min(aabb_state.aabbs[i_b, i_f].min, pos_v) + aabb_state.aabbs[i_b, i_f].max = ti.max(aabb_state.aabbs[i_b, i_f].max, pos_v) + + +@ti.kernel +def kernel_cast_lidar_rays( + map_lidar_faces: ti.template(), + fixed_verts_state: array_class.VertsState, + free_verts_state: array_class.VertsState, + verts_info: array_class.VertsInfo, + faces_info: array_class.FacesInfo, + bvh_nodes: ti.template(), + bvh_morton_codes: ti.template(), # maps sorted leaves to original triangle indices + links_pos: ti.types.ndarray(ndim=3), # 
[n_env, n_sensors, 3] + links_quat: ti.types.ndarray(ndim=3), # [n_env, n_sensors, 4] + ray_starts: ti.types.ndarray(ndim=2), # [n_points, 3] + ray_directions: ti.types.ndarray(ndim=2), # [n_points, 3] + max_ranges: ti.types.ndarray(ndim=1), # [n_sensors] + no_hit_values: ti.types.ndarray(ndim=1), # [n_sensors] + points_to_sensor_idx: ti.types.ndarray(ndim=1), # [n_points] + is_world_frame: ti.types.ndarray(ndim=1), # [n_sensors] + output_hits: ti.types.ndarray(ndim=2), # [n_env, n_points_per_sensor * 4 * n_sensors] +): + """ + Taichi kernel for ray casting, accelerated by a Bounding Volume Hierarchy (BVH). + + The result `output_hits` will be a 2D array of shape (n_env, n_points * 4) where in the second dimension, + the first n_points * 3 are hit points and the last n_points is hit ranges. + """ + STACK_SIZE = ti.static(64) + + n_triangles = map_lidar_faces.shape[0] + n_points = ray_starts.shape[0] + # batch, point + for i_b, i_p in ti.ndrange(output_hits.shape[0], n_points): + i_s = points_to_sensor_idx[i_p] + + # --- 1. Setup Ray --- + link_pos = ti.math.vec3(links_pos[i_b, i_s, 0], links_pos[i_b, i_s, 1], links_pos[i_b, i_s, 2]) + link_quat = ti.math.vec4( + links_quat[i_b, i_s, 0], links_quat[i_b, i_s, 1], links_quat[i_b, i_s, 2], links_quat[i_b, i_s, 3] + ) + + ray_start_local = ti.math.vec3(ray_starts[i_p, 0], ray_starts[i_p, 1], ray_starts[i_p, 2]) + ray_start_world = ti_transform_by_trans_quat(ray_start_local, link_pos, link_quat) + + ray_dir_local = ti.math.vec3(ray_directions[i_p, 0], ray_directions[i_p, 1], ray_directions[i_p, 2]) + ray_direction_world = ti_normalize(ti_transform_by_quat(ray_dir_local, link_quat)) + + # --- 2. BVH Traversal --- + max_range = max_ranges[i_s] + hit_face = -1 + + # Stack for non-recursive traversal + stack = ti.Vector.zero(ti.i32, STACK_SIZE) + stack[0] = 0 # Start traversal at the root node (index 0) + stack_ptr = 1 + + while stack_ptr > 0: + stack_ptr -= 1 + node_idx = stack[stack_ptr] + + node = bvh_nodes[i_b, node_idx] + + # Check if ray hits the node's bounding box + aabb_t = ray_aabb_intersection(ray_start_world, ray_direction_world, node.bound.min, node.bound.max) + + if aabb_t >= 0.0 and aabb_t < max_range: + if node.left == -1: # is leaf node + # A leaf node corresponds to one of the sorted triangles. Find the original triangle index. 
+ sorted_leaf_idx = node_idx - (n_triangles - 1) + + assert n_triangles > 0 + assert sorted_leaf_idx >= 0 + assert sorted_leaf_idx < n_triangles + + original_tri_idx = bvh_morton_codes[0, sorted_leaf_idx][1] + + assert original_tri_idx >= 0 + assert original_tri_idx < n_triangles + + i_f = map_lidar_faces[original_tri_idx] + is_free = verts_info.is_free[faces_info.verts_idx[i_f][0]] + + v0 = ti.Vector.zero(gs.ti_float, 3) + v1 = ti.Vector.zero(gs.ti_float, 3) + v2 = ti.Vector.zero(gs.ti_float, 3) + + if is_free: + v0 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][0]], i_b] + v1 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][1]], i_b] + v2 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][2]], i_b] + + else: + v0 = fixed_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][0]]] + v1 = fixed_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][1]]] + v2 = fixed_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][2]]] + + # Perform the expensive ray-triangle intersection test + hit_result = ray_triangle_intersection(ray_start_world, ray_direction_world, v0, v1, v2) + + if hit_result.w > 0.0 and hit_result.x < max_range and hit_result.x >= 0.0: + max_range = hit_result.x + hit_face = i_f + # hit_u, hit_v could be stored here if needed + + else: # It's an INTERNAL node + assert node.left >= 0 + assert node.right >= 0 + assert node.left < bvh_nodes.shape[1] + assert node.right < bvh_nodes.shape[1] + # Push children onto the stack for further traversal + # Make sure stack doesn't overflow + if stack_ptr < ti.static(STACK_SIZE - 2): + stack[stack_ptr] = node.left + stack[stack_ptr + 1] = node.right + stack_ptr += 2 + + # --- 3. Process Hit Result --- + if hit_face >= 0: + dist = max_range + output_hits[i_b, n_points * 3 + i_p] = dist + + if is_world_frame[i_s]: + hit_point = ray_start_world + dist * ray_direction_world + output_hits[i_b, i_p * 3 + 0] = hit_point.x + output_hits[i_b, i_p * 3 + 1] = hit_point.y + output_hits[i_b, i_p * 3 + 2] = hit_point.z + else: + # Local frame output along provided local ray direction + hit_point = dist * ti_normalize( + ti.math.vec3( + ray_directions[i_p, 0], + ray_directions[i_p, 1], + ray_directions[i_p, 2], + ) + ) + output_hits[i_b, i_p * 3 + 0] = hit_point.x + output_hits[i_b, i_p * 3 + 1] = hit_point.y + output_hits[i_b, i_p * 3 + 2] = hit_point.z + + else: + output_hits[i_b, i_p * 3 + 0] = 0.0 + output_hits[i_b, i_p * 3 + 1] = 0.0 + output_hits[i_b, i_p * 3 + 2] = 0.0 + output_hits[i_b, n_points * 3 + i_p] = no_hit_values[i_s] + + +class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): + """ + Raycaster sensor that performs ray casting to get distance measurements and point clouds. + + Parameters + ---------- + entity_idx : int + The global entity index of the RigidEntity to which this sensor is attached. + link_idx_local : int, optional + The local index of the RigidLink of the RigidEntity to which this sensor is attached. + pos_offset : tuple[float, float, float], optional + The mounting offset position of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). + euler_offset : tuple[float, float, float], optional + The mounting offset quaternion of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). + pattern: RaycastPatternOptions | str + The raycasting pattern configuration for the sensor. If a string is provided, a config file with the same name + is expected in the `configs` directory. 
+ min_range : float, optional + The minimum sensing range in meters. Defaults to 0.0. + max_range : float, optional + The maximum sensing range in meters. Defaults to 20.0. + no_hit_value : float, optional + The value to return for no hit. Defaults to max_range if not specified. + return_world_frame : bool, optional + Whether to return points in the world frame. Defaults to False (local frame). + only_cast_fixed : bool, optional + Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this + option for the **first** lidar sensor will be the behavior for **all** Lidar sensors. + delay : float, optional + The delay in seconds before the sensor data is read. + update_ground_truth_only : bool, optional + If True, the sensor will only update the ground truth cache, and not the measured cache. + draw_debug : bool, optional + If True and the rasterizer visualization is active, spheres will be drawn at every hit point. + debug_sphere_radius: float, optional + The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. + """ + + pattern: RaycastPattern | str + min_range: float = 0.0 + max_range: float = 20.0 + no_hit_value: float | None = None + return_world_frame: bool = False + only_cast_fixed: bool = False + + debug_sphere_radius: float = 0.02 + + +@dataclass +class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): + bvh: LBVH | None = None + aabb: AABB | None = None + only_cast_fixed: bool = False + map_lidar_faces: Any | None = None + n_lidar_faces: int = 0 + + sensors_ray_start_idx: list[int] = field(default_factory=list) + total_n_rays: int = 0 + + min_ranges: torch.Tensor = make_tensor_field((0,)) + max_ranges: torch.Tensor = make_tensor_field((0,)) + no_hit_values: torch.Tensor = make_tensor_field((0,)) + return_world_frame: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_bool) + + pattern_generators: list[RaycastPatternGenerator] = field(default_factory=list) + ray_dirs: torch.Tensor = make_tensor_field((0, 3)) + ray_starts: torch.Tensor = make_tensor_field((0, 3)) + ray_starts_world: torch.Tensor = make_tensor_field((0, 3)) + ray_dirs_world: torch.Tensor = make_tensor_field((0, 3)) + + points_to_sensor_idx: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) + + +@register_sensor(RaycasterOptions, RaycasterSharedMetadata) +@ti.data_oriented +class RaycasterSensor(RigidSensorMixin, Sensor): + + def build(self): + super().build() # set shared metadata from RigidSensorMixin + + # first lidar sensor initialization: build aabb and bvh + if self._shared_metadata.bvh is None: + self._shared_metadata.only_cast_fixed = self._options.only_cast_fixed # set for first only + + n_lidar_faces = self._shared_metadata.solver.faces_info.geom_idx.shape[0] + np_map_lidar_faces = np.arange(n_lidar_faces) + if self._shared_metadata.only_cast_fixed: + # count the number of faces in a fixed geoms + geom_is_fixed = np.logical_not(self._shared_metadata.solver.geoms_info.is_free.to_numpy()) + faces_geom = self._shared_metadata.solver.faces_info.geom_idx.to_numpy() + n_lidar_faces = np.sum(geom_is_fixed[faces_geom]) + if n_lidar_faces == 0: + gs.raise_exception( + "No fixed geoms found in the scene. To use only_cast_fixed, " + "at least some entities should have is_free=False." 
+ ) + np_map_lidar_faces = np.where(geom_is_fixed[faces_geom])[0] + + self._shared_metadata.map_lidar_faces = ti.field(ti.i32, (n_lidar_faces)) + self._shared_metadata.map_lidar_faces.from_numpy(np_map_lidar_faces) + self._shared_metadata.n_lidar_faces = n_lidar_faces + + self._shared_metadata.aabb = AABB( + n_batches=self._shared_metadata.solver.free_verts_state.pos.shape[1], n_aabbs=n_lidar_faces + ) + + print("RaycasterSensor::build preupdate -----") + print("aabb n_batches") + print(self._shared_metadata.aabb.n_batches) + print("aabb n_aabbs") + print(self._shared_metadata.aabb.n_aabbs) + print("aabbs") + print(self._shared_metadata.aabb.aabbs) + + rigid_solver_decomp.kernel_update_all_verts( + geoms_state=self._shared_metadata.solver.geoms_state, + verts_info=self._shared_metadata.solver.verts_info, + free_verts_state=self._shared_metadata.solver.free_verts_state, + fixed_verts_state=self._shared_metadata.solver.fixed_verts_state, + ) + + kernel_update_aabbs( + map_lidar_faces=self._shared_metadata.map_lidar_faces, + free_verts_state=self._shared_metadata.solver.free_verts_state, + fixed_verts_state=self._shared_metadata.solver.fixed_verts_state, + verts_info=self._shared_metadata.solver.verts_info, + faces_info=self._shared_metadata.solver.faces_info, + aabb_state=self._shared_metadata.aabb, + ) + self._shared_metadata.bvh = LBVH(self._shared_metadata.aabb) + self._shared_metadata.bvh.build() + + print("RaycasterSensor::build -----") + print("aabb n_batches") + print(self._shared_metadata.aabb.n_batches) + print("aabb n_aabbs") + print(self._shared_metadata.aabb.n_aabbs) + print("aabbs") + print(self._shared_metadata.aabb.aabbs) + print() + print("bvh nodes") + print(self._shared_metadata.bvh.nodes) + print("bvh morton codes") + print(self._shared_metadata.bvh.morton_codes) + + self.pattern_generator = create_pattern_generator(self._options.pattern) + self._shared_metadata.pattern_generators.append(self.pattern_generator) + pos_offset = self._shared_metadata.offsets_pos[-1, :] + quat_offset = self._shared_metadata.offsets_quat[..., -1, :] + if self._shared_metadata.solver.n_envs > 0: + quat_offset = quat_offset[0] # all envs have same offset on build + + ray_starts = self.pattern_generator.get_ray_starts().reshape(-1, 3) + ray_starts = transform_by_trans_quat(ray_starts, pos_offset, quat_offset) + self._shared_metadata.ray_starts = torch.cat([self._shared_metadata.ray_starts, ray_starts]) + + ray_dirs = self.pattern_generator.get_ray_directions().reshape(-1, 3) + ray_dirs = transform_by_quat(ray_dirs, quat_offset) + self._shared_metadata.ray_dirs = torch.cat([self._shared_metadata.ray_dirs, ray_dirs]) + + num_rays = math.prod(self.pattern_generator._return_shape) + self._shared_metadata.sensors_ray_start_idx.append(self._shared_metadata.total_n_rays) + self._shared_metadata.total_n_rays += num_rays + + self._shared_metadata.points_to_sensor_idx = concat_with_tensor( + self._shared_metadata.points_to_sensor_idx, + [self._idx] * num_rays, + flatten=True, + ) + self._shared_metadata.return_world_frame = concat_with_tensor( + self._shared_metadata.return_world_frame, self._options.return_world_frame + ) + self._shared_metadata.min_ranges = concat_with_tensor(self._shared_metadata.min_ranges, self._options.min_range) + self._shared_metadata.max_ranges = concat_with_tensor(self._shared_metadata.max_ranges, self._options.max_range) + no_hit_value = self._options.no_hit_value if self._options.no_hit_value is not None else self._options.max_range + self._shared_metadata.no_hit_values = 
concat_with_tensor(self._shared_metadata.no_hit_values, no_hit_value) + + if self._options.draw_debug: + self.debug_objects = [None] * self._manager._sim._B + + def _get_return_format(self) -> dict[str, tuple[int, ...]]: + shape = self._options.pattern.get_return_shape() + return { + "hit_points": (*shape, 3), + "hit_ranges": shape, + } + + @classmethod + def _get_cache_dtype(cls) -> torch.dtype: + return gs.tc_float + + @classmethod + def _update_shared_ground_truth_cache( + cls, shared_metadata: RaycasterSharedMetadata, shared_ground_truth_cache: torch.Tensor + ): + if not shared_metadata.only_cast_fixed: + rigid_solver_decomp.kernel_update_all_verts( + geoms_state=shared_metadata.solver.geoms_state, + verts_info=shared_metadata.solver.verts_info, + free_verts_state=shared_metadata.solver.free_verts_state, + fixed_verts_state=shared_metadata.solver.fixed_verts_state, + ) + + kernel_update_aabbs( + map_lidar_faces=shared_metadata.map_lidar_faces, + free_verts_state=shared_metadata.solver.free_verts_state, + fixed_verts_state=shared_metadata.solver.fixed_verts_state, + verts_info=shared_metadata.solver.verts_info, + faces_info=shared_metadata.solver.faces_info, + aabb_state=shared_metadata.aabb, + ) + + links_pos = shared_metadata.solver.get_links_pos(links_idx=shared_metadata.links_idx) + links_quat = shared_metadata.solver.get_links_quat(links_idx=shared_metadata.links_idx) + if shared_metadata.solver.n_envs == 0: + links_pos = links_pos.unsqueeze(0) + links_quat = links_quat.unsqueeze(0) + + print("RaycasterSensor::update -----") + print("aabb n_batches") + print(shared_metadata.aabb.n_batches) + print("aabb n_aabbs") + print(shared_metadata.aabb.n_aabbs) + print("aabbs") + print(shared_metadata.aabb.aabbs) + print() + print("bvh nodes") + print(shared_metadata.bvh.nodes) + print("bvh morton codes") + print(shared_metadata.bvh.morton_codes) + + kernel_cast_lidar_rays( + map_lidar_faces=shared_metadata.map_lidar_faces, + fixed_verts_state=shared_metadata.solver.fixed_verts_state, + free_verts_state=shared_metadata.solver.free_verts_state, + verts_info=shared_metadata.solver.verts_info, + faces_info=shared_metadata.solver.faces_info, + bvh_nodes=shared_metadata.bvh.nodes, + bvh_morton_codes=shared_metadata.bvh.morton_codes, + links_pos=links_pos.contiguous(), + links_quat=links_quat.contiguous(), + ray_starts=shared_metadata.ray_starts, + ray_directions=shared_metadata.ray_dirs, + max_ranges=shared_metadata.max_ranges, + no_hit_values=shared_metadata.no_hit_values, + points_to_sensor_idx=shared_metadata.points_to_sensor_idx, + is_world_frame=shared_metadata.return_world_frame, + output_hits=shared_ground_truth_cache, + ) + + @classmethod + def _update_shared_cache( + cls, + shared_metadata: RaycasterSharedMetadata, + shared_ground_truth_cache: torch.Tensor, + shared_cache: torch.Tensor, + buffered_data: "TensorRingBuffer", + ): + buffered_data.append(shared_ground_truth_cache) + cls._apply_delay_to_shared_cache(shared_metadata, shared_cache, buffered_data) + + def _draw_debug(self, context: "RasterizerContext"): + """ + Draw hit points as spheres in the scene. + + Spheres will be different colors per environment. 
+ """ + data = self.read()["hit_points"].reshape(self._manager._sim._B, -1, 3) + + for env_idx, color in zip(range(data.shape[0]), itertools.cycle(DEBUG_COLORS)): + points = data[env_idx] + + if self.debug_objects[env_idx] is not None: + context.clear_debug_object(self.debug_objects[env_idx]) + + self.debug_objects[env_idx] = context.draw_debug_spheres( + points, radius=self._options.debug_sphere_radius, color=color + ) diff --git a/genesis/utils/array_class.py b/genesis/utils/array_class.py index 37cf008a4..4e27faf91 100644 --- a/genesis/utils/array_class.py +++ b/genesis/utils/array_class.py @@ -1,14 +1,12 @@ import dataclasses -import inspect -import os from functools import partial -from typing import Any, Callable, Type, cast +import os import gstaichi as ti -from gstaichi.lang._fast_caching import FIELD_METADATA_CACHE_VALUE, args_hasher +from gstaichi.lang._fast_caching import FIELD_METADATA_CACHE_VALUE +import numpy as np import genesis as gs -import numpy as np # as a temporary solution, we get is_ndarray from os's environment variable use_ndarray = os.environ.get("GS_USE_NDARRAY", "0") == "1" @@ -1851,14 +1849,21 @@ def __init__(self): return ClassEdgesInfo() -# =========================================== FreeVertsState =========================================== +# =========================================== VertsState =========================================== @dataclasses.dataclass -class StructFreeVertsState: +class StructVertsState: pos: V_ANNOTATION +@ti.data_oriented +class ClassVertsState: + def __init__(self, kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def get_free_verts_state(solver): shape = solver._batch_shape(solver.n_free_verts_) kwargs = { @@ -1866,24 +1871,9 @@ def get_free_verts_state(solver): } if use_ndarray: - return StructFreeVertsState(**kwargs) + return StructVertsState(**kwargs) else: - - @ti.data_oriented - class ClassFreeVertsState: - def __init__(self): - for k, v in kwargs.items(): - setattr(self, k, v) - - return ClassFreeVertsState() - - -# =========================================== FixedVertsState =========================================== - - -@dataclasses.dataclass -class StructFixedVertsState: - pos: V_ANNOTATION + return ClassVertsState(kwargs) def get_fixed_verts_state(solver): @@ -1893,16 +1883,9 @@ def get_fixed_verts_state(solver): } if use_ndarray: - return StructFixedVertsState(**kwargs) + return StructVertsState(**kwargs) else: - - @ti.data_oriented - class ClassFixedVertsState: - def __init__(self): - for k, v in kwargs.items(): - setattr(self, k, v) - - return ClassFixedVertsState() + return ClassVertsState(kwargs) # =========================================== VvertsInfo =========================================== @@ -2251,8 +2234,7 @@ def __init__(self, solver): LinksInfo = ti.template() if not use_ndarray else StructLinksInfo JointsInfo = ti.template() if not use_ndarray else StructJointsInfo JointsState = ti.template() if not use_ndarray else StructJointsState -FreeVertsState = ti.template() if not use_ndarray else StructFreeVertsState -FixedVertsState = ti.template() if not use_ndarray else StructFixedVertsState +VertsState = ti.template() if not use_ndarray else StructVertsState VertsInfo = ti.template() if not use_ndarray else StructVertsInfo EdgesInfo = ti.template() if not use_ndarray else StructEdgesInfo FacesInfo = ti.template() if not use_ndarray else StructFacesInfo @@ -2273,3 +2255,4 @@ def __init__(self, solver): SDFInfo = ti.template() if not use_ndarray else StructSDFInfo ContactIslandState = 
ti.template() if not use_ndarray else StructContactIslandState DiffContactInput = ti.template() if not use_ndarray else StructDiffContactInput +AABBState = ti.template() diff --git a/genesis/utils/geom.py b/genesis/utils/geom.py index 3c46b2c30..d651c968c 100644 --- a/genesis/utils/geom.py +++ b/genesis/utils/geom.py @@ -1584,6 +1584,31 @@ def transform_inertia_by_T(inertia_tensor, T, mass): return R @ inertia_tensor @ R.T + translation_inertia +def spherical_to_cartesian(theta: torch.Tensor, phi: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Convert spherical coordinates to Cartesian coordinates. + + Parameters + ---------- + theta : torch.Tensor + Horizontal angles in radians. + phi : torch.Tensor + Vertical angles in radians. + + Returns + ------- + vectors : torch.Tensor + Vectors in cartesian coordinates as tensor of shape (..., 3). + """ + cos_phi = torch.cos(phi) + + x = torch.cos(theta) * cos_phi # forward + y = torch.sin(theta) * cos_phi # left + z = torch.sin(phi) # up + + return torch.stack([x, y, z], dim=-1) + + def slerp(q0, q1, t): """ Perform spherical linear interpolation between two quaternions. diff --git a/genesis/utils/keyboard.py b/genesis/utils/keyboard.py new file mode 100644 index 000000000..d2ccd4658 --- /dev/null +++ b/genesis/utils/keyboard.py @@ -0,0 +1,28 @@ +import threading + +from pynput import keyboard + + +class KeyboardDevice: + def __init__(self): + self.pressed_keys = set() + self.lock = threading.Lock() + self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release) + + def start(self): + self.listener.start() + + def stop(self): + self.listener.stop() + self.listener.join() + + def on_press(self, key: keyboard.Key): + with self.lock: + self.pressed_keys.add(key) + + def on_release(self, key: keyboard.Key): + with self.lock: + self.pressed_keys.discard(key) + + def get_cmd(self): + return self.pressed_keys diff --git a/genesis/utils/misc.py b/genesis/utils/misc.py index 7a4909a3c..928004c1d 100644 --- a/genesis/utils/misc.py +++ b/genesis/utils/misc.py @@ -3,29 +3,28 @@ import functools import logging import math +import os import platform import random -import types -import shutil import sys -import os +import types import weakref from collections import OrderedDict from dataclasses import dataclass, field from typing import Any, Callable, NoReturn, Optional, Type -import numpy as np import cpuinfo +import gstaichi as ti +import numpy as np import psutil import pyglet import torch -import gstaichi as ti from gstaichi.lang.util import is_ti_template, to_pytorch_type, to_numpy_type from gstaichi._kernels import tensor_to_ext_arr, matrix_to_ext_arr, ndarray_to_ext_arr, ndarray_matrix_to_ext_arr from gstaichi.lang import impl -from gstaichi.types import primitive_types from gstaichi.lang.exception import handle_exception_from_cpp +from gstaichi.types import primitive_types import genesis as gs from genesis.constants import backend as gs_backend @@ -312,7 +311,9 @@ def is_approx_multiple(a, b, tol=1e-7): return abs(a % b) < tol or abs(b - (a % b)) < tol -def concat_with_tensor(tensor: torch.Tensor, value, expand: tuple[int, ...] | None = None, dim: int = 0): +def concat_with_tensor( + tensor: torch.Tensor, value, expand: tuple[int, ...] 
| None = None, dim: int = 0, flatten: bool = False +): """Helper method to concatenate a value (not necessarily a tensor) with a tensor.""" if not isinstance(value, torch.Tensor): value = torch.tensor([value], dtype=tensor.dtype, device=tensor.device) @@ -320,6 +321,8 @@ def concat_with_tensor(tensor: torch.Tensor, value, expand: tuple[int, ...] | No value = value.expand(*expand) if dim < 0: dim = tensor.ndim + dim + if flatten: + value = value.flatten() assert ( 0 <= dim < tensor.ndim and tensor.ndim == value.ndim @@ -590,14 +593,14 @@ def _ti_to_python( is_out_of_bounds = not (0 <= mask < _ti_data_shape[i]) elif isinstance(mask, torch.Tensor): if not mask.ndim <= 1: - gs.raise_exception(f"Expecting 1D tensor for masks.") + gs.raise_exception("Expecting 1D tensor for masks.") # Resort on post-mortem analysis for bounds check because runtime would be to costly is_out_of_bounds = None else: # np.ndarray, list, tuple, range try: mask_start, mask_end = min(mask), max(mask) except ValueError: - gs.raise_exception(f"Expecting 1D tensor for masks.") + gs.raise_exception("Expecting 1D tensor for masks.") is_out_of_bounds = not (0 <= mask_start <= mask_end < _ti_data_shape[i]) if is_out_of_bounds: gs.raise_exception("Masks are out-of-range.") diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 1816d6b3a..54c11f685 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -248,3 +248,63 @@ def test_rigid_tactile_sensors_gravity_force(show_viewer, tol, n_envs): tol=NOISE * 10, err_msg="ContactForceSensor should read bias and noise and -gravity (normal) force clipped by max_force.", ) + + +@pytest.mark.required +@pytest.mark.parametrize("n_envs", [0, 2]) +@pytest.mark.parametrize("is_fixed", [False, True]) +def test_lidar_grid_pattern_ground_distance(show_viewer, tol, n_envs, is_fixed): + """Test if the Raycaster sensor with GridPattern rays pointing to ground returns the correct distance.""" + EXPECTED_DISTANCE = 1.5 # Distance from sensor to floor + NUM_RAYS_XY = 4 # Total number of rays is NUM_RAYS_XY ** 2 + BOX_SIZE = 0.1 # Size of the box to which the LiDAR is attached + + scene = gs.Scene( + profiling_options=gs.options.ProfilingOptions(show_FPS=False), + show_viewer=show_viewer, + ) + + plane = scene.add_entity(gs.morphs.Plane(is_free=not is_fixed)) + + block = scene.add_entity( + gs.morphs.Box( + size=(BOX_SIZE, BOX_SIZE, BOX_SIZE), + pos=(0.0, 0.0, EXPECTED_DISTANCE + BOX_SIZE / 2.0), + fixed=True, + ), + ) + + grid_pattern = gs.sensors.raycaster.GridPattern( + resolution=1.0 / (NUM_RAYS_XY - 1.0), + size=(1.0, 1.0), + direction=(0.0, 0.0, -1.0), # pointing downwards to ground + ) + + lidar = scene.add_sensor( + gs.sensors.Lidar( + pattern=grid_pattern, + entity_idx=block.idx, + pos_offset=(0.0, 0.0, -BOX_SIZE / 2.0), + return_world_frame=True, + only_cast_fixed=is_fixed, + ) + ) + + scene.build(n_envs=n_envs) + + scene.step() + + sensor_data = lidar.read() + hit_points = sensor_data["hit_points"] + distances = sensor_data["hit_ranges"] + + expected_shape = (NUM_RAYS_XY, NUM_RAYS_XY) if n_envs == 0 else (n_envs, NUM_RAYS_XY, NUM_RAYS_XY) + assert distances.shape == expected_shape, f"Expected distances shape {expected_shape}, got {distances.shape}" + + assert_allclose(hit_points[..., 2], 0.0, tol=tol, err_msg="LiDAR hit point should be at ground level (z≈0)") + assert_allclose( + distances, + EXPECTED_DISTANCE, + tol=tol, + err_msg=f"LiDAR should measure distance {EXPECTED_DISTANCE}m to ground plane", + ) From 3d1467376baf4e8db6bea06489f2c2e527643dab Mon Sep 17 00:00:00 
2001 From: Trinity Chung Date: Tue, 30 Sep 2025 10:24:45 -0400 Subject: [PATCH 02/28] deprecate compile_kernels, move sensor build --- genesis/engine/scene.py | 23 +++++++++++++++++------ genesis/engine/simulator.py | 2 -- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/genesis/engine/scene.py b/genesis/engine/scene.py index 0cfd38cb2..4aac14d8d 100644 --- a/genesis/engine/scene.py +++ b/genesis/engine/scene.py @@ -773,7 +773,7 @@ def build( env_spacing=(0.0, 0.0), n_envs_per_row: int | None = None, center_envs_at_origin=True, - compile_kernels=True, + compile_kernels=None, ): """ Builds the scene once all entities have been added. This operation is required before running the simulation. @@ -781,16 +781,23 @@ def build( Parameters ---------- n_envs : int - Number of parallel environments to create. If `n_envs` is 0, the scene will not have a batching dimension. If `n_envs` is greater than 0, the first dimension of all the input and returned states will be the batch dimension. + Number of parallel environments to create. + If `n_envs` is 0, the scene will not have a batching dimension. When greater than 0, the first dimension of + all the input and returned states will be the batch dimension. env_spacing : tuple of float, shape (2,) - The spacing between adjacent environments in the scene. This is for visualization purposes only and does not change simulation-related poses. + The spacing between adjacent environments in the scene. + This is for visualization purposes only and does not change simulation-related poses. n_envs_per_row : int The number of environments per row for visualization. If None, it will be set to `sqrt(n_envs)`. center_envs_at_origin : bool Whether to put the center of all the environments at the origin (for visualization only). - compile_kernels : bool - Whether to compile the simulation kernels inside `build()`. If False, the kernels will not be compiled (or loaded if found in the cache) until the first call of `scene.step()`. This is useful for cases you don't want to run the actual simulation, but rather just want to visualize the created scene. + compile_kernels : bool, optional + This parameter is deprecated and will be removed in future release. """ + if compile_kernels is not None: + warn_once("`compile_kernels` is deprecated and will be removed in future release.") + compile_kernels = True + with gs.logger.timer(f"Building scene ~~~<{self._uid}>~~~..."): self._parallelize(n_envs, env_spacing, n_envs_per_row, center_envs_at_origin) @@ -803,11 +810,15 @@ def build( self._is_built = True - if compile_kernels: + # This check should be removed when deprecating `compile_kernels`. 
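In caller terms, the deprecation above just means dropping the flag; a sketch assuming an already-populated `scene`:

    scene.build(n_envs=4)    # kernels are compiled (or loaded from cache) inside build() in all cases
    # scene.build(..., compile_kernels=False) still parses, but now only triggers the warning above.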
+ if compile_kernels or len(self._sim._sensor_manager._sensors) > 0: with gs.logger.timer("Compiling simulation kernels..."): self._sim.step() self._reset() + # sensors + self._sim._sensor_manager.build() + # visualizer with gs.logger.timer("Building visualizer..."): self._visualizer.build() diff --git a/genesis/engine/simulator.py b/genesis/engine/simulator.py index 5574cc99f..80a77a0c8 100644 --- a/genesis/engine/simulator.py +++ b/genesis/engine/simulator.py @@ -210,8 +210,6 @@ def build(self): if self.n_envs > 0 and self.sf_solver.is_active(): gs.raise_exception("Batching is not supported for SF solver as of now.") - self._sensor_manager.build() - # hybrid for entity in self._entities: if isinstance(entity, HybridEntity): From 49d731d12bda5e94867270e1688b78195fee1aa5 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Tue, 30 Sep 2025 10:27:43 -0400 Subject: [PATCH 03/28] update docstring --- genesis/sensors/raycaster/depth_camera.py | 2 +- genesis/sensors/raycaster/lidar.py | 2 +- genesis/sensors/raycaster/raycaster.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/genesis/sensors/raycaster/depth_camera.py b/genesis/sensors/raycaster/depth_camera.py index fc9c752db..c76e63927 100644 --- a/genesis/sensors/raycaster/depth_camera.py +++ b/genesis/sensors/raycaster/depth_camera.py @@ -37,7 +37,7 @@ class DepthCameraOptions(RaycasterOptions): update_ground_truth_only : bool, optional If True, the sensor will only update the ground truth cache, and not the measured cache. draw_debug : bool, optional - If True and the rasterizer visualization is active, spheres will be drawn at every hit point. + If True and the interactive viewer is active, spheres will be drawn at every hit point. debug_sphere_radius: float, optional The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. """ diff --git a/genesis/sensors/raycaster/lidar.py b/genesis/sensors/raycaster/lidar.py index d3d41f97c..4e9af4bf0 100644 --- a/genesis/sensors/raycaster/lidar.py +++ b/genesis/sensors/raycaster/lidar.py @@ -35,7 +35,7 @@ class LidarOptions(RaycasterOptions): update_ground_truth_only : bool, optional If True, the sensor will only update the ground truth cache, and not the measured cache. draw_debug : bool, optional - If True and the rasterizer visualization is active, spheres will be drawn at every hit point. + If True and the interactive viewer is active, spheres will be drawn at every hit point. debug_sphere_radius: float, optional The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. """ diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index bc0170979..ce4712f01 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -342,7 +342,7 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): update_ground_truth_only : bool, optional If True, the sensor will only update the ground truth cache, and not the measured cache. draw_debug : bool, optional - If True and the rasterizer visualization is active, spheres will be drawn at every hit point. + If True and the interactive viewer is active, spheres will be drawn at every hit point. debug_sphere_radius: float, optional The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. 
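As a usage sketch of the debug options documented above (`scene` and `robot` are hypothetical handles; the kwargs mirror the teleop example earlier in this series):

    sensor = scene.add_sensor(
        gs.sensors.Lidar(
            pattern=SphericalPattern(),
            entity_idx=robot.idx,
            draw_debug=True,              # spheres at every hit point while the interactive viewer runs
            debug_sphere_radius=0.05,
        )
    )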
""" From 6e03d1d63c23f8298b8f5d9da2eb8e196c6e1fec Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Tue, 30 Sep 2025 11:47:03 -0400 Subject: [PATCH 04/28] update example --- examples/keyboard_teleop.py | 1 + examples/sensors/lidar_teleop.py | 196 +++++++++++++++---------------- genesis/utils/keyboard.py | 28 ----- 3 files changed, 97 insertions(+), 128 deletions(-) delete mode 100644 genesis/utils/keyboard.py diff --git a/examples/keyboard_teleop.py b/examples/keyboard_teleop.py index 6e9ae25dd..c951c7cff 100644 --- a/examples/keyboard_teleop.py +++ b/examples/keyboard_teleop.py @@ -18,6 +18,7 @@ import threading import numpy as np +from examples.util.keyboard import KeyboardDevice from pynput import keyboard from scipy.spatial.transform import Rotation as R diff --git a/examples/sensors/lidar_teleop.py b/examples/sensors/lidar_teleop.py index 468af7c51..6f4e981ca 100644 --- a/examples/sensors/lidar_teleop.py +++ b/examples/sensors/lidar_teleop.py @@ -1,4 +1,5 @@ import argparse +import threading import numpy as np from pynput import keyboard @@ -6,13 +7,77 @@ import genesis as gs from genesis.sensors.raycaster.patterns import DepthCameraPattern, GridPattern, SphericalPattern from genesis.utils.geom import euler_to_quat -from genesis.utils.keyboard import KeyboardDevice +# Position and angle increments for keyboard teleop control KEY_DPOS = 0.1 KEY_DANGLE = 0.1 +# Number of obstacles to create in a ring around the robot +NUM_CYLINDERS = 8 +NUM_BOXES = 6 +CYLINDER_RING_RADIUS = 3.0 +BOX_RING_RADIUS = 5.0 + + +class KeyboardDevice: + def __init__(self): + self.pressed_keys = set() + self.lock = threading.Lock() + self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release) + + def start(self): + self.listener.start() + + def stop(self): + self.listener.stop() + self.listener.join() + + def on_press(self, key: keyboard.Key): + with self.lock: + self.pressed_keys.add(key) + + def on_release(self, key: keyboard.Key): + with self.lock: + self.pressed_keys.discard(key) + + def get_cmd(self): + return self.pressed_keys + + +def main(): + parser = argparse.ArgumentParser(description="Genesis LiDAR/Depth Camera Visualization with Keyboard Teleop") + parser.add_argument("-B", "--n_envs", type=int, default=0, help="Number of environments to replicate") + parser.add_argument("--cpu", action="store_true", help="Run on CPU instead of GPU") + parser.add_argument("--use-box", action="store_true", help="Use Box as robot instead of Go2") + parser.add_argument( + "-f", + "--fixed", + action="store_true", + help="Load obstacles as fixed and cast only against fixed objects (is_free=False)", + default=True, + ) + parser.add_argument( + "-nf", + "--no-fixed", + action="store_false", + dest="fixed", + help="Load obstacles as dynamic (is_free=True), raycaster will update BVH every step", + ) + parser.add_argument( + "--pattern", + type=str, + default="spherical", + choices=["spherical", "depth", "grid"], + help="Sensor pattern type", + ) + + args = parser.parse_args() + + gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32", logging_level="info") + + kb = KeyboardDevice() + kb.start() -def build_scene(show_viewer: bool, is_free: bool) -> gs.Scene: scene = gs.Scene( viewer_options=gs.options.ViewerOptions( camera_pos=(6.0, 6.0, 4.0), @@ -20,48 +85,24 @@ def build_scene(show_viewer: bool, is_free: bool) -> gs.Scene: camera_fov=60, max_FPS=60, ), - show_viewer=show_viewer, + show_viewer=args.show_viewer, show_FPS=False, ) - scene.add_entity(gs.morphs.Plane(is_free=is_free)) + 
scene.add_entity(gs.morphs.Plane(is_free=args.is_free)) # create ring of obstacles to visualize raycaster sensor hits - inner_radius = 3.0 - for i in range(8): - angle = 2 * np.pi * i / 8 - x = inner_radius * np.cos(angle) - y = inner_radius * np.sin(angle) - scene.add_entity(gs.morphs.Cylinder(height=1.5, radius=0.3, pos=(x, y, 0.75), is_free=is_free)) - - outer_radius = 5.0 - for i in range(6): - angle = 2 * np.pi * i / 6 + np.pi / 6 - x = outer_radius * np.cos(angle) - y = outer_radius * np.sin(angle) - scene.add_entity(gs.morphs.Box(size=(0.5, 0.5, 2.0), pos=(x, y, 1.0), is_free=is_free)) - - return scene - - -def create_robot_with_lidar(scene, args): - """ - Create fixed-base robot with a LiDAR or Depth Camera sensor attached. - - Parameters - ---------- - scene : gs.Scene - The scene to create the robot in. - args : argparse.Namespace - The arguments to create the robot with. - - Returns - ------- - robot : gs.engine.entities.RigidEntity - The robot entity. - sensor : gs.sensors.Raycaster - The LiDAR or Depth Camera sensor. - """ + for i in range(NUM_CYLINDERS): + angle = 2 * np.pi * i / NUM_CYLINDERS + x = CYLINDER_RING_RADIUS * np.cos(angle) + y = CYLINDER_RING_RADIUS * np.sin(angle) + scene.add_entity(gs.morphs.Cylinder(height=1.5, radius=0.3, pos=(x, y, 0.75), is_free=args.is_free)) + + for i in range(NUM_BOXES): + angle = 2 * np.pi * i / NUM_BOXES + np.pi / 6 + x = BOX_RING_RADIUS * np.cos(angle) + y = BOX_RING_RADIUS * np.sin(angle) + scene.add_entity(gs.morphs.Box(size=(0.5, 0.5, 2.0), pos=(x, y, 1.0), is_free=args.is_free)) robot_kwargs = dict( pos=(0.0, 0.0, 0.35), @@ -87,26 +128,21 @@ def create_robot_with_lidar(scene, args): if args.pattern == "depth": sensor = scene.add_sensor(gs.sensors.DepthCamera(pattern=DepthCameraPattern(), **sensor_kwargs)) - return robot, sensor - elif args.pattern == "grid": - pattern_cfg = GridPattern() - else: - if args.pattern != "spherical": - gs.logger.warning(f"Unrecognized raycaster pattern: {args.pattern}. Using 'spherical' instead.") - pattern_cfg = SphericalPattern() - - sensor = scene.add_sensor(gs.sensors.Lidar(pattern=pattern_cfg, **sensor_kwargs)) - return robot, sensor - - -def run(scene: gs.Scene, robot, sensor: gs.sensors.Lidar, n_envs: int, kb: KeyboardDevice, is_depth: bool = False): - if is_depth: scene.start_recording( - data_func=(lambda: sensor.read_image()[0]) if n_envs > 0 else sensor.read_image, + data_func=(lambda: sensor.read_image()[0]) if args.n_envs > 0 else sensor.read_image, rec_options=gs.recorders.MPLImagePlot(), ) + else: + if args.pattern == "grid": + pattern_cfg = GridPattern() + else: + if args.pattern != "spherical": + gs.logger.warning(f"Unrecognized raycaster pattern: {args.pattern}. 
Using 'spherical' instead.") + pattern_cfg = SphericalPattern() + + sensor = scene.add_sensor(gs.sensors.Lidar(pattern=pattern_cfg, **sensor_kwargs)) - scene.build(n_envs=n_envs) + scene.build(n_envs=args.n_envs) print("Keyboard Controls:") # Avoid using same keys as interactive viewer keyboard controls @@ -125,11 +161,11 @@ def run(scene: gs.Scene, robot, sensor: gs.sensors.Lidar, n_envs: int, kb: Keybo target_euler = init_euler.copy() def apply_pose_to_all_envs(pos_np: np.ndarray, quat_np: np.ndarray): - if n_envs > 0: - pos_np = np.expand_dims(pos_np, axis=0).repeat(n_envs, axis=0) - quat_np = np.expand_dims(quat_np, axis=0).repeat(n_envs, axis=0) - robot.set_pos(pos_np, zero_velocity=False) - robot.set_quat(quat_np, zero_velocity=False) + if args.n_envs > 0: + pos_np = np.expand_dims(pos_np, axis=0).repeat(args.n_envs, axis=0) + quat_np = np.expand_dims(quat_np, axis=0).repeat(args.n_envs, axis=0) + robot.set_pos(pos_np) + robot.set_quat(quat_np) apply_pose_to_all_envs(target_pos, euler_to_quat(target_euler)) @@ -178,45 +214,5 @@ def apply_pose_to_all_envs(pos_np: np.ndarray, quat_np: np.ndarray): gs.logger.info("Simulation finished.") -def main(): - parser = argparse.ArgumentParser(description="Genesis LiDAR/Depth Camera Visualization with Keyboard Teleop") - parser.add_argument("-B", "--n_envs", type=int, default=0, help="Number of environments to replicate") - parser.add_argument("--cpu", action="store_true", help="Run on CPU instead of GPU") - parser.add_argument("--use-box", action="store_true", help="Use Box as robot instead of Go2") - parser.add_argument( - "-f", - "--fixed", - action="store_true", - help="Load obstacles as fixed and cast only against fixed objects (is_free=False)", - default=True, - ) - parser.add_argument( - "-nf", - "--no-fixed", - action="store_false", - dest="fixed", - help="Load obstacles as dynamic (is_free=True), raycaster will update BVH every step", - ) - parser.add_argument( - "--pattern", - type=str, - default="spherical", - choices=["spherical", "depth", "grid"], - help="Sensor pattern type", - ) - - args = parser.parse_args() - - gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32", logging_level="info") - - kb = KeyboardDevice() - kb.start() - - scene = build_scene(show_viewer=True, is_free=not args.fixed) - robot, lidar = create_robot_with_lidar(scene, args) - - run(scene, robot, lidar, n_envs=args.n_envs, kb=kb, is_depth=args.pattern == "depth") - - if __name__ == "__main__": main() diff --git a/genesis/utils/keyboard.py b/genesis/utils/keyboard.py deleted file mode 100644 index d2ccd4658..000000000 --- a/genesis/utils/keyboard.py +++ /dev/null @@ -1,28 +0,0 @@ -import threading - -from pynput import keyboard - - -class KeyboardDevice: - def __init__(self): - self.pressed_keys = set() - self.lock = threading.Lock() - self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release) - - def start(self): - self.listener.start() - - def stop(self): - self.listener.stop() - self.listener.join() - - def on_press(self, key: keyboard.Key): - with self.lock: - self.pressed_keys.add(key) - - def on_release(self, key: keyboard.Key): - with self.lock: - self.pressed_keys.discard(key) - - def get_cmd(self): - return self.pressed_keys From c43fc0ce39ec30a824a286b36dfc7c96c3a1fb51 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Tue, 30 Sep 2025 22:37:08 -0400 Subject: [PATCH 05/28] address review 1 part 1 --- examples/sensors/lidar_teleop.py | 128 ++++++++++-------- .../entities/rigid_entity/rigid_entity.py | 11 +- 
genesis/engine/scene.py | 11 +- genesis/engine/simulator.py | 2 + genesis/options/morphs.py | 2 - genesis/recorders/plotters.py | 44 +++--- genesis/sensors/__init__.py | 2 +- genesis/sensors/base_sensor.py | 5 +- genesis/sensors/contact_force.py | 41 ------ genesis/sensors/imu.py | 29 +--- genesis/sensors/raycaster/__init__.py | 3 +- genesis/sensors/raycaster/base_pattern.py | 36 +---- genesis/sensors/raycaster/depth_camera.py | 31 +---- genesis/sensors/raycaster/lidar.py | 48 ------- genesis/sensors/raycaster/patterns.py | 62 ++++----- genesis/sensors/raycaster/raycaster.py | 99 ++++---------- tests/test_sensors.py | 2 +- 17 files changed, 173 insertions(+), 383 deletions(-) delete mode 100644 genesis/sensors/raycaster/lidar.py diff --git a/examples/sensors/lidar_teleop.py b/examples/sensors/lidar_teleop.py index 6f4e981ca..bc62463d1 100644 --- a/examples/sensors/lidar_teleop.py +++ b/examples/sensors/lidar_teleop.py @@ -2,16 +2,27 @@ import threading import numpy as np -from pynput import keyboard import genesis as gs from genesis.sensors.raycaster.patterns import DepthCameraPattern, GridPattern, SphericalPattern from genesis.utils.geom import euler_to_quat +IS_PYNPUT_AVAILABLE = False +try: + from pynput import keyboard + + IS_PYNPUT_AVAILABLE = True +except ImportError: + pass + # Position and angle increments for keyboard teleop control KEY_DPOS = 0.1 KEY_DANGLE = 0.1 +# Movement when no keyboard control is available +MOVE_RADIUS = 1.0 +MOVE_RATE = 1.0 / 100.0 + # Number of obstacles to create in a ring around the robot NUM_CYLINDERS = 8 NUM_BOXES = 6 @@ -32,11 +43,11 @@ def stop(self): self.listener.stop() self.listener.join() - def on_press(self, key: keyboard.Key): + def on_press(self, key: "keyboard.Key"): with self.lock: self.pressed_keys.add(key) - def on_release(self, key: keyboard.Key): + def on_release(self, key: "keyboard.Key"): with self.lock: self.pressed_keys.discard(key) @@ -53,7 +64,7 @@ def main(): "-f", "--fixed", action="store_true", - help="Load obstacles as fixed and cast only against fixed objects (is_free=False)", + help="Load obstacles as fixed and cast only against fixed objects", default=True, ) parser.add_argument( @@ -61,7 +72,7 @@ def main(): "--no-fixed", action="store_false", dest="fixed", - help="Load obstacles as dynamic (is_free=True), raycaster will update BVH every step", + help="Load obstacles as collidable, raycaster will update BVH every step", ) parser.add_argument( "--pattern", @@ -75,9 +86,6 @@ def main(): gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32", logging_level="info") - kb = KeyboardDevice() - kb.start() - scene = gs.Scene( viewer_options=gs.options.ViewerOptions( camera_pos=(6.0, 6.0, 4.0), @@ -85,24 +93,24 @@ def main(): camera_fov=60, max_FPS=60, ), - show_viewer=args.show_viewer, - show_FPS=False, + profiling_options=gs.options.ProfilingOptions(show_FPS=False), + show_viewer=True, ) - scene.add_entity(gs.morphs.Plane(is_free=args.is_free)) + scene.add_entity(gs.morphs.Plane()) # create ring of obstacles to visualize raycaster sensor hits for i in range(NUM_CYLINDERS): angle = 2 * np.pi * i / NUM_CYLINDERS x = CYLINDER_RING_RADIUS * np.cos(angle) y = CYLINDER_RING_RADIUS * np.sin(angle) - scene.add_entity(gs.morphs.Cylinder(height=1.5, radius=0.3, pos=(x, y, 0.75), is_free=args.is_free)) + scene.add_entity(gs.morphs.Cylinder(height=1.5, radius=0.3, pos=(x, y, 0.75))) for i in range(NUM_BOXES): angle = 2 * np.pi * i / NUM_BOXES + np.pi / 6 x = BOX_RING_RADIUS * np.cos(angle) y = BOX_RING_RADIUS * np.sin(angle) - 
scene.add_entity(gs.morphs.Box(size=(0.5, 0.5, 2.0), pos=(x, y, 1.0), is_free=args.is_free)) + scene.add_entity(gs.morphs.Box(size=(0.5, 0.5, 2.0), pos=(x, y, 1.0))) robot_kwargs = dict( pos=(0.0, 0.0, 0.35), @@ -144,15 +152,21 @@ def main(): scene.build(n_envs=args.n_envs) - print("Keyboard Controls:") - # Avoid using same keys as interactive viewer keyboard controls - print("[↑/↓/←/→]: Move XY") - print("[j/k]: Down/Up") - print("[n/m]: Roll CCW/CW") - print("[,/.]: Pitch Up/Down") - print("[o/p]: Yaw CCW/CW") - print("[\\]: Reset") - print("[esc]: Quit") + if IS_PYNPUT_AVAILABLE: + kb = KeyboardDevice() + kb.start() + + print("Keyboard Controls:") + # Avoid using same keys as interactive viewer keyboard controls + print("[↑/↓/←/→]: Move XY") + print("[j/k]: Down/Up") + print("[n/m]: Roll CCW/CW") + print("[,/.]: Pitch Up/Down") + print("[o/p]: Yaw CCW/CW") + print("[\\]: Reset") + print("[esc]: Quit") + else: + print("Keyboard teleop is disabled since pynput is not installed. To install, run `pip install pynput`.") init_pos = np.array([0.0, 0.0, 0.35], dtype=np.float32) init_euler = np.array([0.0, 0.0, 0.0], dtype=np.float32) @@ -171,41 +185,45 @@ def apply_pose_to_all_envs(pos_np: np.ndarray, quat_np: np.ndarray): try: while True: - pressed = kb.pressed_keys.copy() - if keyboard.Key.esc in pressed: - break - if keyboard.KeyCode.from_char("\\") in pressed: - target_pos[:] = init_pos - target_euler[:] = init_euler - - if keyboard.Key.up in pressed: - target_pos[0] += KEY_DPOS - if keyboard.Key.down in pressed: - target_pos[0] -= KEY_DPOS - if keyboard.Key.right in pressed: - target_pos[1] -= KEY_DPOS - if keyboard.Key.left in pressed: - target_pos[1] += KEY_DPOS - if keyboard.KeyCode.from_char("j") in pressed: - target_pos[2] -= KEY_DPOS - if keyboard.KeyCode.from_char("k") in pressed: - target_pos[2] += KEY_DPOS - - if keyboard.KeyCode.from_char("n") in pressed: - target_euler[0] += KEY_DANGLE # roll CCW around +X - if keyboard.KeyCode.from_char("m") in pressed: - target_euler[0] -= KEY_DANGLE # roll CW around +X - if keyboard.KeyCode.from_char(",") in pressed: - target_euler[1] += KEY_DANGLE # pitch up around +Y - if keyboard.KeyCode.from_char(".") in pressed: - target_euler[1] -= KEY_DANGLE # pitch down around +Y - if keyboard.KeyCode.from_char("o") in pressed: - target_euler[2] += KEY_DANGLE # yaw CCW around +Z - if keyboard.KeyCode.from_char("p") in pressed: - target_euler[2] -= KEY_DANGLE # yaw CW around +Z + if IS_PYNPUT_AVAILABLE: + pressed = kb.pressed_keys.copy() + if keyboard.Key.esc in pressed: + break + if keyboard.KeyCode.from_char("\\") in pressed: + target_pos[:] = init_pos + target_euler[:] = init_euler + + if keyboard.Key.up in pressed: + target_pos[0] += KEY_DPOS + if keyboard.Key.down in pressed: + target_pos[0] -= KEY_DPOS + if keyboard.Key.right in pressed: + target_pos[1] -= KEY_DPOS + if keyboard.Key.left in pressed: + target_pos[1] += KEY_DPOS + if keyboard.KeyCode.from_char("j") in pressed: + target_pos[2] -= KEY_DPOS + if keyboard.KeyCode.from_char("k") in pressed: + target_pos[2] += KEY_DPOS + + if keyboard.KeyCode.from_char("n") in pressed: + target_euler[0] += KEY_DANGLE # roll CCW around +X + if keyboard.KeyCode.from_char("m") in pressed: + target_euler[0] -= KEY_DANGLE # roll CW around +X + if keyboard.KeyCode.from_char(",") in pressed: + target_euler[1] += KEY_DANGLE # pitch up around +Y + if keyboard.KeyCode.from_char(".") in pressed: + target_euler[1] -= KEY_DANGLE # pitch down around +Y + if keyboard.KeyCode.from_char("o") in pressed: + target_euler[2] 
+= KEY_DANGLE # yaw CCW around +Z + if keyboard.KeyCode.from_char("p") in pressed: + target_euler[2] -= KEY_DANGLE # yaw CW around +Z + else: + # move in a circle if no keyboard control + target_pos[0] = MOVE_RADIUS * np.cos(scene.t * MOVE_RATE) + target_pos[1] = MOVE_RADIUS * np.sin(scene.t * MOVE_RATE) apply_pose_to_all_envs(target_pos, euler_to_quat(target_euler)) - scene.step() except KeyboardInterrupt: diff --git a/genesis/engine/entities/rigid_entity/rigid_entity.py b/genesis/engine/entities/rigid_entity/rigid_entity.py index 17b3a582c..918114e49 100644 --- a/genesis/engine/entities/rigid_entity/rigid_entity.py +++ b/genesis/engine/entities/rigid_entity/rigid_entity.py @@ -1,9 +1,9 @@ from copy import copy from itertools import chain -from typing import Literal, TYPE_CHECKING +from typing import TYPE_CHECKING, Literal -import numpy as np import gstaichi as ti +import numpy as np import torch import trimesh @@ -11,9 +11,8 @@ from genesis.engine.materials.base import Material from genesis.options.morphs import Morph from genesis.options.surfaces import Surface -from genesis.utils import geom as gu from genesis.utils import array_class -from genesis.utils import linalg as lu +from genesis.utils import geom as gu from genesis.utils import mesh as mu from genesis.utils import mjcf as mju from genesis.utils import terrain as tu @@ -27,8 +26,8 @@ from .rigid_link import RigidLink if TYPE_CHECKING: - from genesis.engine.solvers.rigid.rigid_solver_decomp import RigidSolver from genesis.engine.scene import Scene + from genesis.engine.solvers.rigid.rigid_solver_decomp import RigidSolver @ti.data_oriented @@ -557,6 +556,8 @@ def _load_scene(self, morph, surface): def _build(self): for link in self._links: link._build() + if not link.is_fixed: + self._is_free = True self._n_qs = self.n_qs self._n_dofs = self.n_dofs diff --git a/genesis/engine/scene.py b/genesis/engine/scene.py index 4aac14d8d..d33a30f55 100644 --- a/genesis/engine/scene.py +++ b/genesis/engine/scene.py @@ -810,14 +810,9 @@ def build( self._is_built = True - # This check should be removed when deprecating `compile_kernels`. 
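The rigid_entity change earlier in this patch boils down to the following rule, restated as a one-line sketch (assuming `entity` is a built RigidEntity and `_is_free` starts out False):

    entity._is_free = any(not link.is_fixed for link in entity._links)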
- if compile_kernels or len(self._sim._sensor_manager._sensors) > 0: - with gs.logger.timer("Compiling simulation kernels..."): - self._sim.step() - self._reset() - - # sensors - self._sim._sensor_manager.build() + with gs.logger.timer("Compiling simulation kernels..."): + self._sim.step() + self._reset() # visualizer with gs.logger.timer("Building visualizer..."): diff --git a/genesis/engine/simulator.py b/genesis/engine/simulator.py index 80a77a0c8..8980347c1 100644 --- a/genesis/engine/simulator.py +++ b/genesis/engine/simulator.py @@ -215,6 +215,8 @@ def build(self): if isinstance(entity, HybridEntity): entity.build() + self._sensor_manager.build() + def reset(self, state: SimState, envs_idx=None): for solver, solver_state in zip(self._solvers, state): if solver.n_entities > 0: diff --git a/genesis/options/morphs.py b/genesis/options/morphs.py index a5006eb78..2a463602b 100644 --- a/genesis/options/morphs.py +++ b/genesis/options/morphs.py @@ -11,13 +11,11 @@ import numpy as np import genesis as gs -import genesis.utils.geom as gu import genesis.utils.misc as mu from .misc import CoacdOptions from .options import Options - URDF_FORMAT = ".urdf" MJCF_FORMAT = ".xml" MESH_FORMATS = (".obj", ".ply", ".stl") diff --git a/genesis/recorders/plotters.py b/genesis/recorders/plotters.py index a18b046a3..01a2a7081 100644 --- a/genesis/recorders/plotters.py +++ b/genesis/recorders/plotters.py @@ -446,9 +446,12 @@ def cleanup(self): """Clean up matplotlib resources.""" super().cleanup() - self.lines.clear() - self.backgrounds.clear() + # Logger may not be available anymore + logger_exists = hasattr(gs, "logger") + if self.fig is not None: + try: + import matplotlib.pyplot as plt plt.close(self.fig) if logger_exists: @@ -463,6 +466,12 @@ def get_image_array(self): """ Capture the plot image as a video frame. + Returns + ------- + image_array : np.ndarray + The RGB image as a numpy array. 
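For context, this array is what the matplotlib recorders hand off as a video/image frame; a usage sketch with a hypothetical `depth_sensor` handle, using the same wiring as the teleop example:

    scene.start_recording(
        data_func=depth_sensor.read_image,          # called each recording step to fetch the frame
        rec_options=gs.recorders.MPLImagePlot(),
    )

For batched scenes the teleop example selects a single environment first, e.g. `lambda: depth_sensor.read_image()[0]`.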
+ """ + from matplotlib.backends.backend_agg import FigureCanvasAgg self._lock.acquire() if isinstance(self.fig.canvas, FigureCanvasAgg): @@ -471,24 +480,21 @@ def get_image_array(self): rgba_array_flat = np.frombuffer(self.fig.canvas.buffer_rgba(), dtype=np.uint8) rgb_array = rgba_array_flat.reshape((height, width, 4))[..., :3] - import matplotlib.pyplot as plt - - self.image_plot = None - self.background = None - - self.fig, self.ax = plt.subplots(figsize=self.figsize) - self.fig.tight_layout(pad=0) - self.ax.set_axis_off() - self.fig.subplots_adjust(left=0, right=1, top=1, bottom=0) - self.image_plot = self.ax.imshow(np.zeros((1, 1)), cmap="plasma", origin="upper", aspect="auto") - self._show_fig() - - def process(self, data, cur_time): - """Process new image data and update display.""" - if isinstance(data, torch.Tensor): - img_data = tensor_to_array(data) + # Rescale image if necessary + if (width, height) != tuple(self._options.window_size): + img = Image.fromarray(rgb_array) + img = img.resize(self._options.window_size, resample=Image.BILINEAR) + rgb_array = np.asarray(img) + else: + rgb_array = rgb_array.copy() else: - img_data = np.asarray(data) + # Slower but more generic fallback only if necessary + buffer = io.BytesIO() + self.fig.canvas.print_figure(buffer, format="png", dpi="figure") + buffer.seek(0) + img = Image.open(buffer) + rgb_array = np.asarray(img.convert("RGB")) + self._lock.release() return rgb_array diff --git a/genesis/sensors/__init__.py b/genesis/sensors/__init__.py index ff71125c6..3ac1e4f2c 100644 --- a/genesis/sensors/__init__.py +++ b/genesis/sensors/__init__.py @@ -3,6 +3,6 @@ from .contact_force import ContactSensorOptions as Contact from .imu import IMUOptions as IMU from .raycaster import DepthCameraOptions as DepthCamera -from .raycaster import LidarOptions as Lidar +from .raycaster import RaycasterOptions as Lidar from .raycaster import RaycasterOptions as Raycaster from .sensor_manager import SensorManager diff --git a/genesis/sensors/base_sensor.py b/genesis/sensors/base_sensor.py index 3c17944de..e11a7e278 100644 --- a/genesis/sensors/base_sensor.py +++ b/genesis/sensors/base_sensor.py @@ -75,6 +75,7 @@ def validate(self, scene): ) +# Note: dataclass is used as opposed to pydantic.BaseModel since torch.Tensors are not supported by default @dataclass class SharedSensorMetadata: """ @@ -436,16 +437,12 @@ class NoisySensorOptionsMixin: The standard deviation of the additive white noise. random_walk : float | tuple[float, ...], optional The standard deviation of the random walk, which acts as accumulated bias drift. - delay : float, optional - The delay in seconds, affecting how outdated the sensor data is when it is read. jitter : float, optional The jitter in seconds modeled as a a random additive delay sampled from a normal distribution. Jitter cannot be greater than delay. `interpolate` should be True when `jitter` is greater than 0. interpolate : bool, optional If True, the sensor data is interpolated between data points for delay + jitter. Otherwise, the sensor data at the closest time step will be used. Default is False. - update_ground_truth_only : bool, optional - If True, the sensor will only update the ground truth data, and not the measured data. """ resolution: float | tuple[float, ...] 
= 0.0 diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py index c86825034..f64eb3934 100644 --- a/genesis/sensors/contact_force.py +++ b/genesis/sensors/contact_force.py @@ -76,16 +76,6 @@ class ContactSensorOptions(RigidSensorOptionsMixin, SensorOptions): Parameters ---------- - entity_idx : int - The global entity index of the RigidEntity to which this sensor is attached. - link_idx_local : int, optional - The local index of the RigidLink of the RigidEntity to which this sensor is attached. - delay : float - The delay in seconds before the sensor data is read. - update_ground_truth_only : bool - If True, the sensor will only update the ground truth data, and not the measured data. - draw_debug : bool, optional - If True and the interactive viewer is active, a sphere will be drawn at the sensor's position. debug_sphere_radius : float, optional The radius of the debug sphere. Defaults to 0.05. debug_color : float, optional @@ -131,9 +121,6 @@ def build(self): self._shared_metadata.expanded_links_idx, link_idx, expand=(1,), dim=0 ) - if self._options.draw_debug: - self.debug_object = None - def _get_return_format(self) -> tuple[int, ...]: return (1,) @@ -197,35 +184,10 @@ class ContactForceSensorOptions(RigidSensorOptionsMixin, NoisySensorOptionsMixin Parameters ---------- - entity_idx : int - The global entity index of the RigidEntity to which this sensor is attached. - link_idx_local : int, optional - The local index of the RigidLink of the RigidEntity to which this sensor is attached. min_force : float | tuple[float, float, float], optional The minimum detectable absolute force per each axis. Values below this will be treated as 0. Default is 0. max_force : float | tuple[float, float, float], optional The maximum output absolute force per each axis. Values above this will be clipped. Default is infinity. - resolution : float | tuple[float, float, float], optional - The measurement resolution of each axis of force (smallest increment of change in the sensor reading). - Default is 0.0, which means no quantization is applied. - bias : float | tuple[float, float, float], optional - The constant additive bias of the sensor. - noise : float | tuple[float, float, float], optional - The standard deviation of the additive white noise. - random_walk : float | tuple[float, float, float], optional - The standard deviation of the random walk, which acts as accumulated bias drift. - delay : float, optional - The delay in seconds, affecting how outdated the sensor data is when it is read. - jitter : float, optional - The jitter in seconds modeled as a a random additive delay sampled from a normal distribution. - Jitter cannot be greater than delay. `interpolate` should be True when `jitter` is greater than 0. - interpolate : bool, optional - If True, the sensor data is interpolated between data points for delay + jitter. - Otherwise, the sensor data at the closest time step will be used. Default is False. - update_ground_truth_only : bool, optional - If True, the sensor will only update the ground truth data, and not the measured data. - draw_debug : bool, optional - If True and the interactive viewer is active, an arrow for the contact force will be drawn. debug_color : float, optional The rgba color of the debug arrow. Defaults to (1.0, 0.0, 1.0, 0.5). 
debug_scale : float, optional @@ -301,9 +263,6 @@ def build(self): _to_tuple(self._options.max_force, length_per_value=3), ) - if self._options.draw_debug: - self.debug_object = None - def _get_return_format(self) -> tuple[int, ...]: return (3,) diff --git a/genesis/sensors/imu.py b/genesis/sensors/imu.py index 9f17cbcc6..0456c37cc 100644 --- a/genesis/sensors/imu.py +++ b/genesis/sensors/imu.py @@ -25,6 +25,7 @@ from .sensor_manager import register_sensor if TYPE_CHECKING: + from genesis.ext.pyrender.mesh import Mesh from genesis.utils.ring_buffer import TensorRingBuffer from genesis.vis.rasterizer_context import RasterizerContext @@ -81,14 +82,6 @@ class IMUOptions(RigidSensorOptionsMixin, NoisySensorOptionsMixin, SensorOptions Parameters ---------- - entity_idx : int - The global entity index of the RigidEntity to which this IMU sensor is attached. - link_idx_local : int, optional - The local index of the RigidLink of the RigidEntity to which this IMU sensor is attached. - pos_offset : tuple[float, float, float], optional - The positional offset of the IMU sensor from the RigidLink. - euler_offset : tuple[float, float, float], optional - The rotational offset of the IMU sensor from the RigidLink in degrees. acc_resolution : float, optional The measurement resolution of the accelerometer (smallest increment of change in the sensor reading). Default is 0.0, which means no quantization is applied. @@ -115,18 +108,6 @@ class IMUOptions(RigidSensorOptionsMixin, NoisySensorOptionsMixin, SensorOptions The standard deviation of the white noise for each axis of the gyroscope. gyro_random_walk : tuple[float, float, float] The standard deviation of the bias drift for each axis of the gyroscope. - delay : float, optional - The delay in seconds, affecting how outdated the sensor data is when it is read. - jitter : float, optional - The jitter in seconds modeled as a a random additive delay sampled from a normal distribution. - Jitter cannot be greater than delay. `interpolate` should be True when `jitter` is greater than 0. - interpolate : bool, optional - If True, the sensor data is interpolated between data points for delay + jitter. - Otherwise, the sensor data at the closest time step will be used. Default is False. - update_ground_truth_only : bool, optional - If True, the sensor will only update the ground truth data, and not the measured data. - draw_debug : bool, optional - If True and the interactive viewer is active, an arrow for linear acceleration will be drawn. debug_acc_color : float, optional The rgba color of the debug acceleration arrow. Defaults to (0.0, 1.0, 1.0, 0.5). 
debug_acc_scale: float, optional @@ -184,6 +165,12 @@ class IMUSensor( NoisySensorMixin[IMUSharedMetadata], Sensor[IMUSharedMetadata], ): + def __init__(self, options: IMUOptions, shared_metadata: IMUSharedMetadata, manager: "gs.SensorManager"): + super().__init__(options, shared_metadata, manager) + self.debug_objects: list["Mesh | None"] = [None, None] + self.quat_offset: torch.Tensor + self.pos_offset: torch.Tensor + @gs.assert_built def set_acc_axes_skew(self, axes_skew: MaybeMatrix3x3Type, envs_idx=None): envs_idx = self._sanitize_envs_idx(envs_idx) @@ -256,9 +243,7 @@ def build(self): expand=(self._manager._sim._B, 2, 3, 3), dim=1, ) - if self._options.draw_debug: - self.debug_objects = [None, None] self.quat_offset = self._shared_metadata.offsets_quat[0, self._idx] self.pos_offset = self._shared_metadata.offsets_pos[0, self._idx] diff --git a/genesis/sensors/raycaster/__init__.py b/genesis/sensors/raycaster/__init__.py index 445a7f67e..8464c4102 100644 --- a/genesis/sensors/raycaster/__init__.py +++ b/genesis/sensors/raycaster/__init__.py @@ -1,6 +1,5 @@ -from .base_pattern import DynamicPatternGenerator, RaycastPattern, RaycastPatternGenerator +from .base_pattern import RaycastPattern, RaycastPatternGenerator from .depth_camera import DepthCameraOptions -from .lidar import LidarOptions from .patterns import ( DepthCameraPattern, GridPattern, diff --git a/genesis/sensors/raycaster/base_pattern.py b/genesis/sensors/raycaster/base_pattern.py index 3e709b41c..9814da852 100644 --- a/genesis/sensors/raycaster/base_pattern.py +++ b/genesis/sensors/raycaster/base_pattern.py @@ -1,4 +1,5 @@ from dataclasses import dataclass +from typing import TypeVar import torch @@ -11,10 +12,6 @@ class RaycastPattern: Base class for raycast pattern options. """ - def validate(self): - """Validate the pattern options.""" - pass - def get_return_shape(self) -> tuple[int, ...]: """Get the shape of the ray vectors, e.g. (n_scan_lines, n_points_per_line) or (n_rays,)""" raise NotImplementedError(f"{type(self).__name__} must implement `get_return_shape()`.") @@ -46,39 +43,14 @@ def get_ray_starts(self) -> torch.Tensor: return torch.zeros((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) -class DynamicPatternGenerator(RaycastPatternGenerator): - """Base class for dynamic raycast pattern generators.""" - - def __init__(self, options: RaycastPattern): - super().__init__(options) - self._cached_ray_directions: torch.Tensor = torch.empty(self._return_shape, dtype=gs.tc_float, device=gs.device) - - def update_ray_directions(self, cur_t: float): - """ - Compute the local direction vectors of the rays and save them to `self._cached_ray_dirs`. - - Parameters - ---------- - cur_t : float - Current simulation time in seconds. - """ - raise NotImplementedError(f"{type(self).__name__} must implement `update_ray_directions()`.") - - def get_ray_directions(self) -> torch.Tensor: - """ - Get the local direction vectors of the rays. 
- """ - if self._cached_ray_directions.isnan().any(): - self.update_ray_directions(self._cached_ray_directions) - return self._cached_ray_directions - - RAYCAST_PATTERN_NAME_TO_OPTIONS_MAP: dict[str, type[RaycastPattern]] = {} RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP: dict[type[RaycastPattern], type[RaycastPatternGenerator]] = {} def register_pattern(pattern_type: type[RaycastPattern], name: str): - def _impl(generator_type: type[RaycastPatternGenerator]): + T = TypeVar("T", bound=type[RaycastPatternGenerator]) + + def _impl(generator_type: T) -> T: RAYCAST_PATTERN_NAME_TO_OPTIONS_MAP[name] = pattern_type RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP[pattern_type] = generator_type return generator_type diff --git a/genesis/sensors/raycaster/depth_camera.py b/genesis/sensors/raycaster/depth_camera.py index c76e63927..5911d97be 100644 --- a/genesis/sensors/raycaster/depth_camera.py +++ b/genesis/sensors/raycaster/depth_camera.py @@ -1,6 +1,5 @@ import torch -import genesis as gs from genesis.sensors.sensor_manager import register_sensor from .patterns import DepthCameraPattern @@ -13,39 +12,11 @@ class DepthCameraOptions(RaycasterOptions): Parameters ---------- - entity_idx : int - The global entity index of the RigidEntity to which this sensor is attached. - link_idx_local : int, optional - The local index of the RigidLink of the RigidEntity to which this sensor is attached. - pos_offset : tuple[float, float, float], optional - The mounting offset position of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). - euler_offset : tuple[float, float, float], optional - The mounting offset quaternion of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). pattern: DepthCameraPattern The raycasting pattern configuration for the sensor. - min_range : float, optional - The minimum sensing range in meters. Defaults to 0.0. - max_range : float, optional - The maximum sensing range in meters. Defaults to 20.0. - return_world_frame : bool, optional - Whether to return points in the world frame. Defaults to False (local frame). - only_cast_fixed : bool, optional - Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this - option for the **first** lidar sensor will be the behavior for **all** Lidar sensors. - delay : float, optional - The delay in seconds before the sensor data is read. - update_ground_truth_only : bool, optional - If True, the sensor will only update the ground truth cache, and not the measured cache. - draw_debug : bool, optional - If True and the interactive viewer is active, spheres will be drawn at every hit point. - debug_sphere_radius: float, optional - The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. """ - def validate(self, scene): - super().validate(scene) - if not isinstance(self.pattern, DepthCameraPattern): - gs.raise_exception("DepthCamera pattern must be an instance of DepthCameraPattern") + pattern: DepthCameraPattern = DepthCameraPattern() @register_sensor(DepthCameraOptions, RaycasterSharedMetadata) diff --git a/genesis/sensors/raycaster/lidar.py b/genesis/sensors/raycaster/lidar.py deleted file mode 100644 index 4e9af4bf0..000000000 --- a/genesis/sensors/raycaster/lidar.py +++ /dev/null @@ -1,48 +0,0 @@ -from genesis.sensors.sensor_manager import register_sensor - -from .raycaster import RaycasterOptions, RaycasterSensor, RaycasterSharedMetadata - - -class LidarOptions(RaycasterOptions): - """ - Lidar sensor that performs ray casting to get distance measurements and point clouds. 
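To illustrate the registration hook above, a hypothetical custom pattern could be wired in as follows (a sketch only; the single-ray pattern and every name in it are invented for this example):

    from dataclasses import dataclass

    import torch

    import genesis as gs
    from genesis.sensors.raycaster.base_pattern import (
        RaycastPattern,
        RaycastPatternGenerator,
        register_pattern,
    )


    @dataclass
    class SingleRayPattern(RaycastPattern):
        direction: tuple[float, float, float] = (1.0, 0.0, 0.0)

        def get_return_shape(self) -> tuple[int, ...]:
            return (1,)


    @register_pattern(SingleRayPattern, "single_ray")
    class SingleRayPatternGenerator(RaycastPatternGenerator):
        def get_ray_directions(self) -> torch.Tensor:
            # one unit ray along the configured direction; ray starts fall back to the base-class zeros
            return torch.tensor([self._options.direction], dtype=gs.tc_float, device=gs.device)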
- - Parameters - ---------- - entity_idx : int - The global entity index of the RigidEntity to which this sensor is attached. - link_idx_local : int, optional - The local index of the RigidLink of the RigidEntity to which this sensor is attached. - pos_offset : tuple[float, float, float], optional - The mounting offset position of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). - euler_offset : tuple[float, float, float], optional - The mounting offset quaternion of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). - pattern: RaycasterPattern - The raycasting pattern configuration for the sensor. - min_range : float, optional - The minimum sensing range in meters. Defaults to 0.0. - max_range : float, optional - The maximum sensing range in meters. Defaults to 20.0. - no_hit_value : float, optional - The value to return for no hit. Defaults to max_range if not specified. - return_world_frame : bool, optional - Whether to return points in the world frame. Defaults to False (local frame). - only_cast_fixed : bool, optional - Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this - option for the **first** lidar sensor will be the behavior for **all** Lidar sensors. - delay : float, optional - The delay in seconds before the sensor data is read. - update_ground_truth_only : bool, optional - If True, the sensor will only update the ground truth cache, and not the measured cache. - draw_debug : bool, optional - If True and the interactive viewer is active, spheres will be drawn at every hit point. - debug_sphere_radius: float, optional - The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. - """ - - pass - - -@register_sensor(LidarOptions, RaycasterSharedMetadata) -class LidarSensor(RaycasterSensor): - pass diff --git a/genesis/sensors/raycaster/patterns.py b/genesis/sensors/raycaster/patterns.py index 4abec7945..4ae4badf8 100644 --- a/genesis/sensors/raycaster/patterns.py +++ b/genesis/sensors/raycaster/patterns.py @@ -15,8 +15,10 @@ def _generate_uniform_angles( fov: float | tuple[float, float] | None = None, res: float | None = None, angles: Sequence[float] | None = None, - use_degrees: bool = True, ) -> torch.Tensor: + """ + Helper function to generate uniform angles given various formats (n and fov, res and fov, or angles). + """ if angles is None: assert fov is not None, "FOV should be provided if angles not given." @@ -39,24 +41,23 @@ def _generate_uniform_angles( f_min = -f_max fov_size = fov - full_rotation = 360.0 if use_degrees else math.pi - assert fov_size <= full_rotation + gs.EPS, "FOV should not be larger than a full rotation." + assert fov_size <= 360.0 + gs.EPS, "FOV should not be larger than a full rotation." # avoid duplicate angle at 0/360 degrees - if fov_size >= full_rotation - gs.EPS: + if fov_size >= 360.0 - gs.EPS: f_max -= fov_size / (n - 1) * 0.5 angles = torch.linspace(f_min, f_max, n, dtype=gs.tc_float, device=gs.device) - if use_degrees: - angles = torch.deg2rad(angles) - return angles def _compute_focal_lengths( width: int, height: int, fov_horizontal: float | None, fov_vertical: float | None ) -> tuple[float, float]: + """ + Helper function to compute focal lengths given image dimensions and FOV. 
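A quick numeric check of the aspect-ratio relation described here (illustrative arithmetic only):

    import math

    fov_h = math.radians(90.0)                                    # horizontal FOV of a 640x480 image
    fov_v = 2.0 * math.atan((480 / 640) * math.tan(fov_h / 2.0))
    math.degrees(fov_v)                                           # about 73.74 degrees

Deriving the missing FOV this way keeps the focal length identical for both axes (square pixels), which is why supplying only one of the two FOVs is enough.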
+ """ if fov_horizontal is not None and fov_vertical is None: fh_rad = math.radians(fov_horizontal) fv_rad = 2.0 * math.atan((height / width) * math.tan(fh_rad / 2.0)) @@ -74,6 +75,8 @@ def _compute_focal_lengths( # ============================== Generic Patterns ============================== + + @dataclass class GridPattern(RaycastPattern): """ @@ -89,20 +92,15 @@ class GridPattern(RaycastPattern): Grid dimensions (length, width) in meters. direction : tuple[float, float, float] Ray direction vector. - ordering : str - Point ordering, either "xy" or "yx". """ resolution: float = 0.1 size: tuple[float, float] = (2.0, 2.0) direction: tuple[float, float, float] = (0.0, 0.0, -1.0) - ordering: str = "xy" - def validate(self): - if self.ordering not in ["xy", "yx"]: - raise ValueError(f"Ordering must be 'xy' or 'yx'. Received: '{self.ordering}'.") - if self.resolution <= 0: - raise ValueError(f"Resolution must be greater than 0. Received: '{self.resolution}'.") + def __post_init__(self): + if self.resolution < 1e-3: + gs.raise_exception(f"Resolution should be at least 1e-3 (1mm). Received: '{self.resolution}'.") def get_return_shape(self) -> tuple[int, ...]: num_x = math.ceil(self.size[0] / self.resolution) + 1 @@ -116,28 +114,20 @@ class GridPatternGenerator(RaycastPatternGenerator): def __init__(self, options: GridPattern): super().__init__(options) - self.x_coords = torch.arange( - -options.size[0] / 2, options.size[0] / 2 + gs.EPS, options.resolution, dtype=gs.tc_float, device=gs.device - ) - self.y_coords = torch.arange( - -options.size[1] / 2, options.size[1] / 2 + gs.EPS, options.resolution, dtype=gs.tc_float, device=gs.device - ) + self.coords = [ + torch.arange(-size / 2, size / 2 + gs.EPS, options.resolution, dtype=gs.tc_float, device=gs.device) + for size in options.size + ] self.direction = torch.tensor(options.direction, dtype=gs.tc_float, device=gs.device) def get_ray_directions(self) -> torch.Tensor: return self.direction.expand((*self._return_shape, 3)) def get_ray_starts(self) -> torch.Tensor: - if self._options.ordering == "xy": - grid_x, grid_y = torch.meshgrid(self.x_coords, self.y_coords, indexing="xy") - else: - grid_x, grid_y = torch.meshgrid(self.x_coords, self.y_coords, indexing="ij") - - starts = torch.empty((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) + grid_x, grid_y = torch.meshgrid(*self.coords, indexing="xy") + starts = torch.zeros((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) starts[..., 0] = grid_x starts[..., 1] = grid_y - starts[..., 2] = 0.0 - return starts @@ -170,8 +160,6 @@ class SphericalPattern(RaycastPattern): Array of elevation angles. Overrides n_vertical/res_vertical/fov_vertical if provided. angles_horizontal: Sequence[float], optional Array of azimuth angles. Overrides n_horizontal/res_horizontal/fov_horizontal if provided. - use_degrees : bool, optional - Whether the provided angles are in degrees or radians. Defaults to True. 
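With the `use_degrees` switch removed, angles are always given in degrees; a configuration sketch using the fields documented above (values are arbitrary):

    pattern = SphericalPattern(
        fov_vertical=(-15.0, 15.0),     # degrees, as an explicit (min, max) pair
        fov_horizontal=360.0,           # full sweep; the helper above avoids duplicating the 0/360 ray
        n_vertical=16,
        n_horizontal=1800,
    )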
""" fov_vertical: float | tuple[float, float] = 30.0 @@ -182,15 +170,13 @@ class SphericalPattern(RaycastPattern): res_horizontal: float | None = None vertical_angles: Sequence[float] | None = None horizontal_angles: Sequence[float] | None = None - use_degrees: bool = True def validate(self): - full_rotation = 360.0 if self.use_degrees else math.pi * 2.0 for fov in (self.fov_vertical, self.fov_horizontal): - if (isinstance(fov, float) and (fov < 0 or fov > full_rotation + gs.EPS)) or ( - isinstance(fov, tuple) and (fov[1] - fov[0] > full_rotation + gs.EPS) + if (isinstance(fov, float) and (fov < 0 or fov > 360.0 + gs.EPS)) or ( + isinstance(fov, tuple) and (fov[1] - fov[0] > 360.0 + gs.EPS) ): - gs.raise_exception(f"[{type(self).__class__}] FOV should not be <0 or >{full_rotation}. Got: {fov}.") + gs.raise_exception(f"[{type(self).__class__}] FOV should be between 0 and 360. Got: {fov}.") def get_return_shape(self) -> tuple[int, ...]: if self.vertical_angles is not None and self.horizontal_angles is not None: @@ -209,14 +195,12 @@ def get_ray_directions(self) -> torch.Tensor: fov=self._options.fov_vertical, res=self._options.res_vertical, angles=self._options.vertical_angles, - use_degrees=self._options.use_degrees, ) h_angles = _generate_uniform_angles( n=self._options.n_horizontal, fov=self._options.fov_horizontal, res=self._options.res_horizontal, angles=self._options.horizontal_angles, - use_degrees=self._options.use_degrees, ) h_grid, v_grid = torch.meshgrid(h_angles, v_angles, indexing="ij") @@ -288,7 +272,7 @@ def get_ray_directions(self) -> torch.Tensor: # standard camera frame coordinates x_c = (uu - cx) / fx y_c = (vv - cy) / fy - z_c = torch.ones_like(x_c, dtype=gs.tc_float, device=gs.device) + z_c = torch.ones_like(x_c) # transform to robotics camera frame dirs = torch.stack([z_c, -x_c, -y_c], dim=-1) diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index ce4712f01..5efd9f582 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -42,6 +42,7 @@ (0.2, 0.6, 1.0, 1.0), (1.0, 1.0, 0.2, 1.0), ) +STACK_SIZE = ti.static(64) @ti.func @@ -51,7 +52,7 @@ def ray_triangle_intersection(ray_start, ray_dir, v0, v1, v2): Returns: vec4(t, u, v, hit) where hit=1.0 if intersection found, 0.0 otherwise """ - result = ti.math.vec4(0.0, 0.0, 0.0, 0.0) + result = ti.Vector.zero(gs.ti_float, 4) edge1 = v1 - v0 edge2 = v2 - v0 @@ -63,12 +64,12 @@ def ray_triangle_intersection(ray_start, ray_dir, v0, v1, v2): # Check all conditions in sequence without early returns valid = True - t = 0.0 - u = 0.0 - v = 0.0 - f = 0.0 - s = ti.math.vec3(0.0, 0.0, 0.0) - q = ti.math.vec3(0.0, 0.0, 0.0) + t = gs.ti_float(0.0) + u = gs.ti_float(0.0) + v = gs.ti_float(0.0) + f = gs.ti_float(0.0) + s = ti.Vector.zero(gs.ti_float, 3) + q = ti.Vector.zero(gs.ti_float, 3) # If determinant is near zero, ray lies in plane of triangle if ti.abs(a) < gs.EPS: @@ -112,29 +113,22 @@ def ray_aabb_intersection(ray_start, ray_dir, aabb_min, aabb_max): result = -1.0 # Use the slab method for ray-AABB intersection + sign = ti.select(ray_dir >= 0.0, 1.0, -1.0) + ray_dir = sign * ti.max(ti.abs(ray_dir), gs.EPS) inv_dir = 1.0 / ray_dir - # Handle potential division by zero with large values - large_value = ti.static(1 / gs.EPS) - if ti.abs(ray_dir.x) < gs.EPS: - inv_dir.x = large_value if ray_dir.x >= 0.0 else -large_value - if ti.abs(ray_dir.y) < gs.EPS: - inv_dir.y = large_value if ray_dir.y >= 0.0 else -large_value - if ti.abs(ray_dir.z) < gs.EPS: - 
inv_dir.z = large_value if ray_dir.z >= 0.0 else -large_value - t1 = (aabb_min - ray_start) * inv_dir t2 = (aabb_max - ray_start) * inv_dir tmin = ti.min(t1, t2) tmax = ti.max(t1, t2) - t_near = ti.max(ti.max(tmin.x, tmin.y), tmin.z) - t_far = ti.min(ti.min(tmax.x, tmax.y), tmax.z) + t_near = ti.max(tmin.x, tmin.y, tmin.z, 0.0) + t_far = ti.min(tmax.x, tmax.y, tmax.z) # Check if ray intersects AABB - if t_near <= t_far and t_far >= 0.0: - result = ti.max(t_near, 0.0) + if t_near <= t_far: + result = t_near return result @@ -190,7 +184,6 @@ def kernel_cast_lidar_rays( The result `output_hits` will be a 2D array of shape (n_env, n_points * 4) where in the second dimension, the first n_points * 3 are hit points and the last n_points is hit ranges. """ - STACK_SIZE = ti.static(64) n_triangles = map_lidar_faces.shape[0] n_points = ray_starts.shape[0] @@ -215,13 +208,13 @@ def kernel_cast_lidar_rays( hit_face = -1 # Stack for non-recursive traversal - stack = ti.Vector.zero(ti.i32, STACK_SIZE) - stack[0] = 0 # Start traversal at the root node (index 0) - stack_ptr = 1 + node_stack = ti.Vector.zero(ti.i32, STACK_SIZE) + node_stack[0] = 0 # Start traversal at the root node (index 0) + stack_idx = 1 - while stack_ptr > 0: - stack_ptr -= 1 - node_idx = stack[stack_ptr] + while stack_idx > 0: + stack_idx -= 1 + node_idx = node_stack[stack_idx] node = bvh_nodes[i_b, node_idx] @@ -268,16 +261,12 @@ def kernel_cast_lidar_rays( # hit_u, hit_v could be stored here if needed else: # It's an INTERNAL node - assert node.left >= 0 - assert node.right >= 0 - assert node.left < bvh_nodes.shape[1] - assert node.right < bvh_nodes.shape[1] # Push children onto the stack for further traversal # Make sure stack doesn't overflow - if stack_ptr < ti.static(STACK_SIZE - 2): - stack[stack_ptr] = node.left - stack[stack_ptr + 1] = node.right - stack_ptr += 2 + if stack_idx < ti.static(STACK_SIZE - 2): + node_stack[stack_idx] = node.left + node_stack[stack_idx + 1] = node.right + stack_idx += 2 # --- 3. 
Process Hit Result --- if hit_face >= 0: @@ -292,11 +281,7 @@ def kernel_cast_lidar_rays( else: # Local frame output along provided local ray direction hit_point = dist * ti_normalize( - ti.math.vec3( - ray_directions[i_p, 0], - ray_directions[i_p, 1], - ray_directions[i_p, 2], - ) + ti.math.vec3(ray_directions[i_p, 0], ray_directions[i_p, 1], ray_directions[i_p, 2]) ) output_hits[i_b, i_p * 3 + 0] = hit_point.x output_hits[i_b, i_p * 3 + 1] = hit_point.y @@ -415,14 +400,6 @@ def build(self): n_batches=self._shared_metadata.solver.free_verts_state.pos.shape[1], n_aabbs=n_lidar_faces ) - print("RaycasterSensor::build preupdate -----") - print("aabb n_batches") - print(self._shared_metadata.aabb.n_batches) - print("aabb n_aabbs") - print(self._shared_metadata.aabb.n_aabbs) - print("aabbs") - print(self._shared_metadata.aabb.aabbs) - rigid_solver_decomp.kernel_update_all_verts( geoms_state=self._shared_metadata.solver.geoms_state, verts_info=self._shared_metadata.solver.verts_info, @@ -441,19 +418,6 @@ def build(self): self._shared_metadata.bvh = LBVH(self._shared_metadata.aabb) self._shared_metadata.bvh.build() - print("RaycasterSensor::build -----") - print("aabb n_batches") - print(self._shared_metadata.aabb.n_batches) - print("aabb n_aabbs") - print(self._shared_metadata.aabb.n_aabbs) - print("aabbs") - print(self._shared_metadata.aabb.aabbs) - print() - print("bvh nodes") - print(self._shared_metadata.bvh.nodes) - print("bvh morton codes") - print(self._shared_metadata.bvh.morton_codes) - self.pattern_generator = create_pattern_generator(self._options.pattern) self._shared_metadata.pattern_generators.append(self.pattern_generator) pos_offset = self._shared_metadata.offsets_pos[-1, :] @@ -527,19 +491,6 @@ def _update_shared_ground_truth_cache( links_pos = links_pos.unsqueeze(0) links_quat = links_quat.unsqueeze(0) - print("RaycasterSensor::update -----") - print("aabb n_batches") - print(shared_metadata.aabb.n_batches) - print("aabb n_aabbs") - print(shared_metadata.aabb.n_aabbs) - print("aabbs") - print(shared_metadata.aabb.aabbs) - print() - print("bvh nodes") - print(shared_metadata.bvh.nodes) - print("bvh morton codes") - print(shared_metadata.bvh.morton_codes) - kernel_cast_lidar_rays( map_lidar_faces=shared_metadata.map_lidar_faces, fixed_verts_state=shared_metadata.solver.fixed_verts_state, diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 54c11f685..0e97d5670 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -299,7 +299,7 @@ def test_lidar_grid_pattern_ground_distance(show_viewer, tol, n_envs, is_fixed): distances = sensor_data["hit_ranges"] expected_shape = (NUM_RAYS_XY, NUM_RAYS_XY) if n_envs == 0 else (n_envs, NUM_RAYS_XY, NUM_RAYS_XY) - assert distances.shape == expected_shape, f"Expected distances shape {expected_shape}, got {distances.shape}" + assert distances.shape == expected_shape assert_allclose(hit_points[..., 2], 0.0, tol=tol, err_msg="LiDAR hit point should be at ground level (z≈0)") assert_allclose( From d2c20fb59a990ba36fa24d70f30f05c52dc4db6d Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Wed, 1 Oct 2025 01:58:25 -0400 Subject: [PATCH 06/28] address review 1 part 2 --- examples/sensors/imu_franka.py | 22 +++-- genesis/sensors/base_sensor.py | 48 ++++------ genesis/sensors/contact_force.py | 35 +++++-- genesis/sensors/imu.py | 35 ++++--- genesis/sensors/raycaster/depth_camera.py | 2 +- genesis/sensors/raycaster/raycaster.py | 107 ++++++++++++---------- genesis/sensors/sensor_manager.py | 12 ++- tests/test_sensors.py | 42 ++++----- 
8 files changed, 172 insertions(+), 131 deletions(-) diff --git a/examples/sensors/imu_franka.py b/examples/sensors/imu_franka.py index 85df8c6f4..16cdc9ee1 100644 --- a/examples/sensors/imu_franka.py +++ b/examples/sensors/imu_franka.py @@ -59,26 +59,34 @@ def main(): draw_debug=True, ) ) - labels = {"lin_acc": ("acc_x", "acc_y", "acc_z"), "ang_vel": ("gyro_x", "gyro_y", "gyro_z")} if args.vis: + xyz = ("x", "y", "z") + labels = {"lin_acc": xyz, "true_lin_acc": xyz, "ang_vel": xyz, "true_ang_vel": xyz} + + def data_func(): + data = imu.read() + true_data = imu.read_ground_truth() + return { + "lin_acc": data.lin_acc, + "true_lin_acc": true_data.lin_acc, + "ang_vel": data.ang_vel, + "true_ang_vel": true_data.ang_vel, + } + if IS_PYQTGRAPH_AVAILABLE: - imu.start_recording(gs.recorders.PyQtLinePlot(title="IMU Measured Data", labels=labels)) scene.start_recording( - imu.read_ground_truth, + data_func, gs.recorders.PyQtLinePlot(title="IMU Ground Truth Data", labels=labels), ) elif IS_MATPLOTLIB_AVAILABLE: gs.logger.info("pyqtgraph not found, falling back to matplotlib.") - imu.start_recording(gs.recorders.MPLLinePlot(title="IMU Measured Data", labels=labels)) scene.start_recording( - imu.read_ground_truth, + data_func, gs.recorders.MPLLinePlot(title="IMU Ground Truth Data", labels=labels), ) else: print("matplotlib or pyqtgraph not found, skipping real-time plotting.") - imu.start_recording(gs.recorders.NPZFile(filename="imu_data.npz")) - ########################## build ########################## scene.build() diff --git a/genesis/sensors/base_sensor.py b/genesis/sensors/base_sensor.py index e11a7e278..956c37337 100644 --- a/genesis/sensors/base_sensor.py +++ b/genesis/sensors/base_sensor.py @@ -1,6 +1,6 @@ from dataclasses import dataclass, field from functools import partial -from typing import TYPE_CHECKING, Generic, Sequence, TypeVar +from typing import TYPE_CHECKING, Generic, Sequence, Type, TypeVar import gstaichi as ti import numpy as np @@ -104,7 +104,9 @@ class Sensor(RBC, Generic[SharedSensorMetadataT]): the shared cache to return the correct data. """ - def __init__(self, sensor_options: "SensorOptions", sensor_idx: int, sensor_manager: "SensorManager"): + def __init__( + self, sensor_options: "SensorOptions", sensor_idx: int, data_cls: Type[tuple], sensor_manager: "SensorManager" + ): self._options: "SensorOptions" = sensor_options self._idx: int = sensor_idx self._manager: "SensorManager" = sensor_manager @@ -115,14 +117,13 @@ def __init__(self, sensor_options: "SensorOptions", sensor_idx: int, sensor_mana self._delay_ts = round(self._options.delay / self._dt) self._cache_slices: list[slice] = [] - self._return_format = self._get_return_format() - is_return_dict = isinstance(self._return_format, dict) - if is_return_dict: - self._return_shapes = self._return_format.values() - self._get_formatted_data = self._get_formatted_data_dict - else: - self._return_shapes = (self._return_format,) - self._get_formatted_data = self._get_formatted_data_tuple + self._return_data_class = data_cls + return_format = self._get_return_format() + assert len(return_format) > 0 + if isinstance(return_format[0], int): + return_format = (return_format,) + self._return_shapes: tuple[tuple[int, ...], ...] 
= return_format + self._cache_size = 0 for shape in self._return_shapes: data_size = np.prod(shape) @@ -164,17 +165,15 @@ def reset(cls, shared_metadata: SharedSensorMetadataT, envs_idx): """ pass - def _get_return_format(self) -> dict[str, tuple[int, ...]] | tuple[int, ...]: + def _get_return_format(self) -> tuple[int | tuple[int, ...], ...]: """ Get the data format of the read() return value. Returns ------- - return_format : dict | tuple - - If tuple, the final shape of the read() return value. - e.g. (2, 3) means read() will return a tensor of shape (2, 3). - - If dict a dictionary with string keys and tensor values will be returned. - e.g. {"pos": (3,), "quat": (4,)} returns a dict of tensors [0:3] and [3:7] from the cache. + return_format : tuple[tuple[int, ...], ...] + The output shape(s) of the tensor data returned by read(), e.g. (2, 3) means read() will return a single + tensor of shape (2, 3) and ((3,), (3,)) would return two tensors of shape (3,). """ raise NotImplementedError(f"{type(self).__name__} has not implemented `get_return_format()`.") @@ -307,9 +306,9 @@ def _apply_delay_to_shared_cache( tensor_start += tensor_size - def _get_return_values(self, tensor: torch.Tensor, envs_idx=None) -> list[torch.Tensor]: + def _get_formatted_data(self, tensor: torch.Tensor, envs_idx=None) -> torch.Tensor: """ - Preprares the given tensor into multiple tensors matching `self._return_shapes`. + Returns tensor(s) matching the return format. Note that this method does not clone the data tensor, it should have been cloned by the caller. """ @@ -320,20 +319,13 @@ def _get_return_values(self, tensor: torch.Tensor, envs_idx=None) -> list[torch. for i, shape in enumerate(self._return_shapes): field_data = tensor_chunk[..., self._cache_slices[i]].reshape((len(envs_idx), *shape)) - if self._manager._sim.n_envs == 0: field_data = field_data.squeeze(0) return_values.append(field_data) - return return_values - - def _get_formatted_data_dict(self, tensor: torch.Tensor, envs_idx=None) -> dict[str, torch.Tensor]: - """Returns a dictionary of tensors matching the return format.""" - return dict(zip(self._return_format.keys(), self._get_return_values(tensor, envs_idx))) - - def _get_formatted_data_tuple(self, tensor: torch.Tensor, envs_idx=None) -> torch.Tensor: - """Returns a tensor matching the return format.""" - return self._get_return_values(tensor, envs_idx)[0] + if len(return_values) == 1: + return return_values[0] + return self._return_data_class(*return_values) def _sanitize_envs_idx(self, envs_idx) -> torch.Tensor: return self._manager._sim._scene._sanitize_envs_idx(envs_idx) diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py index f64eb3934..ec0c5a3be 100644 --- a/genesis/sensors/contact_force.py +++ b/genesis/sensors/contact_force.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import TYPE_CHECKING, Sequence +from typing import TYPE_CHECKING, Sequence, Type import gstaichi as ti import numpy as np @@ -103,8 +103,15 @@ class ContactSensor(Sensor): Sensor that returns bool based on whether associated RigidLink is in contact. 
""" - def __init__(self, sensor_options: ContactSensorOptions, sensor_idx: int, sensor_manager: "SensorManager"): - super().__init__(sensor_options, sensor_idx, sensor_manager) + def __init__( + self, + sensor_options: ContactSensorOptions, + sensor_idx: int, + data_cls: Type[tuple], + sensor_manager: "SensorManager", + ): + super().__init__(sensor_options, sensor_idx, data_cls, sensor_manager) + self._link: "RigidLink" | None = None self.debug_object: "Mesh" | None = None @@ -241,8 +248,15 @@ class ContactForceSensor( Sensor that returns the total contact force being applied to the associated RigidLink in its local frame. """ - def __init__(self, sensor_options: ContactForceSensorOptions, sensor_idx: int, sensor_manager: "SensorManager"): - super().__init__(sensor_options, sensor_idx, sensor_manager) + def __init__( + self, + sensor_options: ContactForceSensorOptions, + sensor_idx: int, + data_cls: Type[tuple], + sensor_manager: "SensorManager", + ): + super().__init__(sensor_options, sensor_idx, data_cls, sensor_manager) + self.debug_object: "Mesh" | None = None def build(self): @@ -334,9 +348,14 @@ def _draw_debug(self, context: "RasterizerContext"): pos = self._link.get_pos(envs_idx=env_idx) quat = self._link.get_quat(envs_idx=env_idx) - force = self.read(envs_idx=env_idx if self._manager._sim.n_envs > 0 else None) - vec = tensor_to_array(transform_by_quat(force * self._options.debug_scale, quat)) + # TODO: cleanup env handling + if self._manager._sim.n_envs > 0: + force = self.read(envs_idx=env_idx) + vec = tensor_to_array(transform_by_quat(force * self._options.debug_scale, quat))[0] + else: + force = self.read() # cannot specify envs_idx when n_envs=0 + vec = tensor_to_array(transform_by_quat(force * self._options.debug_scale, quat)) if self.debug_object is not None: context.clear_debug_object(self.debug_object) - self.debug_object = context.draw_debug_arrow(pos=pos[0], vec=vec[0], color=self._options.debug_color) + self.debug_object = context.draw_debug_arrow(pos=pos[0], vec=vec, color=self._options.debug_color) diff --git a/genesis/sensors/imu.py b/genesis/sensors/imu.py index 0456c37cc..ae8a6cb25 100644 --- a/genesis/sensors/imu.py +++ b/genesis/sensors/imu.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, NamedTuple, Type import gstaichi as ti import numpy as np @@ -158,15 +158,27 @@ class IMUSharedMetadata(RigidSensorMetadataMixin, NoisySensorMetadataMixin, Shar gyro_indices: torch.Tensor = make_tensor_field((0, 0), dtype_factory=lambda: gs.tc_int) -@register_sensor(IMUOptions, IMUSharedMetadata) +class IMUData(NamedTuple): + lin_acc: torch.Tensor + ang_vel: torch.Tensor + + +@register_sensor(IMUOptions, IMUSharedMetadata, IMUData) @ti.data_oriented class IMUSensor( RigidSensorMixin[IMUSharedMetadata], NoisySensorMixin[IMUSharedMetadata], Sensor[IMUSharedMetadata], ): - def __init__(self, options: IMUOptions, shared_metadata: IMUSharedMetadata, manager: "gs.SensorManager"): - super().__init__(options, shared_metadata, manager) + def __init__( + self, + options: IMUOptions, + shared_metadata: IMUSharedMetadata, + data_cls: Type[IMUData], + manager: "gs.SensorManager", + ): + super().__init__(options, shared_metadata, data_cls, manager) + self.debug_objects: list["Mesh | None"] = [None, None] self.quat_offset: torch.Tensor self.pos_offset: torch.Tensor @@ -247,11 +259,8 @@ def build(self): self.quat_offset = self._shared_metadata.offsets_quat[0, self._idx] self.pos_offset = 
self._shared_metadata.offsets_pos[0, self._idx] - def _get_return_format(self) -> dict[str, tuple[int, ...]]: - return { - "lin_acc": (3,), - "ang_vel": (3,), - } + def _get_return_format(self) -> tuple[tuple[int, ...], ...]: + return (3,), (3,) @classmethod def _get_cache_dtype(cls) -> torch.dtype: @@ -324,9 +333,11 @@ def _draw_debug(self, context: "RasterizerContext"): quat = self._link.get_quat(envs_idx=env_idx) pos = self._link.get_pos(envs_idx=env_idx) + transform_by_quat(self.pos_offset, quat) - data = self.read(envs_idx=env_idx) - acc_vec = data["lin_acc"] * self._options.debug_acc_scale - gyro_vec = data["ang_vel"] * self._options.debug_gyro_scale + # cannot specify envs_idx for read() when n_envs=0 + data = self.read(envs_idx=env_idx if self._manager._sim.n_envs > 0 else None) + acc_vec = data.lin_acc * self._options.debug_acc_scale + gyro_vec = data.ang_vel * self._options.debug_gyro_scale + # transform from local frame to world frame offset_quat = transform_quat_by_quat(self.quat_offset, quat) acc_vec = tensor_to_array(transform_by_quat(acc_vec, offset_quat)) diff --git a/genesis/sensors/raycaster/depth_camera.py b/genesis/sensors/raycaster/depth_camera.py index 5911d97be..9ccd5fbeb 100644 --- a/genesis/sensors/raycaster/depth_camera.py +++ b/genesis/sensors/raycaster/depth_camera.py @@ -32,4 +32,4 @@ def read_image(self) -> torch.Tensor: torch.Tensor The depth image with shape (height, width). """ - return self.read()["hit_ranges"].reshape(self._options.pattern.height, self._options.pattern.width) + return self.read().hit_ranges.reshape(self._options.pattern.height, self._options.pattern.width) diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 5efd9f582..5b23124ed 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -1,10 +1,9 @@ import itertools import math from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, NamedTuple import gstaichi as ti -import numpy as np import torch import genesis as gs @@ -19,7 +18,7 @@ transform_by_quat, transform_by_trans_quat, ) -from genesis.utils.misc import concat_with_tensor, make_tensor_field +from genesis.utils.misc import concat_with_tensor, make_tensor_field, ti_to_torch from genesis.vis.rasterizer_context import RasterizerContext from ..base_sensor import ( @@ -367,56 +366,64 @@ class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): points_to_sensor_idx: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) -@register_sensor(RaycasterOptions, RaycasterSharedMetadata) +class RaycasterData(NamedTuple): + hit_points: torch.Tensor + hit_ranges: torch.Tensor + + +@register_sensor(RaycasterOptions, RaycasterSharedMetadata, RaycasterData) @ti.data_oriented class RaycasterSensor(RigidSensorMixin, Sensor): + @classmethod + def _build_bvh(cls, shared_metadata: RaycasterSharedMetadata): + n_lidar_faces = shared_metadata.solver.faces_info.geom_idx.shape[0] + torch_map_lidar_faces = torch.arange(n_lidar_faces, dtype=torch.int32, device=gs.device) + if shared_metadata.only_cast_fixed: + # count the number of faces in a fixed geoms + geom_is_fixed = torch.logical_not(ti_to_torch(shared_metadata.solver.geoms_info.is_free)) + faces_geom = ti_to_torch(shared_metadata.solver.faces_info.geom_idx) + n_lidar_faces = torch.sum(geom_is_fixed[faces_geom]).item() + if n_lidar_faces == 0: + gs.raise_exception( + "No fixed geoms found in the scene. 
To use only_cast_fixed, " + "at least some entities should have is_free=False." + ) + torch_map_lidar_faces = torch.where(geom_is_fixed[faces_geom])[0] + + shared_metadata.map_lidar_faces = ti.field(ti.i32, (n_lidar_faces)) + shared_metadata.map_lidar_faces.from_torch(torch_map_lidar_faces) + shared_metadata.n_lidar_faces = n_lidar_faces + + shared_metadata.aabb = AABB( + n_batches=shared_metadata.solver.free_verts_state.pos.shape[1], n_aabbs=n_lidar_faces + ) + + rigid_solver_decomp.kernel_update_all_verts( + geoms_state=shared_metadata.solver.geoms_state, + verts_info=shared_metadata.solver.verts_info, + free_verts_state=shared_metadata.solver.free_verts_state, + fixed_verts_state=shared_metadata.solver.fixed_verts_state, + ) + + kernel_update_aabbs( + map_lidar_faces=shared_metadata.map_lidar_faces, + free_verts_state=shared_metadata.solver.free_verts_state, + fixed_verts_state=shared_metadata.solver.fixed_verts_state, + verts_info=shared_metadata.solver.verts_info, + faces_info=shared_metadata.solver.faces_info, + aabb_state=shared_metadata.aabb, + ) + shared_metadata.bvh = LBVH(shared_metadata.aabb) + shared_metadata.bvh.build() + def build(self): super().build() # set shared metadata from RigidSensorMixin # first lidar sensor initialization: build aabb and bvh if self._shared_metadata.bvh is None: self._shared_metadata.only_cast_fixed = self._options.only_cast_fixed # set for first only - - n_lidar_faces = self._shared_metadata.solver.faces_info.geom_idx.shape[0] - np_map_lidar_faces = np.arange(n_lidar_faces) - if self._shared_metadata.only_cast_fixed: - # count the number of faces in a fixed geoms - geom_is_fixed = np.logical_not(self._shared_metadata.solver.geoms_info.is_free.to_numpy()) - faces_geom = self._shared_metadata.solver.faces_info.geom_idx.to_numpy() - n_lidar_faces = np.sum(geom_is_fixed[faces_geom]) - if n_lidar_faces == 0: - gs.raise_exception( - "No fixed geoms found in the scene. To use only_cast_fixed, " - "at least some entities should have is_free=False." 
- ) - np_map_lidar_faces = np.where(geom_is_fixed[faces_geom])[0] - - self._shared_metadata.map_lidar_faces = ti.field(ti.i32, (n_lidar_faces)) - self._shared_metadata.map_lidar_faces.from_numpy(np_map_lidar_faces) - self._shared_metadata.n_lidar_faces = n_lidar_faces - - self._shared_metadata.aabb = AABB( - n_batches=self._shared_metadata.solver.free_verts_state.pos.shape[1], n_aabbs=n_lidar_faces - ) - - rigid_solver_decomp.kernel_update_all_verts( - geoms_state=self._shared_metadata.solver.geoms_state, - verts_info=self._shared_metadata.solver.verts_info, - free_verts_state=self._shared_metadata.solver.free_verts_state, - fixed_verts_state=self._shared_metadata.solver.fixed_verts_state, - ) - - kernel_update_aabbs( - map_lidar_faces=self._shared_metadata.map_lidar_faces, - free_verts_state=self._shared_metadata.solver.free_verts_state, - fixed_verts_state=self._shared_metadata.solver.fixed_verts_state, - verts_info=self._shared_metadata.solver.verts_info, - faces_info=self._shared_metadata.solver.faces_info, - aabb_state=self._shared_metadata.aabb, - ) - self._shared_metadata.bvh = LBVH(self._shared_metadata.aabb) - self._shared_metadata.bvh.build() + self._build_bvh(self._shared_metadata) self.pattern_generator = create_pattern_generator(self._options.pattern) self._shared_metadata.pattern_generators.append(self.pattern_generator) @@ -453,12 +460,14 @@ def build(self): if self._options.draw_debug: self.debug_objects = [None] * self._manager._sim._B - def _get_return_format(self) -> dict[str, tuple[int, ...]]: + @classmethod + def reset(cls, shared_metadata: RaycasterSharedMetadata, envs_idx): + super().reset(shared_metadata, envs_idx) + cls._build_bvh(shared_metadata) + + def _get_return_format(self) -> tuple[tuple[int, ...], ...]: shape = self._options.pattern.get_return_shape() - return { - "hit_points": (*shape, 3), - "hit_ranges": shape, - } + return (*shape, 3), shape @classmethod def _get_cache_dtype(cls) -> torch.dtype: diff --git a/genesis/sensors/sensor_manager.py b/genesis/sensors/sensor_manager.py index 627545599..cfab7ffee 100644 --- a/genesis/sensors/sensor_manager.py +++ b/genesis/sensors/sensor_manager.py @@ -11,7 +11,7 @@ class SensorManager: - SENSOR_TYPES_MAP: dict[Type["SensorOptions"], tuple[Type["Sensor"], Type["SharedSensorMetadata"]]] = {} + SENSOR_TYPES_MAP: dict[Type["SensorOptions"], tuple[Type["Sensor"], Type["SharedSensorMetadata"], Type[tuple]]] = {} def __init__(self, sim): self._sim = sim @@ -27,11 +27,11 @@ def __init__(self, sim): def create_sensor(self, sensor_options: "SensorOptions") -> "Sensor": sensor_options.validate(self._sim.scene) - sensor_cls, metadata_cls = SensorManager.SENSOR_TYPES_MAP[type(sensor_options)] + sensor_cls, metadata_cls, data_cls = SensorManager.SENSOR_TYPES_MAP[type(sensor_options)] self._sensors_by_type.setdefault(sensor_cls, []) if sensor_cls not in self._sensors_metadata: self._sensors_metadata[sensor_cls] = metadata_cls() - sensor = sensor_cls(sensor_options, len(self._sensors_by_type[sensor_cls]), self) + sensor = sensor_cls(sensor_options, len(self._sensors_by_type[sensor_cls]), data_cls, self) self._sensors_by_type[sensor_cls].append(sensor) return sensor @@ -119,9 +119,11 @@ def sensors(self): return tuple([sensor for sensor_list in self._sensors_by_type.values() for sensor in sensor_list]) -def register_sensor(options_cls: Type["SensorOptions"], metadata_cls: Type["SharedSensorMetadata"]): +def register_sensor( + options_cls: Type["SensorOptions"], metadata_cls: Type["SharedSensorMetadata"], data_cls: Type[tuple] = 
tuple +): def _impl(sensor_cls: Type["Sensor"]): - SensorManager.SENSOR_TYPES_MAP[options_cls] = sensor_cls, metadata_cls + SensorManager.SENSOR_TYPES_MAP[options_cls] = sensor_cls, metadata_cls, data_cls return sensor_cls return _impl diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 0e97d5670..d0089722d 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -84,12 +84,12 @@ def test_imu_sensor(show_viewer, tol, n_envs): # IMU should calculate "classical linear acceleration" using the local frame without accounting for gravity # acc_classical_lin_z = - theta_dot ** 2 - cos(theta) * g - assert_allclose(imu_biased.read()["lin_acc"], expand_batch_dim(BIAS, n_envs), tol=tol) - assert_allclose(imu_biased.read()["ang_vel"], expand_batch_dim(BIAS, n_envs), tol=tol) - assert_allclose(imu_delayed.read()["lin_acc"], 0.0, tol=tol) - assert_allclose(imu_delayed.read()["ang_vel"], 0.0, tol=tol) - assert_allclose(imu_noisy.read()["lin_acc"], 0.0, tol=1e-1) - assert_allclose(imu_noisy.read()["ang_vel"], 0.0, tol=1e-1) + assert_allclose(imu_biased.read().lin_acc, expand_batch_dim(BIAS, n_envs), tol=tol) + assert_allclose(imu_biased.read().ang_vel, expand_batch_dim(BIAS, n_envs), tol=tol) + assert_allclose(imu_delayed.read().lin_acc, 0.0, tol=tol) + assert_allclose(imu_delayed.read().ang_vel, 0.0, tol=tol) + assert_allclose(imu_noisy.read().lin_acc, 0.0, tol=1e-1) + assert_allclose(imu_noisy.read().ang_vel, 0.0, tol=1e-1) # shift COM to induce angular velocity com_shift = torch.tensor([[0.1, 0.1, 0.1]]) @@ -109,21 +109,21 @@ def test_imu_sensor(show_viewer, tol, n_envs): for _ in range(DELAY_STEPS): scene.step() - assert_array_equal(imu_delayed.read()["lin_acc"], true_imu_delayed_reading["lin_acc"]) - assert_array_equal(imu_delayed.read()["ang_vel"], true_imu_delayed_reading["ang_vel"]) + assert_array_equal(imu_delayed.read().lin_acc, true_imu_delayed_reading.lin_acc) + assert_array_equal(imu_delayed.read().ang_vel, true_imu_delayed_reading.ang_vel) # let box collide with ground for _ in range(20): scene.step() - assert_array_equal(imu_biased.read_ground_truth()["lin_acc"], imu_delayed.read_ground_truth()["lin_acc"]) - assert_array_equal(imu_biased.read_ground_truth()["ang_vel"], imu_delayed.read_ground_truth()["ang_vel"]) + assert_array_equal(imu_biased.read_ground_truth().lin_acc, imu_delayed.read_ground_truth().lin_acc) + assert_array_equal(imu_biased.read_ground_truth().ang_vel, imu_delayed.read_ground_truth().ang_vel) with np.testing.assert_raises(AssertionError, msg="Angular velocity should not be zero due to COM shift"): - assert_allclose(imu_biased.read_ground_truth()["ang_vel"], 0.0, tol=tol) + assert_allclose(imu_biased.read_ground_truth().ang_vel, 0.0, tol=tol) with np.testing.assert_raises(AssertionError, msg="Delayed data should not be equal to the ground truth data"): - assert_array_equal(imu_delayed.read()["lin_acc"] - imu_delayed.read_ground_truth()["lin_acc"], 0.0) + assert_array_equal(imu_delayed.read().lin_acc - imu_delayed.read_ground_truth().lin_acc, 0.0) zero_com_shift = torch.tensor([[0.0, 0.0, 0.0]]) box.set_COM_shift(zero_com_shift.expand((n_envs, 1, 3)) if n_envs > 0 else zero_com_shift) @@ -132,22 +132,22 @@ def test_imu_sensor(show_viewer, tol, n_envs): for _ in range(80): scene.step() - assert_allclose(imu_skewed.read()["lin_acc"], -GRAVITY, tol=5e-6) + assert_allclose(imu_skewed.read().lin_acc, -GRAVITY, tol=5e-6) assert_allclose( - imu_biased.read()["lin_acc"], + imu_biased.read().lin_acc, expand_batch_dim((BIAS[0], BIAS[1], BIAS[2] - GRAVITY), 
n_envs), tol=5e-6, ) - assert_allclose(imu_biased.read()["ang_vel"], expand_batch_dim(BIAS, n_envs), tol=1e-5) + assert_allclose(imu_biased.read().ang_vel, expand_batch_dim(BIAS, n_envs), tol=1e-5) scene.reset() - assert_allclose(imu_biased.read()["lin_acc"], 0.0, tol=gs.EPS) # biased, but cache hasn't been updated yet - assert_allclose(imu_delayed.read()["lin_acc"], 0.0, tol=gs.EPS) - assert_allclose(imu_noisy.read()["ang_vel"], 0.0, tol=gs.EPS) + assert_allclose(imu_biased.read().lin_acc, 0.0, tol=gs.EPS) # biased, but cache hasn't been updated yet + assert_allclose(imu_delayed.read().lin_acc, 0.0, tol=gs.EPS) + assert_allclose(imu_noisy.read().ang_vel, 0.0, tol=gs.EPS) scene.step() - assert_allclose(imu_biased.read()["lin_acc"], expand_batch_dim(BIAS, n_envs), tol=tol) + assert_allclose(imu_biased.read().lin_acc, expand_batch_dim(BIAS, n_envs), tol=tol) @pytest.mark.required @@ -295,8 +295,8 @@ def test_lidar_grid_pattern_ground_distance(show_viewer, tol, n_envs, is_fixed): scene.step() sensor_data = lidar.read() - hit_points = sensor_data["hit_points"] - distances = sensor_data["hit_ranges"] + hit_points = sensor_data.hit_points + distances = sensor_data.hit_ranges expected_shape = (NUM_RAYS_XY, NUM_RAYS_XY) if n_envs == 0 else (n_envs, NUM_RAYS_XY, NUM_RAYS_XY) assert distances.shape == expected_shape From 9d28b308e861100b70c33a184f5ba3f561767670 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Wed, 1 Oct 2025 03:24:15 -0400 Subject: [PATCH 07/28] address review 1 part 3: simplify raycastpattern --- genesis/recorders/base_recorder.py | 2 +- genesis/recorders/file_writers.py | 17 +- genesis/recorders/recorder_manager.py | 1 - genesis/sensors/contact_force.py | 4 +- genesis/sensors/raycaster/__init__.py | 1 - genesis/sensors/raycaster/base_pattern.py | 63 ---- genesis/sensors/raycaster/depth_camera.py | 6 +- genesis/sensors/raycaster/patterns.py | 376 +++++++++++++--------- genesis/sensors/raycaster/raycaster.py | 38 ++- genesis/sensors/sensor_manager.py | 2 +- 10 files changed, 254 insertions(+), 256 deletions(-) delete mode 100644 genesis/sensors/raycaster/base_pattern.py diff --git a/genesis/recorders/base_recorder.py b/genesis/recorders/base_recorder.py index cb31724b3..c3a34a694 100644 --- a/genesis/recorders/base_recorder.py +++ b/genesis/recorders/base_recorder.py @@ -33,7 +33,7 @@ class RecorderOptions(Options): buffer_size: int = 0 buffer_full_wait_time: float = 0.1 - def validate(self): + def model_post_init(self, context): """Validate the recorder options values before the recorder is added to the scene.""" if self.hz is not None and self.hz < gs.EPS: gs.raise_exception(f"[{type(self).__name__}] recording hz should be greater than 0.") diff --git a/genesis/recorders/file_writers.py b/genesis/recorders/file_writers.py index c1b13cde2..e5c48c656 100644 --- a/genesis/recorders/file_writers.py +++ b/genesis/recorders/file_writers.py @@ -1,7 +1,7 @@ import csv import os -from pathlib import Path from collections import defaultdict +from pathlib import Path import numpy as np import torch @@ -13,7 +13,6 @@ from .base_recorder import Recorder, RecorderOptions from .recorder_manager import register_recording - IS_PYAV_AVAILABLE = False try: import av @@ -107,10 +106,10 @@ class VideoFileWriterOptions(BaseFileWriterOptions): bitrate: float = 1.0 codec_options: dict[str, str] = Field(default_factory=dict) - def validate(self): - super().validate() + def model_post_init(self, context): + super().model_post_init(context) - if not self.codec in av.codecs_available: + if self.codec not 
in av.codecs_available: gs.raise_exception(f"[{type(self).__name__}] Codec '{self._options.codec}' not supported.") if not self.filename.endswith(".mp4"): @@ -240,8 +239,8 @@ class CSVFileWriterOptions(BaseFileWriterOptions): header: tuple[str, ...] | None = None save_every_write: bool = False - def validate(self): - super().validate() + def model_post_init(self, context): + super().model_post_init(context) if not self.filename.lower().endswith(".csv"): gs.raise_exception(f"[{type(self).__name__}] CSV output must be a .csv file") @@ -322,8 +321,8 @@ class NPZFileWriterOptions(BaseFileWriterOptions): If True, a counter will be added to the filename and incremented on each reset. """ - def validate(self): - super().validate() + def model_post_init(self, context): + super().model_post_init(context) if not self.filename.lower().endswith(".npz"): gs.raise_exception(f"[{type(self).__name__}] NPZ output must be an .npz file") diff --git a/genesis/recorders/recorder_manager.py b/genesis/recorders/recorder_manager.py index bfc37b4b3..6ad6c2d3b 100644 --- a/genesis/recorders/recorder_manager.py +++ b/genesis/recorders/recorder_manager.py @@ -42,7 +42,6 @@ def add_recorder(self, data_func: Callable[[], Any], rec_options: "RecorderOptio recorder : Recorder The created recorder object. """ - rec_options.validate() recorder_cls = RecorderManager.RECORDER_TYPES_MAP[type(rec_options)] recorder = recorder_cls(self, rec_options, data_func) self._recorders.append(recorder) diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py index ec0c5a3be..73495e338 100644 --- a/genesis/sensors/contact_force.py +++ b/genesis/sensors/contact_force.py @@ -96,7 +96,7 @@ class ContactSensorMetadata(SharedSensorMetadata): expanded_links_idx: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) -@register_sensor(ContactSensorOptions, ContactSensorMetadata) +@register_sensor(ContactSensorOptions, ContactSensorMetadata, tuple) @ti.data_oriented class ContactSensor(Sensor): """ @@ -237,7 +237,7 @@ class ContactForceSensorMetadata(RigidSensorMetadataMixin, NoisySensorMetadataMi max_force: torch.Tensor = make_tensor_field((0, 3)) -@register_sensor(ContactForceSensorOptions, ContactForceSensorMetadata) +@register_sensor(ContactForceSensorOptions, ContactForceSensorMetadata, tuple) @ti.data_oriented class ContactForceSensor( RigidSensorMixin[ContactForceSensorMetadata], diff --git a/genesis/sensors/raycaster/__init__.py b/genesis/sensors/raycaster/__init__.py index 8464c4102..fc48bfd79 100644 --- a/genesis/sensors/raycaster/__init__.py +++ b/genesis/sensors/raycaster/__init__.py @@ -1,4 +1,3 @@ -from .base_pattern import RaycastPattern, RaycastPatternGenerator from .depth_camera import DepthCameraOptions from .patterns import ( DepthCameraPattern, diff --git a/genesis/sensors/raycaster/base_pattern.py b/genesis/sensors/raycaster/base_pattern.py deleted file mode 100644 index 9814da852..000000000 --- a/genesis/sensors/raycaster/base_pattern.py +++ /dev/null @@ -1,63 +0,0 @@ -from dataclasses import dataclass -from typing import TypeVar - -import torch - -import genesis as gs - - -@dataclass -class RaycastPattern: - """ - Base class for raycast pattern options. - """ - - def get_return_shape(self) -> tuple[int, ...]: - """Get the shape of the ray vectors, e.g. (n_scan_lines, n_points_per_line) or (n_rays,)""" - raise NotImplementedError(f"{type(self).__name__} must implement `get_return_shape()`.") - - -class RaycastPatternGenerator: - """ - Base class for raycast pattern generators. 
- - Implementations should be registered using the @register_pattern decorator. - """ - - def __init__(self, options: RaycastPattern): - self._options: RaycastPattern = options - self._return_shape: tuple[int, ...] = self._options.get_return_shape() - - def get_ray_directions(self) -> torch.Tensor: - """ - Get the local direction vectors of the rays. - """ - raise NotImplementedError(f"{type(self).__name__} must implement `get_ray_directions()`.") - - def get_ray_starts(self) -> torch.Tensor: - """ - Get the local start positions of the rays. - - As a default, return zeros which means all rays start at the local origin. - """ - return torch.zeros((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) - - -RAYCAST_PATTERN_NAME_TO_OPTIONS_MAP: dict[str, type[RaycastPattern]] = {} -RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP: dict[type[RaycastPattern], type[RaycastPatternGenerator]] = {} - - -def register_pattern(pattern_type: type[RaycastPattern], name: str): - T = TypeVar("T", bound=type[RaycastPatternGenerator]) - - def _impl(generator_type: T) -> T: - RAYCAST_PATTERN_NAME_TO_OPTIONS_MAP[name] = pattern_type - RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP[pattern_type] = generator_type - return generator_type - - return _impl - - -def create_pattern_generator(pattern: RaycastPattern) -> RaycastPatternGenerator: - generator_cls = RAYCAST_PATTERN_OPTIONS_TO_GENERATOR_MAP[type(pattern)] - return generator_cls(pattern) diff --git a/genesis/sensors/raycaster/depth_camera.py b/genesis/sensors/raycaster/depth_camera.py index 9ccd5fbeb..65a292c19 100644 --- a/genesis/sensors/raycaster/depth_camera.py +++ b/genesis/sensors/raycaster/depth_camera.py @@ -3,7 +3,7 @@ from genesis.sensors.sensor_manager import register_sensor from .patterns import DepthCameraPattern -from .raycaster import RaycasterOptions, RaycasterSensor, RaycasterSharedMetadata +from .raycaster import RaycasterData, RaycasterOptions, RaycasterSensor, RaycasterSharedMetadata class DepthCameraOptions(RaycasterOptions): @@ -16,10 +16,10 @@ class DepthCameraOptions(RaycasterOptions): The raycasting pattern configuration for the sensor. """ - pattern: DepthCameraPattern = DepthCameraPattern() + pattern: DepthCameraPattern -@register_sensor(DepthCameraOptions, RaycasterSharedMetadata) +@register_sensor(DepthCameraOptions, RaycasterSharedMetadata, RaycasterData) class DepthCameraSensor(RaycasterSensor): def read_image(self) -> torch.Tensor: """ diff --git a/genesis/sensors/raycaster/patterns.py b/genesis/sensors/raycaster/patterns.py index 4ae4badf8..4f524468e 100644 --- a/genesis/sensors/raycaster/patterns.py +++ b/genesis/sensors/raycaster/patterns.py @@ -7,7 +7,94 @@ import genesis as gs from genesis.utils.geom import spherical_to_cartesian -from .base_pattern import RaycastPattern, RaycastPatternGenerator, register_pattern + +@dataclass +class RaycastPattern: + """ + Base class for raycast patterns. + """ + + def __init__(self): + self._return_shape: tuple[int, ...] = self._get_return_shape() + self._ray_dirs: torch.Tensor = torch.empty((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) + self._ray_starts: torch.Tensor = torch.empty((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) + + def _get_return_shape(self) -> tuple[int, ...]: + """Get the shape of the ray vectors, e.g. (n_scan_lines, n_points_per_line) or (n_rays,)""" + raise NotImplementedError(f"{type(self).__name__} must implement `get_return_shape()`.") + + def compute_ray_dirs(self): + """ + Update ray_dirs, the local direction vectors of the rays. 
+ """ + raise NotImplementedError(f"{type(self).__name__} must implement `compute_ray_dirs()`.") + + def compute_ray_starts(self): + """ + Update ray_starts, the local start positions of the rays. + + As a default, all rays will start at the local origin. + """ + self._ray_starts.fill_(0.0) + + @property + def return_shape(self) -> tuple[int, ...]: + return self._return_shape + + @property + def ray_dirs(self) -> torch.Tensor: + return self._ray_dirs + + @property + def ray_starts(self) -> torch.Tensor: + return self._ray_starts + + +# ============================== Generic Patterns ============================== + + +class GridPattern(RaycastPattern): + """ + Configuration for grid-based ray casting. + + Defines a 2D grid of rays in the sensor coordinate system. + + Parameters + ---------- + resolution : float + Grid spacing in meters. + size : tuple[float, float] + Grid dimensions (length, width) in meters. + direction : tuple[float, float, float] + Ray direction vector. + """ + + def __init__( + self, + resolution: float = 0.1, + size: tuple[float, float] = (2.0, 2.0), + direction: tuple[float, float, float] = (0.0, 0.0, -1.0), + ): + if resolution < 1e-3: + gs.raise_exception(f"Resolution should be at least 1e-3 (1mm). Got `{resolution}`.") + self.coords = [ + torch.arange(-size / 2, size / 2 + gs.EPS, resolution, dtype=gs.tc_float, device=gs.device) for size in size + ] + self.direction = torch.tensor(direction, dtype=gs.tc_float, device=gs.device) + + super().__init__() + + def _get_return_shape(self) -> tuple[int, ...]: + return (len(self.coords[0]), len(self.coords[1])) + + def compute_ray_dirs(self): + self._ray_dirs[:] = self.direction.expand((*self._return_shape, 3)) + + def compute_ray_starts(self): + grid_x, grid_y = torch.meshgrid(*self.coords, indexing="xy") + self._ray_starts[..., 0] = grid_x + self._ray_starts[..., 1] = grid_y + self._ray_starts[..., 2] = 0.0 def _generate_uniform_angles( @@ -48,10 +135,105 @@ def _generate_uniform_angles( f_max -= fov_size / (n - 1) * 0.5 angles = torch.linspace(f_min, f_max, n, dtype=gs.tc_float, device=gs.device) + else: + angles = torch.tensor(angles, dtype=gs.tc_float, device=gs.device) return angles +class SphericalPattern(RaycastPattern): + """ + Configuration for spherical ray pattern. + + Either specify: + - (`n_points`, `fov`) for uniform spacing by count. + - (`angular_resolution`, `fov`) for uniform spacing by resolution. + - `angles` for custom angles. + + + Parameters + ---------- + fov: tuple[float | tuple[float, float], float | tuple[float, float]] + Field of view in degrees for horizontal and vertical directions. Defaults to (30.0, 360.0). + If a single float is provided, the FOV is centered around 0 degrees. + If a tuple is provided, it specifies the (min, max) angles. + n_points: tuple[int, int] + Number of horizontal/azimuth and vertical/elevation scan lines. Defaults to (64, 128). + angular_resolution: tuple[float, float], optional + Horizontal and vertical angular resolution in degrees. Overrides n_points if provided. + angles: tuple[Sequence[float], Sequence[float]], optional + Array of horizontal/vertical angles. Overrides the other options if provided. 
+ """ + + def __init__( + self, + fov: tuple[float | tuple[float, float], float | tuple[float, float]] = (30.0, 360.0), + n_points: tuple[int, int] = (64, 128), + angular_resolution: tuple[float | None, float | None] = (None, None), + angles: tuple[Sequence[float] | None, Sequence[float] | None] | None = (None, None), + ): + for fov_i in fov: + if (isinstance(fov_i, float) and (fov_i < 0 or fov_i > 360.0 + gs.EPS)) or ( + isinstance(fov_i, tuple) and (fov_i[1] - fov_i[0] > 360.0 + gs.EPS) + ): + gs.raise_exception(f"[{type(self).__name__}] FOV should be between 0 and 360. Got: {fov}.") + self.fov = fov + self.n_points = n_points + self.angular_resolution = angular_resolution + self.angles = angles + + super().__init__() + + def _get_return_shape(self) -> tuple[int, ...]: + width_and_height = [] + for n_points_i, fov_i, res_i, angles_i in zip( + self.n_points, + self.fov, + self.angular_resolution if self.angular_resolution is not None else (None, None), + self.angles if self.angles is not None else (None, None), + ): + if angles_i is not None: + n_rays = len(angles_i) + elif res_i is not None: + # Calculate number of rays from angular resolution + if isinstance(fov_i, tuple): + f_min, f_max = fov_i + fov_size = f_max - f_min + else: + fov_size = fov_i + n_rays = math.ceil(fov_size / res_i) + 1 + else: + # Use n_points directly + n_rays = n_points_i + + width_and_height.append(n_rays) + + return tuple(width_and_height) + + def compute_ray_dirs(self): + angles = [] + for n_points_i, fov_i, res_i, angles_i in zip( + self.n_points, + self.fov, + self.angular_resolution if self.angular_resolution is not None else (None, None), + self.angles if self.angles is not None else (None, None), + ): + angles.append( + _generate_uniform_angles( + n=n_points_i, + fov=fov_i, + res=res_i, + angles=angles_i, + ) + ) + + h_grid, v_grid = torch.meshgrid(*angles, indexing="ij") + self._ray_dirs[:] = spherical_to_cartesian(h_grid, v_grid) + + +# ============================== Camera Patterns ============================== + + def _compute_focal_lengths( width: int, height: int, fov_horizontal: float | None, fov_vertical: float | None ) -> tuple[float, float]: @@ -74,143 +256,6 @@ def _compute_focal_lengths( return fx, fy -# ============================== Generic Patterns ============================== - - -@dataclass -class GridPattern(RaycastPattern): - """ - Configuration for grid-based ray casting. - - Defines a 2D grid of rays in the sensor coordinate system. - - Parameters - ---------- - resolution : float - Grid spacing in meters. - size : tuple[float, float] - Grid dimensions (length, width) in meters. - direction : tuple[float, float, float] - Ray direction vector. - """ - - resolution: float = 0.1 - size: tuple[float, float] = (2.0, 2.0) - direction: tuple[float, float, float] = (0.0, 0.0, -1.0) - - def __post_init__(self): - if self.resolution < 1e-3: - gs.raise_exception(f"Resolution should be at least 1e-3 (1mm). 
Received: '{self.resolution}'.") - - def get_return_shape(self) -> tuple[int, ...]: - num_x = math.ceil(self.size[0] / self.resolution) + 1 - num_y = math.ceil(self.size[1] / self.resolution) + 1 - return (num_x, num_y) - - -@register_pattern(GridPattern, "grid") -class GridPatternGenerator(RaycastPatternGenerator): - """Generator for 2D grid ray patterns.""" - - def __init__(self, options: GridPattern): - super().__init__(options) - self.coords = [ - torch.arange(-size / 2, size / 2 + gs.EPS, options.resolution, dtype=gs.tc_float, device=gs.device) - for size in options.size - ] - self.direction = torch.tensor(options.direction, dtype=gs.tc_float, device=gs.device) - - def get_ray_directions(self) -> torch.Tensor: - return self.direction.expand((*self._return_shape, 3)) - - def get_ray_starts(self) -> torch.Tensor: - grid_x, grid_y = torch.meshgrid(*self.coords, indexing="xy") - starts = torch.zeros((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) - starts[..., 0] = grid_x - starts[..., 1] = grid_y - return starts - - -@dataclass -class SphericalPattern(RaycastPattern): - """ - Configuration for spherical ray pattern. - - Either specify: - - (n_vertical, n_horizontal, fov_vertical, fov_horizontal) for uniform spacing by count. - - (vertical_res, horizontal_res, fov_vertical, fov_horizontal) for uniform spacing by resolution. - - (vertical_angles, horizontal_angles) for custom angles. - - - Parameters - ---------- - fov_vertical: float | tuple[float, float] - Vertical field of view. - fov_horizontal: float | tuple[float, float] - Horizontal field of view. - n_vertical : int - Number of vertical/elevation scan lines. - n_horizontal : int - Number of horizontal/azimuth points per scan line. - res_vertical : float, optional - Vertical angular resolution in degrees. Overrides n_vertical if provided. - res_horizontal : float, optional - Horizontal angular resolution in degrees. Overrides n_horizontal if provided. - angles_vertical : Sequence[float], optional - Array of elevation angles. Overrides n_vertical/res_vertical/fov_vertical if provided. - angles_horizontal: Sequence[float], optional - Array of azimuth angles. Overrides n_horizontal/res_horizontal/fov_horizontal if provided. - """ - - fov_vertical: float | tuple[float, float] = 30.0 - fov_horizontal: float | tuple[float, float] = 360.0 - n_vertical: int = 64 - n_horizontal: int = 128 - res_vertical: float | None = None - res_horizontal: float | None = None - vertical_angles: Sequence[float] | None = None - horizontal_angles: Sequence[float] | None = None - - def validate(self): - for fov in (self.fov_vertical, self.fov_horizontal): - if (isinstance(fov, float) and (fov < 0 or fov > 360.0 + gs.EPS)) or ( - isinstance(fov, tuple) and (fov[1] - fov[0] > 360.0 + gs.EPS) - ): - gs.raise_exception(f"[{type(self).__class__}] FOV should be between 0 and 360. 
Got: {fov}.") - - def get_return_shape(self) -> tuple[int, ...]: - if self.vertical_angles is not None and self.horizontal_angles is not None: - return (len(self.vertical_angles), len(self.horizontal_angles)) - else: - return (self.n_vertical, self.n_horizontal) - - -@register_pattern(SphericalPattern, "spherical") -class SphericalPatternGenerator(RaycastPatternGenerator): - """Generator for uniform or custom spherical ray patterns.""" - - def get_ray_directions(self) -> torch.Tensor: - v_angles = _generate_uniform_angles( - n=self._options.n_vertical, - fov=self._options.fov_vertical, - res=self._options.res_vertical, - angles=self._options.vertical_angles, - ) - h_angles = _generate_uniform_angles( - n=self._options.n_horizontal, - fov=self._options.fov_horizontal, - res=self._options.res_horizontal, - angles=self._options.horizontal_angles, - ) - - h_grid, v_grid = torch.meshgrid(h_angles, v_angles, indexing="ij") - return spherical_to_cartesian(h_grid, v_grid) - - -# ============================== Camera Patterns ============================== - - -@dataclass class DepthCameraPattern(RaycastPattern): """Configuration for pinhole depth camera ray casting. @@ -234,32 +279,41 @@ class DepthCameraPattern(RaycastPattern): Vertical field of view in degrees. Used to compute fy if fy is None. """ - width: int = 128 - height: int = 96 - fx: float | None = None - fy: float | None = None - cx: float | None = None - cy: float | None = None - fov_horizontal: float = 90.0 - fov_vertical: float | None = None - - def get_return_shape(self) -> tuple[int, ...]: + def __init__( + self, + width: int = 128, + height: int = 96, + fx: float | None = None, + fy: float | None = None, + cx: float | None = None, + cy: float | None = None, + fov_horizontal: float = 90.0, + fov_vertical: float | None = None, + ): + + if width <= 0 or height <= 0: + gs.raise_exception(f"[{type(self).__name__}] Image dimensions must be positive. 
Got: ({width}, {height})") + + self.width = width + self.height = height + self.fx = fx + self.fy = fy + self.cx = cx + self.cy = cy + self.fov_horizontal = fov_horizontal + self.fov_vertical = fov_vertical + + super().__init__() + + def _get_return_shape(self) -> tuple[int, ...]: return (self.height, self.width) + def compute_ray_dirs(self): + W, H = int(self.width), int(self.height) -@register_pattern(DepthCameraPattern, "depth_camera") -class DepthCameraPatternGenerator(RaycastPatternGenerator): - """Generator for pinhole depth camera ray patterns.""" - - def get_ray_directions(self) -> torch.Tensor: - W, H = int(self._options.width), int(self._options.height) - - if W <= 0 or H <= 0: - raise ValueError("Image dimensions must be positive") - - fx, fy, cx, cy = self._options.fx, self._options.fy, self._options.cx, self._options.cy + fx, fy, cx, cy = self.fx, self.fy, self.cx, self.cy if fx is None or fy is None: - fx, fy = _compute_focal_lengths(W, H, self._options.fov_horizontal, self._options.fov_vertical) + fx, fy = _compute_focal_lengths(W, H, self.fov_horizontal, self.fov_vertical) if cx is None: cx = W * 0.5 if cy is None: @@ -278,4 +332,4 @@ def get_ray_directions(self) -> torch.Tensor: dirs = torch.stack([z_c, -x_c, -y_c], dim=-1) dirs /= torch.linalg.norm(dirs, dim=-1, keepdim=True) - return dirs + self._ray_dirs[:] = dirs diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 5b23124ed..8ca095fbb 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -1,7 +1,7 @@ import itertools import math from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, NamedTuple +from typing import TYPE_CHECKING, Any, NamedTuple, Type import gstaichi as ti import torch @@ -29,9 +29,10 @@ SensorOptions, SharedSensorMetadata, ) -from .base_pattern import RaycastPattern, RaycastPatternGenerator, create_pattern_generator +from .patterns import RaycastPattern if TYPE_CHECKING: + from genesis.ext.pyrender.mesh import Mesh from genesis.utils.ring_buffer import TensorRingBuffer @@ -307,9 +308,8 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): The mounting offset position of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). euler_offset : tuple[float, float, float], optional The mounting offset quaternion of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). - pattern: RaycastPatternOptions | str - The raycasting pattern configuration for the sensor. If a string is provided, a config file with the same name - is expected in the `configs` directory. + pattern: RaycastPatternOptions + The raycasting pattern for the sensor. min_range : float, optional The minimum sensing range in meters. Defaults to 0.0. max_range : float, optional @@ -331,7 +331,7 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. 
""" - pattern: RaycastPattern | str + pattern: RaycastPattern min_range: float = 0.0 max_range: float = 20.0 no_hit_value: float | None = None @@ -357,7 +357,7 @@ class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): no_hit_values: torch.Tensor = make_tensor_field((0,)) return_world_frame: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_bool) - pattern_generators: list[RaycastPatternGenerator] = field(default_factory=list) + patterns: list[RaycastPattern] = field(default_factory=list) ray_dirs: torch.Tensor = make_tensor_field((0, 3)) ray_starts: torch.Tensor = make_tensor_field((0, 3)) ray_starts_world: torch.Tensor = make_tensor_field((0, 3)) @@ -375,6 +375,16 @@ class RaycasterData(NamedTuple): @ti.data_oriented class RaycasterSensor(RigidSensorMixin, Sensor): + def __init__( + self, + options: RaycasterOptions, + shared_metadata: RaycasterSharedMetadata, + data_cls: Type[RaycasterData], + manager: "gs.SensorManager", + ): + super().__init__(options, shared_metadata, data_cls, manager) + self.debug_objects: list["Mesh | None"] = [] + @classmethod def _build_bvh(cls, shared_metadata: RaycasterSharedMetadata): n_lidar_faces = shared_metadata.solver.faces_info.geom_idx.shape[0] @@ -425,22 +435,22 @@ def build(self): self._shared_metadata.only_cast_fixed = self._options.only_cast_fixed # set for first only self._build_bvh(self._shared_metadata) - self.pattern_generator = create_pattern_generator(self._options.pattern) - self._shared_metadata.pattern_generators.append(self.pattern_generator) + pattern = self._options.pattern + self._shared_metadata.patterns.append(pattern) pos_offset = self._shared_metadata.offsets_pos[-1, :] quat_offset = self._shared_metadata.offsets_quat[..., -1, :] if self._shared_metadata.solver.n_envs > 0: quat_offset = quat_offset[0] # all envs have same offset on build - ray_starts = self.pattern_generator.get_ray_starts().reshape(-1, 3) + ray_starts = pattern.ray_starts.reshape(-1, 3) ray_starts = transform_by_trans_quat(ray_starts, pos_offset, quat_offset) self._shared_metadata.ray_starts = torch.cat([self._shared_metadata.ray_starts, ray_starts]) - ray_dirs = self.pattern_generator.get_ray_directions().reshape(-1, 3) + ray_dirs = pattern.ray_dirs.reshape(-1, 3) ray_dirs = transform_by_quat(ray_dirs, quat_offset) self._shared_metadata.ray_dirs = torch.cat([self._shared_metadata.ray_dirs, ray_dirs]) - num_rays = math.prod(self.pattern_generator._return_shape) + num_rays = math.prod(pattern.return_shape) self._shared_metadata.sensors_ray_start_idx.append(self._shared_metadata.total_n_rays) self._shared_metadata.total_n_rays += num_rays @@ -466,7 +476,7 @@ def reset(cls, shared_metadata: RaycasterSharedMetadata, envs_idx): cls._build_bvh(shared_metadata) def _get_return_format(self) -> tuple[tuple[int, ...], ...]: - shape = self._options.pattern.get_return_shape() + shape = self._options.pattern.return_shape return (*shape, 3), shape @classmethod @@ -536,7 +546,7 @@ def _draw_debug(self, context: "RasterizerContext"): Spheres will be different colors per environment. 
""" - data = self.read()["hit_points"].reshape(self._manager._sim._B, -1, 3) + data = self.read().hit_points.reshape(self._manager._sim._B, -1, 3) for env_idx, color in zip(range(data.shape[0]), itertools.cycle(DEBUG_COLORS)): points = data[env_idx] diff --git a/genesis/sensors/sensor_manager.py b/genesis/sensors/sensor_manager.py index cfab7ffee..a7d20531b 100644 --- a/genesis/sensors/sensor_manager.py +++ b/genesis/sensors/sensor_manager.py @@ -120,7 +120,7 @@ def sensors(self): def register_sensor( - options_cls: Type["SensorOptions"], metadata_cls: Type["SharedSensorMetadata"], data_cls: Type[tuple] = tuple + options_cls: Type["SensorOptions"], metadata_cls: Type["SharedSensorMetadata"], data_cls: Type[tuple] ): def _impl(sensor_cls: Type["Sensor"]): SensorManager.SENSOR_TYPES_MAP[options_cls] = sensor_cls, metadata_cls, data_cls From e35df0618db9b170b2c65ae24df2e7720a716a92 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Wed, 1 Oct 2025 10:14:53 -0400 Subject: [PATCH 08/28] restore is_free/is_fixed change --- genesis/engine/entities/rigid_entity/rigid_entity.py | 2 -- genesis/options/morphs.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/genesis/engine/entities/rigid_entity/rigid_entity.py b/genesis/engine/entities/rigid_entity/rigid_entity.py index 918114e49..cb59f22a4 100644 --- a/genesis/engine/entities/rigid_entity/rigid_entity.py +++ b/genesis/engine/entities/rigid_entity/rigid_entity.py @@ -556,8 +556,6 @@ def _load_scene(self, morph, surface): def _build(self): for link in self._links: link._build() - if not link.is_fixed: - self._is_free = True self._n_qs = self.n_qs self._n_dofs = self.n_dofs diff --git a/genesis/options/morphs.py b/genesis/options/morphs.py index 2a463602b..bf2a651e5 100644 --- a/genesis/options/morphs.py +++ b/genesis/options/morphs.py @@ -84,7 +84,7 @@ class Morph(Options): visualization: bool = True collision: bool = True requires_jac_and_IK: bool = False - is_free: bool | None = None + is_free: bool = True def __init__(self, **data): super().__init__(**data) From 4ce6b7054caeff696f37094280de5702622f661b Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Wed, 1 Oct 2025 10:56:38 -0400 Subject: [PATCH 09/28] cleanup pattern --- examples/sensors/lidar_teleop.py | 8 -- genesis/sensors/raycaster/patterns.py | 118 +++++++++----------------- 2 files changed, 39 insertions(+), 87 deletions(-) diff --git a/examples/sensors/lidar_teleop.py b/examples/sensors/lidar_teleop.py index bc62463d1..5656e588e 100644 --- a/examples/sensors/lidar_teleop.py +++ b/examples/sensors/lidar_teleop.py @@ -65,14 +65,6 @@ def main(): "--fixed", action="store_true", help="Load obstacles as fixed and cast only against fixed objects", - default=True, - ) - parser.add_argument( - "-nf", - "--no-fixed", - action="store_false", - dest="fixed", - help="Load obstacles as collidable, raycaster will update BVH every step", ) parser.add_argument( "--pattern", diff --git a/genesis/sensors/raycaster/patterns.py b/genesis/sensors/raycaster/patterns.py index 4f524468e..404397064 100644 --- a/genesis/sensors/raycaster/patterns.py +++ b/genesis/sensors/raycaster/patterns.py @@ -18,6 +18,8 @@ def __init__(self): self._return_shape: tuple[int, ...] 
= self._get_return_shape() self._ray_dirs: torch.Tensor = torch.empty((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) self._ray_starts: torch.Tensor = torch.empty((*self._return_shape, 3), dtype=gs.tc_float, device=gs.device) + self.compute_ray_dirs() + self.compute_ray_starts() def _get_return_shape(self) -> tuple[int, ...]: """Get the shape of the ray vectors, e.g. (n_scan_lines, n_points_per_line) or (n_rays,)""" @@ -97,48 +99,47 @@ def compute_ray_starts(self): self._ray_starts[..., 2] = 0.0 -def _generate_uniform_angles( - n: int | None = None, - fov: float | tuple[float, float] | None = None, - res: float | None = None, - angles: Sequence[float] | None = None, -) -> torch.Tensor: +def _generate_uniform_angles(n_points, fov, res, angles) -> tuple[torch.Tensor, torch.Tensor]: """ Helper function to generate uniform angles given various formats (n and fov, res and fov, or angles). """ + return_angles = [] - if angles is None: - assert fov is not None, "FOV should be provided if angles not given." + for n_points_i, fov_i, res_i, angles_i in zip(n_points, fov, res, angles): + if angles_i is None: + assert fov_i is not None, "FOV should be provided if angles not given." - if res is not None: - if isinstance(fov, tuple): - f_min, f_max = fov + if res_i is not None: + if isinstance(fov_i, tuple): + f_min, f_max = fov_i + else: + f_max = fov_i / 2.0 + f_min = -f_max + n_points_i = math.ceil((f_max - f_min) / res_i) + 1 + + assert n_points_i is not None + + if isinstance(fov_i, tuple): + f_min, f_max = fov_i + fov_size = f_max - f_min else: - f_max = fov / 2.0 + f_max = fov_i / 2.0 f_min = -f_max - n = math.ceil((f_max - f_min) / res) + 1 - - assert n is not None + fov_size = fov_i - if isinstance(fov, tuple): - f_min, f_max = fov - fov_size = f_max - f_min - else: - f_max = fov / 2.0 - f_min = -f_max - fov_size = fov + assert fov_size <= 360.0 + gs.EPS, "FOV should not be larger than a full rotation." - assert fov_size <= 360.0 + gs.EPS, "FOV should not be larger than a full rotation." + # Avoid duplicate angle at 0/360 degrees + if fov_size >= 360.0 - gs.EPS: + f_max -= fov_size / (n_points_i - 1) * 0.5 - # avoid duplicate angle at 0/360 degrees - if fov_size >= 360.0 - gs.EPS: - f_max -= fov_size / (n - 1) * 0.5 + angles_i = torch.linspace(f_min, f_max, n_points_i, dtype=gs.tc_float, device=gs.device) + else: + angles_i = torch.tensor(angles_i, dtype=gs.tc_float, device=gs.device) - angles = torch.linspace(f_min, f_max, n, dtype=gs.tc_float, device=gs.device) - else: - angles = torch.tensor(angles, dtype=gs.tc_float, device=gs.device) + return_angles.append(torch.deg2rad(angles_i)) - return angles + return tuple(return_angles) class SphericalPattern(RaycastPattern): @@ -154,7 +155,7 @@ class SphericalPattern(RaycastPattern): Parameters ---------- fov: tuple[float | tuple[float, float], float | tuple[float, float]] - Field of view in degrees for horizontal and vertical directions. Defaults to (30.0, 360.0). + Field of view in degrees for horizontal and vertical directions. Defaults to (360.0, 30.0). If a single float is provided, the FOV is centered around 0 degrees. If a tuple is provided, it specifies the (min, max) angles. 
n_points: tuple[int, int] @@ -167,68 +168,27 @@ class SphericalPattern(RaycastPattern): def __init__( self, - fov: tuple[float | tuple[float, float], float | tuple[float, float]] = (30.0, 360.0), + fov: tuple[float | tuple[float, float], float | tuple[float, float]] = (360.0, 30.0), n_points: tuple[int, int] = (64, 128), angular_resolution: tuple[float | None, float | None] = (None, None), - angles: tuple[Sequence[float] | None, Sequence[float] | None] | None = (None, None), + angles: tuple[Sequence[float] | None, Sequence[float] | None] = (None, None), ): for fov_i in fov: if (isinstance(fov_i, float) and (fov_i < 0 or fov_i > 360.0 + gs.EPS)) or ( isinstance(fov_i, tuple) and (fov_i[1] - fov_i[0] > 360.0 + gs.EPS) ): gs.raise_exception(f"[{type(self).__name__}] FOV should be between 0 and 360. Got: {fov}.") - self.fov = fov - self.n_points = n_points - self.angular_resolution = angular_resolution - self.angles = angles + + self.angles = _generate_uniform_angles(n_points, fov, angular_resolution, angles) super().__init__() def _get_return_shape(self) -> tuple[int, ...]: - width_and_height = [] - for n_points_i, fov_i, res_i, angles_i in zip( - self.n_points, - self.fov, - self.angular_resolution if self.angular_resolution is not None else (None, None), - self.angles if self.angles is not None else (None, None), - ): - if angles_i is not None: - n_rays = len(angles_i) - elif res_i is not None: - # Calculate number of rays from angular resolution - if isinstance(fov_i, tuple): - f_min, f_max = fov_i - fov_size = f_max - f_min - else: - fov_size = fov_i - n_rays = math.ceil(fov_size / res_i) + 1 - else: - # Use n_points directly - n_rays = n_points_i - - width_and_height.append(n_rays) - - return tuple(width_and_height) + return tuple(len(a) for a in self.angles) def compute_ray_dirs(self): - angles = [] - for n_points_i, fov_i, res_i, angles_i in zip( - self.n_points, - self.fov, - self.angular_resolution if self.angular_resolution is not None else (None, None), - self.angles if self.angles is not None else (None, None), - ): - angles.append( - _generate_uniform_angles( - n=n_points_i, - fov=fov_i, - res=res_i, - angles=angles_i, - ) - ) - - h_grid, v_grid = torch.meshgrid(*angles, indexing="ij") - self._ray_dirs[:] = spherical_to_cartesian(h_grid, v_grid) + meshgrid = torch.meshgrid(*self.angles, indexing="ij") + self._ray_dirs[:] = spherical_to_cartesian(*meshgrid) # ============================== Camera Patterns ============================== From 608e586601f3a72ba42a98f4c6b8aac62a1d4f78 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Wed, 1 Oct 2025 12:20:51 -0400 Subject: [PATCH 10/28] update post init sensor option validation --- genesis/sensors/base_sensor.py | 10 ++++++---- genesis/sensors/contact_force.py | 3 +-- genesis/sensors/imu.py | 3 +-- genesis/sensors/raycaster/raycaster.py | 11 ++++++++++- pyproject.toml | 2 +- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/genesis/sensors/base_sensor.py b/genesis/sensors/base_sensor.py index 956c37337..031c19ccd 100644 --- a/genesis/sensors/base_sensor.py +++ b/genesis/sensors/base_sensor.py @@ -16,6 +16,7 @@ if TYPE_CHECKING: from genesis.engine.entities.rigid_entity.rigid_link import RigidLink + from genesis.engine.scene import Scene from genesis.recorders.base_recorder import Recorder, RecorderOptions from genesis.utils.ring_buffer import TensorRingBuffer from genesis.vis.rasterizer_context import RasterizerContext @@ -63,9 +64,11 @@ class SensorOptions(Options): update_ground_truth_only: bool = False draw_debug: 
bool = False - def validate(self, scene): + def validate(self, scene: "Scene"): """ Validate the sensor options values before the sensor is added to the scene. + + Use pydantic's model_post_init() for validation that does not require scene context. """ delay_hz = self.delay / scene._sim.dt if not np.isclose(delay_hz, round(delay_hz), atol=gs.EPS): @@ -352,7 +355,7 @@ class RigidSensorOptionsMixin: pos_offset: Tuple3FType = (0.0, 0.0, 0.0) euler_offset: Tuple3FType = (0.0, 0.0, 0.0) - def validate(self, scene): + def validate(self, scene: "Scene"): super().validate(scene) if self.entity_idx < 0 or self.entity_idx >= len(scene.entities): gs.raise_exception(f"Invalid RigidEntity index {self.entity_idx}.") @@ -444,8 +447,7 @@ class NoisySensorOptionsMixin: jitter: float = 0.0 interpolate: bool = False - def validate(self, scene): - super().validate(scene) + def model_post_init(self, _): if self.jitter > 0 and not self.interpolate: gs.raise_exception(f"{type(self).__name__}: `interpolate` should be True when `jitter` is greater than 0.") if self.jitter > self.delay: diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py index 73495e338..c957f09aa 100644 --- a/genesis/sensors/contact_force.py +++ b/genesis/sensors/contact_force.py @@ -207,8 +207,7 @@ class ContactForceSensorOptions(RigidSensorOptionsMixin, NoisySensorOptionsMixin debug_color: tuple[float, float, float, float] = (1.0, 0.0, 1.0, 0.5) debug_scale: float = 0.01 - def validate(self, scene): - super().validate(scene) + def model_post_init(self, _): if not ( isinstance(self.min_force, float) or (isinstance(self.min_force, Sequence) and len(self.min_force) == 3) ): diff --git a/genesis/sensors/imu.py b/genesis/sensors/imu.py index ae8a6cb25..d1251fc5e 100644 --- a/genesis/sensors/imu.py +++ b/genesis/sensors/imu.py @@ -134,8 +134,7 @@ class IMUOptions(RigidSensorOptionsMixin, NoisySensorOptionsMixin, SensorOptions debug_gyro_color: tuple[float, float, float, float] = (1.0, 1.0, 0.0, 0.5) debug_gyro_scale: float = 0.01 - def validate(self, scene): - super().validate(scene) + def model_post_init(self, _): self._validate_axes_skew(self.acc_axes_skew) self._validate_axes_skew(self.gyro_axes_skew) diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 8ca095fbb..7c106c7e1 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -5,6 +5,7 @@ import gstaichi as ti import torch +from pydantic import Field import genesis as gs import genesis.engine.solvers.rigid.rigid_solver_decomp as rigid_solver_decomp @@ -334,12 +335,20 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): pattern: RaycastPattern min_range: float = 0.0 max_range: float = 20.0 - no_hit_value: float | None = None + no_hit_value: float = Field(default_factory=lambda data: data["max_range"]) return_world_frame: bool = False only_cast_fixed: bool = False debug_sphere_radius: float = 0.02 + def model_post_init(self, _): + if self.min_range < 0.0: + gs.raise_exception(f"[{type(self).__name__}] min_range should be non-negative. Got: {self.min_range}.") + if self.max_range <= self.min_range: + gs.raise_exception( + f"[{type(self).__name__}] max_range {self.max_range} should be greater than min_range {self.min_range}." 
+ ) + @dataclass class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): diff --git a/pyproject.toml b/pyproject.toml index 024e493b8..42fab79f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ requires-python = ">=3.10,<3.14" dependencies = [ "psutil", "gstaichi==2.5.0", - "pydantic>=2.7.1", + "pydantic>=2.11.0", "numpy>=1.26.4", "trimesh", "scipy>=1.14", From d308fd8f0ed0f1f270530baaf271dda7b2b34fc6 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Thu, 2 Oct 2025 02:02:42 -0400 Subject: [PATCH 11/28] update raycaster test, rename raycaster data fields --- genesis/sensors/raycaster/depth_camera.py | 2 +- genesis/sensors/raycaster/patterns.py | 11 +- genesis/sensors/raycaster/raycaster.py | 118 ++++++++++++-------- test_raycaster.py | 126 ++++++++++++++++++++++ tests/test_sensors.py | 105 +++++++++++++----- 5 files changed, 284 insertions(+), 78 deletions(-) create mode 100644 test_raycaster.py diff --git a/genesis/sensors/raycaster/depth_camera.py b/genesis/sensors/raycaster/depth_camera.py index 65a292c19..7cedfdb54 100644 --- a/genesis/sensors/raycaster/depth_camera.py +++ b/genesis/sensors/raycaster/depth_camera.py @@ -32,4 +32,4 @@ def read_image(self) -> torch.Tensor: torch.Tensor The depth image with shape (height, width). """ - return self.read().hit_ranges.reshape(self._options.pattern.height, self._options.pattern.width) + return self.read().distances.reshape(self._options.pattern.height, self._options.pattern.width) diff --git a/genesis/sensors/raycaster/patterns.py b/genesis/sensors/raycaster/patterns.py index 404397064..9fe77ef1c 100644 --- a/genesis/sensors/raycaster/patterns.py +++ b/genesis/sensors/raycaster/patterns.py @@ -99,7 +99,12 @@ def compute_ray_starts(self): self._ray_starts[..., 2] = 0.0 -def _generate_uniform_angles(n_points, fov, res, angles) -> tuple[torch.Tensor, torch.Tensor]: +def _generate_uniform_angles( + n_points: tuple[int, int], + fov: tuple[float | tuple[float, float] | None, float | tuple[float, float] | None], + res: tuple[float | None, float | None], + angles: tuple[Sequence[float] | None, Sequence[float] | None], +) -> tuple[torch.Tensor, torch.Tensor]: """ Helper function to generate uniform angles given various formats (n and fov, res and fov, or angles). """ @@ -110,7 +115,7 @@ def _generate_uniform_angles(n_points, fov, res, angles) -> tuple[torch.Tensor, assert fov_i is not None, "FOV should be provided if angles not given." 
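                # Worked illustration (not part of the diff) of how the three supported input
                # formats resolve for one axis; values are degrees in, radians out
                # (torch.deg2rad is applied at the end of the loop):
                #   n_points_i=19, fov_i=(-45.0, 45.0)  -> 19 evenly spaced angles over [-45, 45] deg
                #   res_i=5.0,     fov_i=90.0           -> n_points_i = ceil(90 / 5) + 1 = 19 angles over [-45, 45] deg
                #   angles_i=[-30.0, 0.0, 30.0]         -> used verbatim, only converted to radians
                #   fov_i=360.0 (full rotation)         -> f_max is pulled in by half a step so that
                #                                          -180 deg and +180 deg are not both emitted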
if res_i is not None: - if isinstance(fov_i, tuple): + if isinstance(fov_i, Sequence): f_min, f_max = fov_i else: f_max = fov_i / 2.0 @@ -119,7 +124,7 @@ def _generate_uniform_angles(n_points, fov, res, angles) -> tuple[torch.Tensor, assert n_points_i is not None - if isinstance(fov_i, tuple): + if isinstance(fov_i, Sequence): f_min, f_max = fov_i fov_size = f_max - f_min else: diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 7c106c7e1..fe6368cae 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -136,15 +136,15 @@ def ray_aabb_intersection(ray_start, ray_dir, aabb_min, aabb_max): @ti.kernel def kernel_update_aabbs( - map_lidar_faces: ti.template(), + map_faces: ti.template(), free_verts_state: array_class.VertsState, fixed_verts_state: array_class.VertsState, verts_info: array_class.VertsInfo, faces_info: array_class.FacesInfo, aabb_state: array_class.AABBState, ): - for i_b, i_f_ in ti.ndrange(free_verts_state.pos.shape[1], map_lidar_faces.shape[0]): - i_f = map_lidar_faces[i_f_] + for i_b, i_f_ in ti.ndrange(free_verts_state.pos.shape[1], map_faces.shape[0]): + i_f = map_faces[i_f_] aabb_state.aabbs[i_b, i_f].min.fill(ti.math.inf) aabb_state.aabbs[i_b, i_f].max.fill(-ti.math.inf) @@ -161,8 +161,8 @@ def kernel_update_aabbs( @ti.kernel -def kernel_cast_lidar_rays( - map_lidar_faces: ti.template(), +def kernel_cast_rays( + map_faces: ti.template(), fixed_verts_state: array_class.VertsState, free_verts_state: array_class.VertsState, verts_info: array_class.VertsInfo, @@ -177,16 +177,19 @@ def kernel_cast_lidar_rays( no_hit_values: ti.types.ndarray(ndim=1), # [n_sensors] points_to_sensor_idx: ti.types.ndarray(ndim=1), # [n_points] is_world_frame: ti.types.ndarray(ndim=1), # [n_sensors] - output_hits: ti.types.ndarray(ndim=2), # [n_env, n_points_per_sensor * 4 * n_sensors] + sensor_cache_offsets: ti.types.ndarray(ndim=1), # [n_sensors] - cache start index for each sensor + sensor_point_offsets: ti.types.ndarray(ndim=1), # [n_sensors] - point start index for each sensor + sensor_point_counts: ti.types.ndarray(ndim=1), # [n_sensors] - number of points for each sensor + output_hits: ti.types.ndarray(ndim=2), # [n_env, total_cache_size] ): """ Taichi kernel for ray casting, accelerated by a Bounding Volume Hierarchy (BVH). - The result `output_hits` will be a 2D array of shape (n_env, n_points * 4) where in the second dimension, - the first n_points * 3 are hit points and the last n_points is hit ranges. + The result `output_hits` will be a 2D array of shape (n_env, total_cache_size) where in the second dimension, + each sensor's data is stored as [sensor_points (n_points * 3), sensor_ranges (n_points)]. """ - n_triangles = map_lidar_faces.shape[0] + n_triangles = map_faces.shape[0] n_points = ray_starts.shape[0] # batch, point for i_b, i_p in ti.ndrange(output_hits.shape[0], n_points): @@ -236,7 +239,7 @@ def kernel_cast_lidar_rays( assert original_tri_idx >= 0 assert original_tri_idx < n_triangles - i_f = map_lidar_faces[original_tri_idx] + i_f = map_faces[original_tri_idx] is_free = verts_info.is_free[faces_info.verts_idx[i_f][0]] v0 = ti.Vector.zero(gs.ti_float, 3) @@ -270,29 +273,37 @@ def kernel_cast_lidar_rays( stack_idx += 2 # --- 3. Process Hit Result --- + # The format of output_hits is: [sensor1 points][sensor1 ranges][sensor2 points][sensor2 ranges]... 
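+        # Worked example of this layout (illustrative, not part of the diff), assuming two
+        # sensors with 4 and 2 rays respectively:
+        #   sensor_point_counts  = [4, 2]
+        #   sensor_point_offsets = [0, 4]    # global ray index at which each sensor's rays start
+        #   sensor_cache_offsets = [0, 16]   # sensor 0 occupies 4*3 point floats + 4 distances = 16 slots
+        # For global ray i_p = 5 (sensor i_s = 1, local ray i_p_sensor = 1):
+        #   its xyz goes to slots 16 + 1*3 .. 16 + 1*3 + 2  (19, 20, 21)
+        #   its distance goes to slot 16 + 2*3 + 1 = 23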
+ i_p_sensor = i_p - sensor_point_offsets[i_s] + i_p_offset = sensor_cache_offsets[i_s] # cumulative cache offset for this sensor + n_points_in_sensor = sensor_point_counts[i_s] # number of points in this sensor + if hit_face >= 0: dist = max_range - output_hits[i_b, n_points * 3 + i_p] = dist + # Store distance at: cache_offset + (num_points_in_sensor * 3) + point_idx_in_sensor + output_hits[i_b, i_p_offset + n_points_in_sensor * 3 + i_p_sensor] = dist if is_world_frame[i_s]: hit_point = ray_start_world + dist * ray_direction_world - output_hits[i_b, i_p * 3 + 0] = hit_point.x - output_hits[i_b, i_p * 3 + 1] = hit_point.y - output_hits[i_b, i_p * 3 + 2] = hit_point.z + # Store points at: cache_offset + point_idx_in_sensor * 3 + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 0] = hit_point.x + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 1] = hit_point.y + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 2] = hit_point.z else: # Local frame output along provided local ray direction hit_point = dist * ti_normalize( ti.math.vec3(ray_directions[i_p, 0], ray_directions[i_p, 1], ray_directions[i_p, 2]) ) - output_hits[i_b, i_p * 3 + 0] = hit_point.x - output_hits[i_b, i_p * 3 + 1] = hit_point.y - output_hits[i_b, i_p * 3 + 2] = hit_point.z + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 0] = hit_point.x + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 1] = hit_point.y + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 2] = hit_point.z else: - output_hits[i_b, i_p * 3 + 0] = 0.0 - output_hits[i_b, i_p * 3 + 1] = 0.0 - output_hits[i_b, i_p * 3 + 2] = 0.0 - output_hits[i_b, n_points * 3 + i_p] = no_hit_values[i_s] + # No hit + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 0] = 0.0 + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 1] = 0.0 + output_hits[i_b, i_p_offset + i_p_sensor * 3 + 2] = 0.0 + output_hits[i_b, i_p_offset + n_points_in_sensor * 3 + i_p_sensor] = no_hit_values[i_s] class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): @@ -321,7 +332,7 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): Whether to return points in the world frame. Defaults to False (local frame). only_cast_fixed : bool, optional Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this - option for the **first** lidar sensor will be the behavior for **all** Lidar sensors. + option for the **first** Raycaster sensor will be the behavior for **all** Raycaster sensors. delay : float, optional The delay in seconds before the sensor data is read. 
update_ground_truth_only : bool, optional @@ -355,8 +366,8 @@ class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): bvh: LBVH | None = None aabb: AABB | None = None only_cast_fixed: bool = False - map_lidar_faces: Any | None = None - n_lidar_faces: int = 0 + map_faces: Any | None = None + n_faces: int = 0 sensors_ray_start_idx: list[int] = field(default_factory=list) total_n_rays: int = 0 @@ -373,11 +384,14 @@ class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): ray_dirs_world: torch.Tensor = make_tensor_field((0, 3)) points_to_sensor_idx: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) + sensor_cache_offsets: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) + sensor_point_offsets: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) + sensor_point_counts: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) class RaycasterData(NamedTuple): - hit_points: torch.Tensor - hit_ranges: torch.Tensor + points: torch.Tensor + distances: torch.Tensor @register_sensor(RaycasterOptions, RaycasterSharedMetadata, RaycasterData) @@ -396,27 +410,26 @@ def __init__( @classmethod def _build_bvh(cls, shared_metadata: RaycasterSharedMetadata): - n_lidar_faces = shared_metadata.solver.faces_info.geom_idx.shape[0] - torch_map_lidar_faces = torch.arange(n_lidar_faces, dtype=torch.int32, device=gs.device) + n_faces = shared_metadata.solver.faces_info.geom_idx.shape[0] + torch_map_faces = torch.arange(n_faces, dtype=torch.int32, device=gs.device) if shared_metadata.only_cast_fixed: # count the number of faces in a fixed geoms geom_is_fixed = torch.logical_not(ti_to_torch(shared_metadata.solver.geoms_info.is_free)) faces_geom = ti_to_torch(shared_metadata.solver.faces_info.geom_idx) - n_lidar_faces = torch.sum(geom_is_fixed[faces_geom]).item() - if n_lidar_faces == 0: + n_faces = torch.sum(geom_is_fixed[faces_geom]).item() + if n_faces == 0: gs.raise_exception( "No fixed geoms found in the scene. To use only_cast_fixed, " "at least some entities should have is_free=False." 
+ # TODO: update message after PR #1795 is merged ) - torch_map_lidar_faces = torch.where(geom_is_fixed[faces_geom])[0] + torch_map_faces = torch.where(geom_is_fixed[faces_geom])[0] - shared_metadata.map_lidar_faces = ti.field(ti.i32, (n_lidar_faces)) - shared_metadata.map_lidar_faces.from_torch(torch_map_lidar_faces) - shared_metadata.n_lidar_faces = n_lidar_faces + shared_metadata.map_faces = ti.field(ti.i32, (n_faces)) + shared_metadata.map_faces.from_torch(torch_map_faces) + shared_metadata.n_faces = n_faces - shared_metadata.aabb = AABB( - n_batches=shared_metadata.solver.free_verts_state.pos.shape[1], n_aabbs=n_lidar_faces - ) + shared_metadata.aabb = AABB(n_batches=shared_metadata.solver.free_verts_state.pos.shape[1], n_aabbs=n_faces) rigid_solver_decomp.kernel_update_all_verts( geoms_state=shared_metadata.solver.geoms_state, @@ -426,7 +439,7 @@ def _build_bvh(cls, shared_metadata: RaycasterSharedMetadata): ) kernel_update_aabbs( - map_lidar_faces=shared_metadata.map_lidar_faces, + map_faces=shared_metadata.map_faces, free_verts_state=shared_metadata.solver.free_verts_state, fixed_verts_state=shared_metadata.solver.fixed_verts_state, verts_info=shared_metadata.solver.verts_info, @@ -446,10 +459,8 @@ def build(self): pattern = self._options.pattern self._shared_metadata.patterns.append(pattern) - pos_offset = self._shared_metadata.offsets_pos[-1, :] - quat_offset = self._shared_metadata.offsets_quat[..., -1, :] - if self._shared_metadata.solver.n_envs > 0: - quat_offset = quat_offset[0] # all envs have same offset on build + pos_offset = self._shared_metadata.offsets_pos[0, -1, :] # all envs have same offset on build + quat_offset = self._shared_metadata.offsets_quat[0, -1, :] ray_starts = pattern.ray_starts.reshape(-1, 3) ray_starts = transform_by_trans_quat(ray_starts, pos_offset, quat_offset) @@ -461,6 +472,18 @@ def build(self): num_rays = math.prod(pattern.return_shape) self._shared_metadata.sensors_ray_start_idx.append(self._shared_metadata.total_n_rays) + + # These fields are used to properly index into the big cache tensor in kernel_cast_rays + self._shared_metadata.sensor_cache_offsets = concat_with_tensor( + self._shared_metadata.sensor_cache_offsets, self._cache_idx + ) + self._shared_metadata.sensor_point_offsets = concat_with_tensor( + self._shared_metadata.sensor_point_offsets, self._shared_metadata.total_n_rays + ) + self._shared_metadata.sensor_point_counts = concat_with_tensor( + self._shared_metadata.sensor_point_counts, num_rays + ) + self._shared_metadata.total_n_rays += num_rays self._shared_metadata.points_to_sensor_idx = concat_with_tensor( @@ -505,7 +528,7 @@ def _update_shared_ground_truth_cache( ) kernel_update_aabbs( - map_lidar_faces=shared_metadata.map_lidar_faces, + map_faces=shared_metadata.map_faces, free_verts_state=shared_metadata.solver.free_verts_state, fixed_verts_state=shared_metadata.solver.fixed_verts_state, verts_info=shared_metadata.solver.verts_info, @@ -519,8 +542,8 @@ def _update_shared_ground_truth_cache( links_pos = links_pos.unsqueeze(0) links_quat = links_quat.unsqueeze(0) - kernel_cast_lidar_rays( - map_lidar_faces=shared_metadata.map_lidar_faces, + kernel_cast_rays( + map_faces=shared_metadata.map_faces, fixed_verts_state=shared_metadata.solver.fixed_verts_state, free_verts_state=shared_metadata.solver.free_verts_state, verts_info=shared_metadata.solver.verts_info, @@ -535,6 +558,9 @@ def _update_shared_ground_truth_cache( no_hit_values=shared_metadata.no_hit_values, points_to_sensor_idx=shared_metadata.points_to_sensor_idx, 
is_world_frame=shared_metadata.return_world_frame, + sensor_cache_offsets=shared_metadata.sensor_cache_offsets, + sensor_point_offsets=shared_metadata.sensor_point_offsets, + sensor_point_counts=shared_metadata.sensor_point_counts, output_hits=shared_ground_truth_cache, ) @@ -555,7 +581,7 @@ def _draw_debug(self, context: "RasterizerContext"): Spheres will be different colors per environment. """ - data = self.read().hit_points.reshape(self._manager._sim._B, -1, 3) + data = self.read().points.reshape(self._manager._sim._B, -1, 3) for env_idx, color in zip(range(data.shape[0]), itertools.cycle(DEBUG_COLORS)): points = data[env_idx] diff --git a/test_raycaster.py b/test_raycaster.py new file mode 100644 index 000000000..921cee942 --- /dev/null +++ b/test_raycaster.py @@ -0,0 +1,126 @@ +import numpy as np + +import genesis as gs +from tests.utils import assert_allclose + + +def expand_batch_dim(values: tuple[float, ...], n_envs: int) -> tuple[float, ...] | np.ndarray: + """Helper function to expand expected values for n_envs dimension.""" + if n_envs == 0: + return values + return np.tile(np.array(values), (n_envs,) + (1,) * len(values)) + + +def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): + """Test if the Raycaster sensor with GridPattern rays pointing to ground returns the correct distance.""" + EXPECTED_DISTANCE = 0.7 + NUM_RAYS_XY = 3 + BOX_HEIGHT = 0.2 + SPHERE_POS = (4.0, 0.0, 1.0) + RAYCAST_GRID_SIZE = 0.5 + + scene = gs.Scene( + profiling_options=gs.options.ProfilingOptions(show_FPS=False), + show_viewer=show_viewer, + ) + + scene.add_entity( + gs.morphs.Plane( + is_free=False, # TODO: remove after PR #1795 is merged + ) + ) + + box_obstacle = scene.add_entity( + gs.morphs.Box( + size=(RAYCAST_GRID_SIZE / 2.0, RAYCAST_GRID_SIZE / 2.0, BOX_HEIGHT), + # pos=(0.0, 0.0, -BOX_HEIGHT), # init below ground to not interfere with first raycast + pos=(RAYCAST_GRID_SIZE, RAYCAST_GRID_SIZE, EXPECTED_DISTANCE / 2.0 + BOX_HEIGHT / 2.0), + ), + ) + grid_sensor_box = scene.add_entity( + gs.morphs.Box( + size=(0.1, 0.1, 0.1), + pos=(0.0, 0.0, EXPECTED_DISTANCE + BOX_HEIGHT), + fixed=True, + ), + ) + grid_raycaster = scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.GridPattern( + resolution=1.0 / (NUM_RAYS_XY - 1.0), + size=(1.0, 1.0), + direction=(0.0, 0.0, -1.0), # pointing downwards to ground + ), + entity_idx=grid_sensor_box.idx, + pos_offset=(0.0, 0.0, -BOX_HEIGHT), + return_world_frame=True, + only_cast_fixed=only_cast_fixed, + draw_debug=True, + ) + ) + + spherical_sensor = scene.add_entity( + gs.morphs.Sphere( + radius=EXPECTED_DISTANCE, + pos=SPHERE_POS, + fixed=True, + ), + ) + spherical_raycaster = scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.SphericalPattern( + n_points=(NUM_RAYS_XY, NUM_RAYS_XY), + ), + entity_idx=spherical_sensor.idx, + return_world_frame=False, + only_cast_fixed=only_cast_fixed, + ) + ) + + scene.build(n_envs=n_envs) + + scene.step() + + grid_hits = grid_raycaster.read().points + grid_distances = grid_raycaster.read().distances + spherical_distances = spherical_raycaster.read().distances + + expected_shape = (NUM_RAYS_XY, NUM_RAYS_XY) if n_envs == 0 else (n_envs, NUM_RAYS_XY, NUM_RAYS_XY) + assert grid_distances.shape == spherical_distances.shape == expected_shape + + grid_distance_min = grid_distances.min() + assert grid_distances.min() < EXPECTED_DISTANCE - tol, "Raycaster grid pattern should have hit obstacle" + + ground_hit_mask = grid_distances > grid_distance_min + tol + assert_allclose( + 
grid_hits[ground_hit_mask][..., 2], + 0.0, + tol=tol, + err_msg="Raycaster grid pattern should hit ground (z≈0)", + ) + assert_allclose( + grid_distances[ground_hit_mask], + EXPECTED_DISTANCE, + tol=tol, + err_msg=f"Raycaster grid pattern should measure {EXPECTED_DISTANCE}m to ground plane", + ) + assert_allclose( + spherical_distances, + EXPECTED_DISTANCE, + tol=1e-2, # since sphere mesh is discretized, we need a larger tolerance here + err_msg=f"Raycaster spherical pattern should measure {EXPECTED_DISTANCE}m to the sphere around it", + ) + + for _ in range(5): + scene.step() + + assert grid_raycaster.read().distances.min() > grid_distance_min, "Raycaster should hit falling obstacle" + + +if __name__ == "__main__": + gs.init() + + test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=0, only_cast_fixed=False) + test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=2, only_cast_fixed=False) + # test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=10, only_cast_fixed=False) + # test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=10, only_cast_fixed=True) diff --git a/tests/test_sensors.py b/tests/test_sensors.py index d0089722d..960407763 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -252,41 +252,70 @@ def test_rigid_tactile_sensors_gravity_force(show_viewer, tol, n_envs): @pytest.mark.required @pytest.mark.parametrize("n_envs", [0, 2]) -@pytest.mark.parametrize("is_fixed", [False, True]) -def test_lidar_grid_pattern_ground_distance(show_viewer, tol, n_envs, is_fixed): +@pytest.mark.parametrize("only_cast_fixed", [False, True]) +def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): """Test if the Raycaster sensor with GridPattern rays pointing to ground returns the correct distance.""" - EXPECTED_DISTANCE = 1.5 # Distance from sensor to floor - NUM_RAYS_XY = 4 # Total number of rays is NUM_RAYS_XY ** 2 - BOX_SIZE = 0.1 # Size of the box to which the LiDAR is attached + EXPECTED_DISTANCE = 0.7 + NUM_RAYS_XY = 3 + BOX_HEIGHT = 0.2 + SPHERE_POS = (4.0, 0.0, 1.0) + RAYCAST_GRID_SIZE = 0.5 scene = gs.Scene( profiling_options=gs.options.ProfilingOptions(show_FPS=False), show_viewer=show_viewer, ) - plane = scene.add_entity(gs.morphs.Plane(is_free=not is_fixed)) + scene.add_entity( + gs.morphs.Plane( + is_free=False, # TODO: remove after PR #1795 is merged + ) + ) - block = scene.add_entity( + box_obstacle = scene.add_entity( + gs.morphs.Box( + size=(RAYCAST_GRID_SIZE / 2.0, RAYCAST_GRID_SIZE / 2.0, BOX_HEIGHT), + # pos=(0.0, 0.0, -BOX_HEIGHT), # init below ground to not interfere with first raycast + pos=(RAYCAST_GRID_SIZE, RAYCAST_GRID_SIZE, EXPECTED_DISTANCE / 2.0 + BOX_HEIGHT / 2.0), + ), + ) + grid_sensor_box = scene.add_entity( gs.morphs.Box( - size=(BOX_SIZE, BOX_SIZE, BOX_SIZE), - pos=(0.0, 0.0, EXPECTED_DISTANCE + BOX_SIZE / 2.0), + size=(0.1, 0.1, 0.1), + pos=(0.0, 0.0, EXPECTED_DISTANCE + BOX_HEIGHT), fixed=True, ), ) - - grid_pattern = gs.sensors.raycaster.GridPattern( - resolution=1.0 / (NUM_RAYS_XY - 1.0), - size=(1.0, 1.0), - direction=(0.0, 0.0, -1.0), # pointing downwards to ground + grid_raycaster = scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.GridPattern( + resolution=1.0 / (NUM_RAYS_XY - 1.0), + size=(1.0, 1.0), + direction=(0.0, 0.0, -1.0), # pointing downwards to ground + ), + entity_idx=grid_sensor_box.idx, + pos_offset=(0.0, 0.0, -BOX_HEIGHT), + return_world_frame=True, + only_cast_fixed=only_cast_fixed, + draw_debug=True, + ) ) - lidar = scene.add_sensor( - gs.sensors.Lidar( - pattern=grid_pattern, - 
entity_idx=block.idx, - pos_offset=(0.0, 0.0, -BOX_SIZE / 2.0), - return_world_frame=True, - only_cast_fixed=is_fixed, + spherical_sensor = scene.add_entity( + gs.morphs.Sphere( + radius=EXPECTED_DISTANCE, + pos=SPHERE_POS, + fixed=True, + ), + ) + spherical_raycaster = scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.SphericalPattern( + n_points=(NUM_RAYS_XY, NUM_RAYS_XY), + ), + entity_idx=spherical_sensor.idx, + return_world_frame=False, + only_cast_fixed=only_cast_fixed, ) ) @@ -294,17 +323,37 @@ def test_lidar_grid_pattern_ground_distance(show_viewer, tol, n_envs, is_fixed): scene.step() - sensor_data = lidar.read() - hit_points = sensor_data.hit_points - distances = sensor_data.hit_ranges + grid_hits = grid_raycaster.read().points + grid_distances = grid_raycaster.read().distances + spherical_distances = spherical_raycaster.read().distances expected_shape = (NUM_RAYS_XY, NUM_RAYS_XY) if n_envs == 0 else (n_envs, NUM_RAYS_XY, NUM_RAYS_XY) - assert distances.shape == expected_shape + assert grid_distances.shape == spherical_distances.shape == expected_shape + + grid_distance_min = grid_distances.min() + assert grid_distances.min() < EXPECTED_DISTANCE - tol, "Raycaster grid pattern should have hit obstacle" - assert_allclose(hit_points[..., 2], 0.0, tol=tol, err_msg="LiDAR hit point should be at ground level (z≈0)") + ground_hit_mask = grid_distances > grid_distance_min + tol + assert_allclose( + grid_hits[ground_hit_mask][..., 2], + 0.0, + tol=tol, + err_msg="Raycaster grid pattern should hit ground (z≈0)", + ) assert_allclose( - distances, + grid_distances[ground_hit_mask], EXPECTED_DISTANCE, tol=tol, - err_msg=f"LiDAR should measure distance {EXPECTED_DISTANCE}m to ground plane", + err_msg=f"Raycaster grid pattern should measure {EXPECTED_DISTANCE}m to ground plane", ) + assert_allclose( + spherical_distances, + EXPECTED_DISTANCE, + tol=1e-2, # since sphere mesh is discretized, we need a larger tolerance here + err_msg=f"Raycaster spherical pattern should measure {EXPECTED_DISTANCE}m to the sphere around it", + ) + + for _ in range(5): + scene.step() + + assert grid_raycaster.read().distances.min() > grid_distance_min, "Raycaster should hit falling obstacle" From 9983f4354f80d3a16d3cbcb02e81f93e541c691a Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Thu, 2 Oct 2025 02:36:54 -0400 Subject: [PATCH 12/28] fix unintended stuffs --- examples/keyboard_teleop.py | 27 +++++- genesis/sensors/raycaster/raycaster.py | 26 +---- test_raycaster.py | 126 ------------------------- tests/test_sensors.py | 19 ++-- 4 files changed, 39 insertions(+), 159 deletions(-) delete mode 100644 test_raycaster.py diff --git a/examples/keyboard_teleop.py b/examples/keyboard_teleop.py index c951c7cff..9b80c5d74 100644 --- a/examples/keyboard_teleop.py +++ b/examples/keyboard_teleop.py @@ -17,12 +17,35 @@ import random import threading +import genesis as gs import numpy as np -from examples.util.keyboard import KeyboardDevice from pynput import keyboard from scipy.spatial.transform import Rotation as R -import genesis as gs + +class KeyboardDevice: + def __init__(self): + self.pressed_keys = set() + self.lock = threading.Lock() + self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release) + + def start(self): + self.listener.start() + + def stop(self): + self.listener.stop() + self.listener.join() + + def on_press(self, key: keyboard.Key): + with self.lock: + self.pressed_keys.add(key) + + def on_release(self, key: keyboard.Key): + with self.lock: + 
self.pressed_keys.discard(key) + + def get_cmd(self): + return self.pressed_keys class KeyboardDevice: diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index fe6368cae..90f97b93c 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -175,8 +175,8 @@ def kernel_cast_rays( ray_directions: ti.types.ndarray(ndim=2), # [n_points, 3] max_ranges: ti.types.ndarray(ndim=1), # [n_sensors] no_hit_values: ti.types.ndarray(ndim=1), # [n_sensors] - points_to_sensor_idx: ti.types.ndarray(ndim=1), # [n_points] is_world_frame: ti.types.ndarray(ndim=1), # [n_sensors] + points_to_sensor_idx: ti.types.ndarray(ndim=1), # [n_points] sensor_cache_offsets: ti.types.ndarray(ndim=1), # [n_sensors] - cache start index for each sensor sensor_point_offsets: ti.types.ndarray(ndim=1), # [n_sensors] - point start index for each sensor sensor_point_counts: ti.types.ndarray(ndim=1), # [n_sensors] - number of points for each sensor @@ -229,16 +229,8 @@ def kernel_cast_rays( if node.left == -1: # is leaf node # A leaf node corresponds to one of the sorted triangles. Find the original triangle index. sorted_leaf_idx = node_idx - (n_triangles - 1) - - assert n_triangles > 0 - assert sorted_leaf_idx >= 0 - assert sorted_leaf_idx < n_triangles - original_tri_idx = bvh_morton_codes[0, sorted_leaf_idx][1] - assert original_tri_idx >= 0 - assert original_tri_idx < n_triangles - i_f = map_faces[original_tri_idx] is_free = verts_info.is_free[faces_info.verts_idx[i_f][0]] @@ -312,14 +304,6 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): Parameters ---------- - entity_idx : int - The global entity index of the RigidEntity to which this sensor is attached. - link_idx_local : int, optional - The local index of the RigidLink of the RigidEntity to which this sensor is attached. - pos_offset : tuple[float, float, float], optional - The mounting offset position of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). - euler_offset : tuple[float, float, float], optional - The mounting offset quaternion of the sensor in the world frame. Defaults to (0.0, 0.0, 0.0). pattern: RaycastPatternOptions The raycasting pattern for the sensor. min_range : float, optional @@ -333,12 +317,6 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): only_cast_fixed : bool, optional Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this option for the **first** Raycaster sensor will be the behavior for **all** Raycaster sensors. - delay : float, optional - The delay in seconds before the sensor data is read. - update_ground_truth_only : bool, optional - If True, the sensor will only update the ground truth cache, and not the measured cache. - draw_debug : bool, optional - If True and the interactive viewer is active, spheres will be drawn at every hit point. debug_sphere_radius: float, optional The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. 
""" @@ -556,8 +534,8 @@ def _update_shared_ground_truth_cache( ray_directions=shared_metadata.ray_dirs, max_ranges=shared_metadata.max_ranges, no_hit_values=shared_metadata.no_hit_values, - points_to_sensor_idx=shared_metadata.points_to_sensor_idx, is_world_frame=shared_metadata.return_world_frame, + points_to_sensor_idx=shared_metadata.points_to_sensor_idx, sensor_cache_offsets=shared_metadata.sensor_cache_offsets, sensor_point_offsets=shared_metadata.sensor_point_offsets, sensor_point_counts=shared_metadata.sensor_point_counts, diff --git a/test_raycaster.py b/test_raycaster.py deleted file mode 100644 index 921cee942..000000000 --- a/test_raycaster.py +++ /dev/null @@ -1,126 +0,0 @@ -import numpy as np - -import genesis as gs -from tests.utils import assert_allclose - - -def expand_batch_dim(values: tuple[float, ...], n_envs: int) -> tuple[float, ...] | np.ndarray: - """Helper function to expand expected values for n_envs dimension.""" - if n_envs == 0: - return values - return np.tile(np.array(values), (n_envs,) + (1,) * len(values)) - - -def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): - """Test if the Raycaster sensor with GridPattern rays pointing to ground returns the correct distance.""" - EXPECTED_DISTANCE = 0.7 - NUM_RAYS_XY = 3 - BOX_HEIGHT = 0.2 - SPHERE_POS = (4.0, 0.0, 1.0) - RAYCAST_GRID_SIZE = 0.5 - - scene = gs.Scene( - profiling_options=gs.options.ProfilingOptions(show_FPS=False), - show_viewer=show_viewer, - ) - - scene.add_entity( - gs.morphs.Plane( - is_free=False, # TODO: remove after PR #1795 is merged - ) - ) - - box_obstacle = scene.add_entity( - gs.morphs.Box( - size=(RAYCAST_GRID_SIZE / 2.0, RAYCAST_GRID_SIZE / 2.0, BOX_HEIGHT), - # pos=(0.0, 0.0, -BOX_HEIGHT), # init below ground to not interfere with first raycast - pos=(RAYCAST_GRID_SIZE, RAYCAST_GRID_SIZE, EXPECTED_DISTANCE / 2.0 + BOX_HEIGHT / 2.0), - ), - ) - grid_sensor_box = scene.add_entity( - gs.morphs.Box( - size=(0.1, 0.1, 0.1), - pos=(0.0, 0.0, EXPECTED_DISTANCE + BOX_HEIGHT), - fixed=True, - ), - ) - grid_raycaster = scene.add_sensor( - gs.sensors.Raycaster( - pattern=gs.sensors.raycaster.GridPattern( - resolution=1.0 / (NUM_RAYS_XY - 1.0), - size=(1.0, 1.0), - direction=(0.0, 0.0, -1.0), # pointing downwards to ground - ), - entity_idx=grid_sensor_box.idx, - pos_offset=(0.0, 0.0, -BOX_HEIGHT), - return_world_frame=True, - only_cast_fixed=only_cast_fixed, - draw_debug=True, - ) - ) - - spherical_sensor = scene.add_entity( - gs.morphs.Sphere( - radius=EXPECTED_DISTANCE, - pos=SPHERE_POS, - fixed=True, - ), - ) - spherical_raycaster = scene.add_sensor( - gs.sensors.Raycaster( - pattern=gs.sensors.raycaster.SphericalPattern( - n_points=(NUM_RAYS_XY, NUM_RAYS_XY), - ), - entity_idx=spherical_sensor.idx, - return_world_frame=False, - only_cast_fixed=only_cast_fixed, - ) - ) - - scene.build(n_envs=n_envs) - - scene.step() - - grid_hits = grid_raycaster.read().points - grid_distances = grid_raycaster.read().distances - spherical_distances = spherical_raycaster.read().distances - - expected_shape = (NUM_RAYS_XY, NUM_RAYS_XY) if n_envs == 0 else (n_envs, NUM_RAYS_XY, NUM_RAYS_XY) - assert grid_distances.shape == spherical_distances.shape == expected_shape - - grid_distance_min = grid_distances.min() - assert grid_distances.min() < EXPECTED_DISTANCE - tol, "Raycaster grid pattern should have hit obstacle" - - ground_hit_mask = grid_distances > grid_distance_min + tol - assert_allclose( - grid_hits[ground_hit_mask][..., 2], - 0.0, - tol=tol, - err_msg="Raycaster grid pattern should hit 
ground (z≈0)", - ) - assert_allclose( - grid_distances[ground_hit_mask], - EXPECTED_DISTANCE, - tol=tol, - err_msg=f"Raycaster grid pattern should measure {EXPECTED_DISTANCE}m to ground plane", - ) - assert_allclose( - spherical_distances, - EXPECTED_DISTANCE, - tol=1e-2, # since sphere mesh is discretized, we need a larger tolerance here - err_msg=f"Raycaster spherical pattern should measure {EXPECTED_DISTANCE}m to the sphere around it", - ) - - for _ in range(5): - scene.step() - - assert grid_raycaster.read().distances.min() > grid_distance_min, "Raycaster should hit falling obstacle" - - -if __name__ == "__main__": - gs.init() - - test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=0, only_cast_fixed=False) - test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=2, only_cast_fixed=False) - # test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=10, only_cast_fixed=False) - # test_raycaster_hits(show_viewer=False, tol=1e-3, n_envs=10, only_cast_fixed=True) diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 960407763..6fdd23ddd 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -306,6 +306,7 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): radius=EXPECTED_DISTANCE, pos=SPHERE_POS, fixed=True, + is_free=False, # TODO: remove after PR #1795 is merged ), ) spherical_raycaster = scene.add_sensor( @@ -331,17 +332,20 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): assert grid_distances.shape == spherical_distances.shape == expected_shape grid_distance_min = grid_distances.min() - assert grid_distances.min() < EXPECTED_DISTANCE - tol, "Raycaster grid pattern should have hit obstacle" + if not only_cast_fixed: + assert grid_distances.min() < EXPECTED_DISTANCE - tol, "Raycaster grid pattern should have hit obstacle" + ground_hit_mask = grid_distances > grid_distance_min + tol + grid_hits = grid_hits[ground_hit_mask] + grid_distances = grid_distances[ground_hit_mask] - ground_hit_mask = grid_distances > grid_distance_min + tol assert_allclose( - grid_hits[ground_hit_mask][..., 2], + grid_hits[..., 2], 0.0, tol=tol, err_msg="Raycaster grid pattern should hit ground (z≈0)", ) assert_allclose( - grid_distances[ground_hit_mask], + grid_distances, EXPECTED_DISTANCE, tol=tol, err_msg=f"Raycaster grid pattern should measure {EXPECTED_DISTANCE}m to ground plane", @@ -353,7 +357,8 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): err_msg=f"Raycaster spherical pattern should measure {EXPECTED_DISTANCE}m to the sphere around it", ) - for _ in range(5): - scene.step() + if not only_cast_fixed: + for _ in range(5): + scene.step() - assert grid_raycaster.read().distances.min() > grid_distance_min, "Raycaster should hit falling obstacle" + assert grid_raycaster.read().distances.min() > grid_distance_min, "Raycaster should hit falling obstacle" From 2d5194a2b914bf70dc29e4567366dfc0399cb81e Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Fri, 3 Oct 2025 15:48:56 -0400 Subject: [PATCH 13/28] rebase updates, add sensor draw debug test --- examples/sensors/imu_franka.py | 9 ++ .../entities/rigid_entity/rigid_entity.py | 4 +- .../solvers/rigid/rigid_solver_decomp.py | 6 +- genesis/options/morphs.py | 2 +- genesis/sensors/imu.py | 7 +- genesis/sensors/raycaster/raycaster.py | 67 ++++++----- tests/test_sensors.py | 106 ++++++++++++++++-- 7 files changed, 159 insertions(+), 42 deletions(-) diff --git a/examples/sensors/imu_franka.py b/examples/sensors/imu_franka.py index 16cdc9ee1..a97e17692 100644 --- 
a/examples/sensors/imu_franka.py +++ b/examples/sensors/imu_franka.py @@ -40,6 +40,7 @@ def main(): end_effector = franka.get_link("hand") motors_dof = (0, 1, 2, 3, 4, 5, 6) + ########################## record sensor data ########################## imu = scene.add_sensor( gs.sensors.IMU( entity_idx=franka.idx, @@ -87,6 +88,14 @@ def data_func(): else: print("matplotlib or pyqtgraph not found, skipping real-time plotting.") + imu.start_recording( + gs.recorders.CSVFile(filename="imu_data.csv"), + ) + scene.start_recording( + data_func=lambda: imu.read().lin_acc, + rec_options=gs.recorders.NPZFile(filename="imu_data.npz"), + ) + ########################## build ########################## scene.build() diff --git a/genesis/engine/entities/rigid_entity/rigid_entity.py b/genesis/engine/entities/rigid_entity/rigid_entity.py index cb59f22a4..2524d1fc5 100644 --- a/genesis/engine/entities/rigid_entity/rigid_entity.py +++ b/genesis/engine/entities/rigid_entity/rigid_entity.py @@ -3003,7 +3003,7 @@ def _kernel_get_free_verts( tensor: ti.types.ndarray(), free_verts_idx_local: ti.types.ndarray(), verts_state_start: ti.i32, - free_verts_state: array_class.FreeVertsState, + free_verts_state: array_class.VertsState, ): n_verts = free_verts_idx_local.shape[0] _B = tensor.shape[0] @@ -3017,7 +3017,7 @@ def _kernel_get_fixed_verts( tensor: ti.types.ndarray(), fixed_verts_idx_local: ti.types.ndarray(), verts_state_start: ti.i32, - fixed_verts_state: array_class.FixedVertsState, + fixed_verts_state: array_class.VertsState, ): n_verts = fixed_verts_idx_local.shape[0] _B = tensor.shape[0] diff --git a/genesis/engine/solvers/rigid/rigid_solver_decomp.py b/genesis/engine/solvers/rigid/rigid_solver_decomp.py index 370bc10c1..270bbfb26 100644 --- a/genesis/engine/solvers/rigid/rigid_solver_decomp.py +++ b/genesis/engine/solvers/rigid/rigid_solver_decomp.py @@ -5109,8 +5109,8 @@ def func_update_verts_for_geom( def func_update_all_verts( geoms_state: array_class.GeomsState, verts_info: array_class.VertsInfo, - free_verts_state: array_class.FreeVertsState, - fixed_verts_state: array_class.FixedVertsState, + free_verts_state: array_class.VertsState, + fixed_verts_state: array_class.VertsState, ): n_verts = verts_info.geom_idx.shape[0] _B = geoms_state.pos.shape[1] @@ -5140,7 +5140,7 @@ def kernel_update_all_verts( g_pos = geoms_state.pos[verts_info.geom_idx[i_v], i_b] g_quat = geoms_state.quat[verts_info.geom_idx[i_v], i_b] verts_state_idx = verts_info.verts_state_idx[i_v] - if verts_info.is_free[i_v]: + if not verts_info.is_fixed[i_v]: free_verts_state.pos[verts_state_idx, i_b] = gu.ti_transform_by_trans_quat( verts_info.init_pos[i_v], g_pos, g_quat ) diff --git a/genesis/options/morphs.py b/genesis/options/morphs.py index bf2a651e5..2a463602b 100644 --- a/genesis/options/morphs.py +++ b/genesis/options/morphs.py @@ -84,7 +84,7 @@ class Morph(Options): visualization: bool = True collision: bool = True requires_jac_and_IK: bool = False - is_free: bool = True + is_free: bool | None = None def __init__(self, **data): super().__init__(**data) diff --git a/genesis/sensors/imu.py b/genesis/sensors/imu.py index d1251fc5e..3f67c64e9 100644 --- a/genesis/sensors/imu.py +++ b/genesis/sensors/imu.py @@ -256,7 +256,7 @@ def build(self): ) if self._options.draw_debug: self.quat_offset = self._shared_metadata.offsets_quat[0, self._idx] - self.pos_offset = self._shared_metadata.offsets_pos[0, self._idx] + self.pos_offset = self._shared_metadata.offsets_pos[0, self._idx].unsqueeze(0) def _get_return_format(self) -> tuple[tuple[int, 
...], ...]: return (3,), (3,) @@ -339,11 +339,12 @@ def _draw_debug(self, context: "RasterizerContext"): # transform from local frame to world frame offset_quat = transform_quat_by_quat(self.quat_offset, quat) - acc_vec = tensor_to_array(transform_by_quat(acc_vec, offset_quat)) - gyro_vec = tensor_to_array(transform_by_quat(gyro_vec, offset_quat)) + acc_vec = tensor_to_array(transform_by_quat(acc_vec, offset_quat)).flatten() + gyro_vec = tensor_to_array(transform_by_quat(gyro_vec, offset_quat)).flatten() for debug_object in self.debug_objects: if debug_object is not None: context.clear_debug_object(debug_object) + self.debug_objects[0] = context.draw_debug_arrow(pos=pos[0], vec=acc_vec, color=self._options.debug_acc_color) self.debug_objects[1] = context.draw_debug_arrow(pos=pos[0], vec=gyro_vec, color=self._options.debug_gyro_color) diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 90f97b93c..0858e4803 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -19,7 +19,7 @@ transform_by_quat, transform_by_trans_quat, ) -from genesis.utils.misc import concat_with_tensor, make_tensor_field, ti_to_torch +from genesis.utils.misc import concat_with_tensor, make_tensor_field, tensor_to_array, ti_to_torch from genesis.vis.rasterizer_context import RasterizerContext from ..base_sensor import ( @@ -150,12 +150,12 @@ def kernel_update_aabbs( for i in ti.static(range(3)): i_v = verts_info.verts_state_idx[faces_info.verts_idx[i_f][i]] - if verts_info.is_free[faces_info.verts_idx[i_f][i]]: - pos_v = free_verts_state.pos[i_v, i_b] + if verts_info.is_fixed[faces_info.verts_idx[i_f][i]]: + pos_v = fixed_verts_state.pos[i_v] aabb_state.aabbs[i_b, i_f].min = ti.min(aabb_state.aabbs[i_b, i_f].min, pos_v) aabb_state.aabbs[i_b, i_f].max = ti.max(aabb_state.aabbs[i_b, i_f].max, pos_v) else: - pos_v = fixed_verts_state.pos[i_v] + pos_v = free_verts_state.pos[i_v, i_b] aabb_state.aabbs[i_b, i_f].min = ti.min(aabb_state.aabbs[i_b, i_f].min, pos_v) aabb_state.aabbs[i_b, i_f].max = ti.max(aabb_state.aabbs[i_b, i_f].max, pos_v) @@ -232,22 +232,22 @@ def kernel_cast_rays( original_tri_idx = bvh_morton_codes[0, sorted_leaf_idx][1] i_f = map_faces[original_tri_idx] - is_free = verts_info.is_free[faces_info.verts_idx[i_f][0]] + is_fixed = verts_info.is_fixed[faces_info.verts_idx[i_f][0]] v0 = ti.Vector.zero(gs.ti_float, 3) v1 = ti.Vector.zero(gs.ti_float, 3) v2 = ti.Vector.zero(gs.ti_float, 3) - if is_free: - v0 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][0]], i_b] - v1 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][1]], i_b] - v2 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][2]], i_b] - - else: + if is_fixed: v0 = fixed_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][0]]] v1 = fixed_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][1]]] v2 = fixed_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][2]]] + else: + v0 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][0]], i_b] + v1 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][1]], i_b] + v2 = free_verts_state.pos[verts_info.verts_state_idx[faces_info.verts_idx[i_f][2]], i_b] + # Perform the expensive ray-triangle intersection test hit_result = ray_triangle_intersection(ray_start_world, ray_direction_world, v0, v1, v2) @@ -365,6 +365,7 @@ class 
RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): sensor_cache_offsets: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) sensor_point_offsets: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) sensor_point_counts: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) + output_hits: torch.Tensor = make_tensor_field((0, 0)) class RaycasterData(NamedTuple): @@ -385,6 +386,7 @@ def __init__( ): super().__init__(options, shared_metadata, data_cls, manager) self.debug_objects: list["Mesh | None"] = [] + self.ray_starts: torch.Tensor = torch.empty((0, 3), device=gs.device, dtype=gs.tc_float) @classmethod def _build_bvh(cls, shared_metadata: RaycasterSharedMetadata): @@ -392,15 +394,11 @@ def _build_bvh(cls, shared_metadata: RaycasterSharedMetadata): torch_map_faces = torch.arange(n_faces, dtype=torch.int32, device=gs.device) if shared_metadata.only_cast_fixed: # count the number of faces in a fixed geoms - geom_is_fixed = torch.logical_not(ti_to_torch(shared_metadata.solver.geoms_info.is_free)) + geom_is_fixed = ti_to_torch(shared_metadata.solver.geoms_info.is_fixed) faces_geom = ti_to_torch(shared_metadata.solver.faces_info.geom_idx) n_faces = torch.sum(geom_is_fixed[faces_geom]).item() if n_faces == 0: - gs.raise_exception( - "No fixed geoms found in the scene. To use only_cast_fixed, " - "at least some entities should have is_free=False." - # TODO: update message after PR #1795 is merged - ) + gs.raise_exception("No fixed geoms found in the scene.") torch_map_faces = torch.where(geom_is_fixed[faces_geom])[0] shared_metadata.map_faces = ti.field(ti.i32, (n_faces)) @@ -432,26 +430,33 @@ def build(self): # first lidar sensor initialization: build aabb and bvh if self._shared_metadata.bvh is None: + self._shared_metadata.output_hits = torch.empty( + (self._manager._sim._B, 0), device=gs.device, dtype=gs.tc_float + ) self._shared_metadata.only_cast_fixed = self._options.only_cast_fixed # set for first only self._build_bvh(self._shared_metadata) - pattern = self._options.pattern - self._shared_metadata.patterns.append(pattern) + self._shared_metadata.patterns.append(self._options.pattern) pos_offset = self._shared_metadata.offsets_pos[0, -1, :] # all envs have same offset on build quat_offset = self._shared_metadata.offsets_quat[0, -1, :] - ray_starts = pattern.ray_starts.reshape(-1, 3) - ray_starts = transform_by_trans_quat(ray_starts, pos_offset, quat_offset) - self._shared_metadata.ray_starts = torch.cat([self._shared_metadata.ray_starts, ray_starts]) + ray_starts = self._options.pattern.ray_starts.reshape(-1, 3) + self.ray_starts = transform_by_trans_quat(ray_starts, pos_offset, quat_offset) + self._shared_metadata.ray_starts = torch.cat([self._shared_metadata.ray_starts, self.ray_starts]) - ray_dirs = pattern.ray_dirs.reshape(-1, 3) + ray_dirs = self._options.pattern.ray_dirs.reshape(-1, 3) ray_dirs = transform_by_quat(ray_dirs, quat_offset) self._shared_metadata.ray_dirs = torch.cat([self._shared_metadata.ray_dirs, ray_dirs]) - num_rays = math.prod(pattern.return_shape) + num_rays = math.prod(self._options.pattern.return_shape) self._shared_metadata.sensors_ray_start_idx.append(self._shared_metadata.total_n_rays) # These fields are used to properly index into the big cache tensor in kernel_cast_rays + self._shared_metadata.output_hits = concat_with_tensor( + self._shared_metadata.output_hits, + torch.empty((self._manager._sim._B, self._cache_size), device=gs.device, dtype=gs.tc_float), + dim=-1, + ) 
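        # Illustrative note (not part of the diff): the buffer grown above is a contiguous
        # staging area of shape (B, sum of every sensor's _cache_size), where _cache_size is
        # assumed to be the sensor's total number of cached floats (n_rays * 3 point coordinates
        # plus n_rays distances). _update_shared_ground_truth_cache below writes kernel results
        # into it whenever the shared cache slice is not contiguous and then copies it back,
        # presumably because the Taichi kernel's ndarray arguments require contiguous tensors.
        # For example, with two sensors of 4 and 2 rays:
        #   output_hits.shape    == (B, 16 + 8) == (B, 24)
        #   sensor_cache_offsets == [0, 16]
        #   sensor_point_offsets == [0, 4]
        #   sensor_point_counts  == [4, 2]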
self._shared_metadata.sensor_cache_offsets = concat_with_tensor( self._shared_metadata.sensor_cache_offsets, self._cache_idx ) @@ -539,8 +544,12 @@ def _update_shared_ground_truth_cache( sensor_cache_offsets=shared_metadata.sensor_cache_offsets, sensor_point_offsets=shared_metadata.sensor_point_offsets, sensor_point_counts=shared_metadata.sensor_point_counts, - output_hits=shared_ground_truth_cache, + output_hits=( + shared_ground_truth_cache if shared_ground_truth_cache.is_contiguous() else shared_metadata.output_hits + ), ) + if not shared_ground_truth_cache.is_contiguous(): + shared_ground_truth_cache[:] = shared_metadata.output_hits @classmethod def _update_shared_cache( @@ -561,9 +570,15 @@ def _draw_debug(self, context: "RasterizerContext"): """ data = self.read().points.reshape(self._manager._sim._B, -1, 3) - for env_idx, color in zip(range(data.shape[0]), itertools.cycle(DEBUG_COLORS)): + for env_idx, color in zip(context.rendered_envs_idx, itertools.cycle(DEBUG_COLORS)): points = data[env_idx] + if not self._options.return_world_frame: + # transform from local frame to world frame + quat = self._link.get_quat(envs_idx=env_idx) + pos = self._link.get_pos(envs_idx=env_idx) + points = tensor_to_array(transform_by_trans_quat(points + self.ray_starts, pos, quat)) + if self.debug_objects[env_idx] is not None: context.clear_debug_object(self.debug_objects[env_idx]) diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 6fdd23ddd..7a4661180 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -4,7 +4,7 @@ import genesis as gs -from .utils import assert_allclose, assert_array_equal +from .utils import assert_allclose, assert_array_equal, rgb_array_to_png_bytes def expand_batch_dim(values: tuple[float, ...], n_envs: int) -> tuple[float, ...] 
| np.ndarray: @@ -266,11 +266,7 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): show_viewer=show_viewer, ) - scene.add_entity( - gs.morphs.Plane( - is_free=False, # TODO: remove after PR #1795 is merged - ) - ) + scene.add_entity(gs.morphs.Plane()) box_obstacle = scene.add_entity( gs.morphs.Box( @@ -306,7 +302,6 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): radius=EXPECTED_DISTANCE, pos=SPHERE_POS, fixed=True, - is_free=False, # TODO: remove after PR #1795 is merged ), ) spherical_raycaster = scene.add_sensor( @@ -362,3 +357,100 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): scene.step() assert grid_raycaster.read().distances.min() > grid_distance_min, "Raycaster should hit falling obstacle" + + +@pytest.mark.required +@pytest.mark.parametrize("n_envs", [0, 2]) +@pytest.mark.parametrize("backend", [gs.cpu]) +def test_sensors_draw_debug(png_snapshot, n_envs): + """Test that sensor debug drawing works correctly and renders visible debug elements.""" + CAM_RES = (640, 480) + + scene = gs.Scene( + viewer_options=gs.options.ViewerOptions( + camera_pos=(2.0, 2.0, 2.0), + camera_lookat=(0.0, 0.0, 0.0), + camera_fov=40, + res=CAM_RES, + ), + profiling_options=gs.options.ProfilingOptions(show_FPS=False), + show_viewer=True, + ) + + scene.add_entity(gs.morphs.Plane()) + + floating_box = scene.add_entity( + gs.morphs.Box( + size=(0.1, 0.1, 0.1), + pos=(0.0, 0.0, 0.5), + fixed=True, + ) + ) + scene.add_sensor( + gs.sensors.IMU( + entity_idx=floating_box.idx, + pos_offset=(0.0, 0.0, 0.1), + draw_debug=True, + ) + ) + + ground_box = scene.add_entity( + gs.morphs.Box( + size=(0.4, 0.2, 0.1), + pos=(-0.25, 0.0, 0.05), + ) + ) + scene.add_sensor( + gs.sensors.Contact( + entity_idx=ground_box.idx, + draw_debug=True, + ) + ) + scene.add_sensor( + gs.sensors.ContactForce( + entity_idx=ground_box.idx, + draw_debug=True, + ) + ) + scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.GridPattern( + resolution=0.1, + size=(0.4, 0.4), + direction=(0.0, 0.0, -1.0), + ), + entity_idx=floating_box.idx, + pos_offset=(0.0, 0.0, -0.1), + return_world_frame=True, + only_cast_fixed=False, + draw_debug=True, + debug_sphere_radius=0.02, + ) + ) + scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.SphericalPattern( + n_points=(6, 6), + fov=(60.0, (-120.0, -60.0)), + ), + entity_idx=floating_box.idx, + pos_offset=(0.0, 0.5, 0.0), + return_world_frame=False, + only_cast_fixed=True, + draw_debug=True, + debug_sphere_radius=0.01, + ) + ) + + scene.build(n_envs=n_envs) + + for _ in range(5): + scene.step() + + pyrender_viewer = scene.visualizer.viewer._pyrender_viewer + assert pyrender_viewer.is_active + rgb_arr, *_ = pyrender_viewer.render_offscreen( + pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False + ) + + assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot From f916f584372e5931d026e10d37d56f4614bc3c03 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Sat, 4 Oct 2025 01:32:40 -0400 Subject: [PATCH 14/28] sensor draw debug test fixes --- examples/sensors/imu_franka.py | 5 +- genesis/sensors/contact_force.py | 33 +++++++----- genesis/sensors/raycaster/raycaster.py | 75 ++++++++++++++++++-------- tests/test_sensors.py | 16 +++--- 4 files changed, 81 insertions(+), 48 deletions(-) diff --git a/examples/sensors/imu_franka.py b/examples/sensors/imu_franka.py index a97e17692..ab142feb1 100644 --- a/examples/sensors/imu_franka.py +++ 
b/examples/sensors/imu_franka.py @@ -88,11 +88,8 @@ def data_func(): else: print("matplotlib or pyqtgraph not found, skipping real-time plotting.") - imu.start_recording( - gs.recorders.CSVFile(filename="imu_data.csv"), - ) scene.start_recording( - data_func=lambda: imu.read().lin_acc, + data_func=lambda: imu.read()._asdict(), rec_options=gs.recorders.NPZFile(filename="imu_data.npz"), ) diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py index c957f09aa..48a801ea1 100644 --- a/genesis/sensors/contact_force.py +++ b/genesis/sensors/contact_force.py @@ -165,10 +165,10 @@ def _draw_debug(self, context: "RasterizerContext"): Only draws for first rendered environment. """ - env_idx = context.rendered_envs_idx[0] + env_idx = context.rendered_envs_idx[0] if self._manager._sim.n_envs > 0 else None - pos = self._link.get_pos(envs_idx=env_idx)[0] - is_contact = self.read(envs_idx=env_idx if self._manager._sim.n_envs > 0 else None).item() + pos = self._link.get_pos(envs_idx=env_idx).squeeze(0) + is_contact = self.read(envs_idx=env_idx).squeeze(0).item() if is_contact: if self.debug_object is None: @@ -234,6 +234,7 @@ class ContactForceSensorMetadata(RigidSensorMetadataMixin, NoisySensorMetadataMi min_force: torch.Tensor = make_tensor_field((0, 3)) max_force: torch.Tensor = make_tensor_field((0, 3)) + output_forces: torch.Tensor = make_tensor_field((0, 0)) @register_sensor(ContactForceSensorOptions, ContactForceSensorMetadata, tuple) @@ -276,6 +277,13 @@ def build(self): _to_tuple(self._options.max_force, length_per_value=3), ) + if self._shared_metadata.output_forces.numel() == 0: + self._shared_metadata.output_forces.reshape(self._manager._sim._B, 0) + self._shared_metadata.output_forces = concat_with_tensor( + self._shared_metadata.output_forces, + torch.empty((self._manager._sim._B, 3), dtype=gs.tc_float, device=gs.device), + ) + def _get_return_format(self) -> tuple[int, ...]: return (3,) @@ -308,8 +316,10 @@ def _update_shared_ground_truth_cache( link_b.contiguous(), links_quat.contiguous(), shared_metadata.links_idx, - shared_ground_truth_cache, + shared_ground_truth_cache if shared_ground_truth_cache.is_contiguous() else shared_metadata.output_forces, ) + if not shared_ground_truth_cache.is_contiguous(): + shared_ground_truth_cache[:] = shared_metadata.output_forces @classmethod def _update_shared_cache( @@ -344,17 +354,12 @@ def _draw_debug(self, context: "RasterizerContext"): """ env_idx = context.rendered_envs_idx[0] - pos = self._link.get_pos(envs_idx=env_idx) - quat = self._link.get_quat(envs_idx=env_idx) + pos = self._link.get_pos(envs_idx=env_idx).squeeze(0) + quat = self._link.get_quat(envs_idx=env_idx).squeeze(0) - # TODO: cleanup env handling - if self._manager._sim.n_envs > 0: - force = self.read(envs_idx=env_idx) - vec = tensor_to_array(transform_by_quat(force * self._options.debug_scale, quat))[0] - else: - force = self.read() # cannot specify envs_idx when n_envs=0 - vec = tensor_to_array(transform_by_quat(force * self._options.debug_scale, quat)) + force = self.read(envs_idx=env_idx if self._manager._sim.n_envs > 0 else None) + vec = tensor_to_array(transform_by_quat(force.squeeze(0) * self._options.debug_scale, quat)) if self.debug_object is not None: context.clear_debug_object(self.debug_object) - self.debug_object = context.draw_debug_arrow(pos=pos[0], vec=vec, color=self._options.debug_color) + self.debug_object = context.draw_debug_arrow(pos=pos, vec=vec, color=self._options.debug_color) diff --git a/genesis/sensors/raycaster/raycaster.py 
b/genesis/sensors/raycaster/raycaster.py index 0858e4803..652b0a1b1 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -1,4 +1,3 @@ -import itertools import math from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, NamedTuple, Type @@ -16,10 +15,11 @@ ti_normalize, ti_transform_by_quat, ti_transform_by_trans_quat, + trans_to_T, transform_by_quat, transform_by_trans_quat, ) -from genesis.utils.misc import concat_with_tensor, make_tensor_field, tensor_to_array, ti_to_torch +from genesis.utils.misc import concat_with_tensor, make_tensor_field, ti_to_torch from genesis.vis.rasterizer_context import RasterizerContext from ..base_sensor import ( @@ -43,6 +43,9 @@ (0.2, 0.6, 1.0, 1.0), (1.0, 1.0, 0.2, 1.0), ) +# A constant stack size should be sufficient for BVH traversal. +# https://madmann91.github.io/2021/01/06/bvhs-part-2.html +# https://forums.developer.nvidia.com/t/thinking-parallel-part-ii-tree-traversal-on-the-gpu/148342 STACK_SIZE = ti.static(64) @@ -264,19 +267,24 @@ def kernel_cast_rays( node_stack[stack_idx + 1] = node.right stack_idx += 2 + # print(f"Ray {i_p}: hit_face={hit_face}") + # --- 3. Process Hit Result --- # The format of output_hits is: [sensor1 points][sensor1 ranges][sensor2 points][sensor2 ranges]... i_p_sensor = i_p - sensor_point_offsets[i_s] i_p_offset = sensor_cache_offsets[i_s] # cumulative cache offset for this sensor n_points_in_sensor = sensor_point_counts[i_s] # number of points in this sensor + i_p_dist = i_p_offset + n_points_in_sensor * 3 + i_p_sensor # index for distance output + if hit_face >= 0: dist = max_range # Store distance at: cache_offset + (num_points_in_sensor * 3) + point_idx_in_sensor - output_hits[i_b, i_p_offset + n_points_in_sensor * 3 + i_p_sensor] = dist + output_hits[i_b, i_p_dist] = dist if is_world_frame[i_s]: hit_point = ray_start_world + dist * ray_direction_world + # Store points at: cache_offset + point_idx_in_sensor * 3 output_hits[i_b, i_p_offset + i_p_sensor * 3 + 0] = hit_point.x output_hits[i_b, i_p_offset + i_p_sensor * 3 + 1] = hit_point.y @@ -295,7 +303,7 @@ def kernel_cast_rays( output_hits[i_b, i_p_offset + i_p_sensor * 3 + 0] = 0.0 output_hits[i_b, i_p_offset + i_p_sensor * 3 + 1] = 0.0 output_hits[i_b, i_p_offset + i_p_sensor * 3 + 2] = 0.0 - output_hits[i_b, i_p_offset + n_points_in_sensor * 3 + i_p_sensor] = no_hit_values[i_s] + output_hits[i_b, i_p_dist] = no_hit_values[i_s] class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): @@ -318,7 +326,11 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this option for the **first** Raycaster sensor will be the behavior for **all** Raycaster sensors. debug_sphere_radius: float, optional - The radius of each debug hit point sphere drawn in the scene. Defaults to 0.02. + The radius of each debug sphere drawn in the scene. Defaults to 0.02. + debug_ray_start_color: float, optional + The color of each debug ray start sphere drawn in the scene. Defaults to (0.5, 0.5, 1.0, 1.0). + debug_ray_hit_color: float, optional + The color of each debug ray hit point sphere drawn in the scene. Defaults to (1.0, 0.5, 0.5, 1.0). 
""" pattern: RaycastPattern @@ -329,6 +341,8 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): only_cast_fixed: bool = False debug_sphere_radius: float = 0.02 + debug_ray_start_color: tuple[float, float, float, float] = (0.5, 0.5, 1.0, 1.0) + debug_ray_hit_color: tuple[float, float, float, float] = (1.0, 0.5, 0.5, 1.0) def model_post_init(self, _): if self.min_range < 0.0: @@ -433,6 +447,9 @@ def build(self): self._shared_metadata.output_hits = torch.empty( (self._manager._sim._B, 0), device=gs.device, dtype=gs.tc_float ) + self._shared_metadata.sensor_cache_offsets = concat_with_tensor( + self._shared_metadata.sensor_cache_offsets, 0 + ) self._shared_metadata.only_cast_fixed = self._options.only_cast_fixed # set for first only self._build_bvh(self._shared_metadata) @@ -458,7 +475,7 @@ def build(self): dim=-1, ) self._shared_metadata.sensor_cache_offsets = concat_with_tensor( - self._shared_metadata.sensor_cache_offsets, self._cache_idx + self._shared_metadata.sensor_cache_offsets, self._cache_size ) self._shared_metadata.sensor_point_offsets = concat_with_tensor( self._shared_metadata.sensor_point_offsets, self._shared_metadata.total_n_rays @@ -466,7 +483,6 @@ def build(self): self._shared_metadata.sensor_point_counts = concat_with_tensor( self._shared_metadata.sensor_point_counts, num_rays ) - self._shared_metadata.total_n_rays += num_rays self._shared_metadata.points_to_sensor_idx = concat_with_tensor( @@ -482,9 +498,6 @@ def build(self): no_hit_value = self._options.no_hit_value if self._options.no_hit_value is not None else self._options.max_range self._shared_metadata.no_hit_values = concat_with_tensor(self._shared_metadata.no_hit_values, no_hit_value) - if self._options.draw_debug: - self.debug_objects = [None] * self._manager._sim._B - @classmethod def reset(cls, shared_metadata: RaycasterSharedMetadata, envs_idx): super().reset(shared_metadata, envs_idx) @@ -566,22 +579,38 @@ def _draw_debug(self, context: "RasterizerContext"): """ Draw hit points as spheres in the scene. - Spheres will be different colors per environment. + Only draws for first rendered environment. 
""" - data = self.read().points.reshape(self._manager._sim._B, -1, 3) + env_idx = context.rendered_envs_idx[0] - for env_idx, color in zip(context.rendered_envs_idx, itertools.cycle(DEBUG_COLORS)): - points = data[env_idx] + points = self.read(envs_idx=env_idx if self._manager._sim.n_envs > 0 else None).points.reshape(-1, 3) - if not self._options.return_world_frame: - # transform from local frame to world frame - quat = self._link.get_quat(envs_idx=env_idx) - pos = self._link.get_pos(envs_idx=env_idx) - points = tensor_to_array(transform_by_trans_quat(points + self.ray_starts, pos, quat)) + pos = self._link.get_pos(envs_idx=env_idx) + quat = self._link.get_quat(envs_idx=env_idx) - if self.debug_objects[env_idx] is not None: - context.clear_debug_object(self.debug_objects[env_idx]) + ray_starts = transform_by_trans_quat(self.ray_starts, pos, quat) - self.debug_objects[env_idx] = context.draw_debug_spheres( - points, radius=self._options.debug_sphere_radius, color=color + if not self._options.return_world_frame: + points = transform_by_trans_quat(points + self.ray_starts, pos, quat) + + if not self.debug_objects: + self.debug_objects.append( + context.draw_debug_spheres( + ray_starts, + radius=self._options.debug_sphere_radius, + color=self._options.debug_ray_start_color, + ) ) + + for i_point in range(points.shape[0]): + self.debug_objects.append( + context.draw_debug_sphere( + points[i_point], + radius=self._options.debug_sphere_radius, + color=self._options.debug_ray_hit_color, + ) + ) + + else: + context.update_debug_objects([self.debug_objects[0]], trans_to_T(ray_starts).unsqueeze(0)) + context.update_debug_objects(self.debug_objects[1:], trans_to_T(points).unsqueeze(0)) diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 7a4661180..8c7283384 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -360,8 +360,7 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): @pytest.mark.required -@pytest.mark.parametrize("n_envs", [0, 2]) -@pytest.mark.parametrize("backend", [gs.cpu]) +@pytest.mark.parametrize("n_envs", [2]) def test_sensors_draw_debug(png_snapshot, n_envs): """Test that sensor debug drawing works correctly and renders visible debug elements.""" CAM_RES = (640, 480) @@ -369,8 +368,8 @@ def test_sensors_draw_debug(png_snapshot, n_envs): scene = gs.Scene( viewer_options=gs.options.ViewerOptions( camera_pos=(2.0, 2.0, 2.0), - camera_lookat=(0.0, 0.0, 0.0), - camera_fov=40, + camera_lookat=(0.0, 0.0, 0.2), + camera_fov=30, res=CAM_RES, ), profiling_options=gs.options.ProfilingOptions(show_FPS=False), @@ -404,27 +403,28 @@ def test_sensors_draw_debug(png_snapshot, n_envs): gs.sensors.Contact( entity_idx=ground_box.idx, draw_debug=True, + debug_sphere_radius=0.08, ) ) scene.add_sensor( gs.sensors.ContactForce( entity_idx=ground_box.idx, draw_debug=True, + debug_scale=0.01, ) ) scene.add_sensor( gs.sensors.Raycaster( pattern=gs.sensors.raycaster.GridPattern( - resolution=0.1, + resolution=0.2, size=(0.4, 0.4), direction=(0.0, 0.0, -1.0), ), entity_idx=floating_box.idx, - pos_offset=(0.0, 0.0, -0.1), + pos_offset=(0.2, 0.0, -0.1), return_world_frame=True, only_cast_fixed=False, draw_debug=True, - debug_sphere_radius=0.02, ) ) scene.add_sensor( @@ -439,6 +439,8 @@ def test_sensors_draw_debug(png_snapshot, n_envs): only_cast_fixed=True, draw_debug=True, debug_sphere_radius=0.01, + debug_ray_start_color=(1.0, 1.0, 0.5, 1.0), + debug_ray_hit_color=(0.5, 1.0, 1.0, 1.0), ) ) From 2128c715a54bcb43b84a5f4ef52eef58bd8fc76d Mon Sep 17 00:00:00 2001 From: 
Trinity Chung Date: Sat, 4 Oct 2025 02:49:28 -0400 Subject: [PATCH 15/28] remove only_cast_fixed option --- examples/sensors/lidar_teleop.py | 25 ++++--- genesis/sensors/raycaster/patterns.py | 4 +- genesis/sensors/raycaster/raycaster.py | 39 ++++------- test_sensor_draw.py | 94 ++++++++++++++++++++++++++ tests/test_sensors.py | 28 +++----- tests/utils.py | 8 +-- 6 files changed, 136 insertions(+), 62 deletions(-) create mode 100644 test_sensor_draw.py diff --git a/examples/sensors/lidar_teleop.py b/examples/sensors/lidar_teleop.py index 5656e588e..d90dd83ef 100644 --- a/examples/sensors/lidar_teleop.py +++ b/examples/sensors/lidar_teleop.py @@ -60,12 +60,7 @@ def main(): parser.add_argument("-B", "--n_envs", type=int, default=0, help="Number of environments to replicate") parser.add_argument("--cpu", action="store_true", help="Run on CPU instead of GPU") parser.add_argument("--use-box", action="store_true", help="Use Box as robot instead of Go2") - parser.add_argument( - "-f", - "--fixed", - action="store_true", - help="Load obstacles as fixed and cast only against fixed objects", - ) + parser.add_argument("-f", "--fixed", action="store_true", help="Load obstacles as fixed.") parser.add_argument( "--pattern", type=str, @@ -96,13 +91,26 @@ def main(): angle = 2 * np.pi * i / NUM_CYLINDERS x = CYLINDER_RING_RADIUS * np.cos(angle) y = CYLINDER_RING_RADIUS * np.sin(angle) - scene.add_entity(gs.morphs.Cylinder(height=1.5, radius=0.3, pos=(x, y, 0.75))) + scene.add_entity( + gs.morphs.Cylinder( + height=1.5, + radius=0.3, + pos=(x, y, 0.75), + fixed=args.fixed, + ) + ) for i in range(NUM_BOXES): angle = 2 * np.pi * i / NUM_BOXES + np.pi / 6 x = BOX_RING_RADIUS * np.cos(angle) y = BOX_RING_RADIUS * np.sin(angle) - scene.add_entity(gs.morphs.Box(size=(0.5, 0.5, 2.0), pos=(x, y, 1.0))) + scene.add_entity( + gs.morphs.Box( + size=(0.5, 0.5, 2.0), + pos=(x, y, 1.0), + fixed=args.fixed, + ) + ) robot_kwargs = dict( pos=(0.0, 0.0, 0.35), @@ -122,7 +130,6 @@ def main(): pos_offset=pos_offset, euler_offset=(0.0, 0.0, 0.0), return_world_frame=True, - only_cast_fixed=args.fixed, draw_debug=True, ) diff --git a/genesis/sensors/raycaster/patterns.py b/genesis/sensors/raycaster/patterns.py index 9fe77ef1c..a4cf1629f 100644 --- a/genesis/sensors/raycaster/patterns.py +++ b/genesis/sensors/raycaster/patterns.py @@ -173,8 +173,8 @@ class SphericalPattern(RaycastPattern): def __init__( self, - fov: tuple[float | tuple[float, float], float | tuple[float, float]] = (360.0, 30.0), - n_points: tuple[int, int] = (64, 128), + fov: tuple[float | tuple[float, float], float | tuple[float, float]] = (360.0, 60.0), + n_points: tuple[int, int] = (128, 64), angular_resolution: tuple[float | None, float | None] = (None, None), angles: tuple[Sequence[float] | None, Sequence[float] | None] = (None, None), ): diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 652b0a1b1..9f054e29d 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -267,8 +267,6 @@ def kernel_cast_rays( node_stack[stack_idx + 1] = node.right stack_idx += 2 - # print(f"Ray {i_p}: hit_face={hit_face}") - # --- 3. Process Hit Result --- # The format of output_hits is: [sensor1 points][sensor1 ranges][sensor2 points][sensor2 ranges]... i_p_sensor = i_p - sensor_point_offsets[i_s] @@ -322,9 +320,6 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): The value to return for no hit. Defaults to max_range if not specified. 
return_world_frame : bool, optional Whether to return points in the world frame. Defaults to False (local frame). - only_cast_fixed : bool, optional - Whether to only cast rays on fixed geoms. Defaults to False. This is a shared option, so the value of this - option for the **first** Raycaster sensor will be the behavior for **all** Raycaster sensors. debug_sphere_radius: float, optional The radius of each debug sphere drawn in the scene. Defaults to 0.02. debug_ray_start_color: float, optional @@ -338,7 +333,6 @@ class RaycasterOptions(RigidSensorOptionsMixin, SensorOptions): max_range: float = 20.0 no_hit_value: float = Field(default_factory=lambda data: data["max_range"]) return_world_frame: bool = False - only_cast_fixed: bool = False debug_sphere_radius: float = 0.02 debug_ray_start_color: tuple[float, float, float, float] = (0.5, 0.5, 1.0, 1.0) @@ -357,7 +351,7 @@ def model_post_init(self, _): class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): bvh: LBVH | None = None aabb: AABB | None = None - only_cast_fixed: bool = False + needs_aabb_update: bool = False map_faces: Any | None = None n_faces: int = 0 @@ -406,14 +400,6 @@ def __init__( def _build_bvh(cls, shared_metadata: RaycasterSharedMetadata): n_faces = shared_metadata.solver.faces_info.geom_idx.shape[0] torch_map_faces = torch.arange(n_faces, dtype=torch.int32, device=gs.device) - if shared_metadata.only_cast_fixed: - # count the number of faces in a fixed geoms - geom_is_fixed = ti_to_torch(shared_metadata.solver.geoms_info.is_fixed) - faces_geom = ti_to_torch(shared_metadata.solver.faces_info.geom_idx) - n_faces = torch.sum(geom_is_fixed[faces_geom]).item() - if n_faces == 0: - gs.raise_exception("No fixed geoms found in the scene.") - torch_map_faces = torch.where(geom_is_fixed[faces_geom])[0] shared_metadata.map_faces = ti.field(ti.i32, (n_faces)) shared_metadata.map_faces.from_torch(torch_map_faces) @@ -444,13 +430,15 @@ def build(self): # first lidar sensor initialization: build aabb and bvh if self._shared_metadata.bvh is None: + geom_is_fixed = ti_to_torch(self._shared_metadata.solver.geoms_info.is_fixed) + self._shared_metadata.needs_aabb_update = bool((~geom_is_fixed).any().item()) + self._shared_metadata.output_hits = torch.empty( (self._manager._sim._B, 0), device=gs.device, dtype=gs.tc_float ) self._shared_metadata.sensor_cache_offsets = concat_with_tensor( self._shared_metadata.sensor_cache_offsets, 0 ) - self._shared_metadata.only_cast_fixed = self._options.only_cast_fixed # set for first only self._build_bvh(self._shared_metadata) self._shared_metadata.patterns.append(self._options.pattern) @@ -515,7 +503,7 @@ def _get_cache_dtype(cls) -> torch.dtype: def _update_shared_ground_truth_cache( cls, shared_metadata: RaycasterSharedMetadata, shared_ground_truth_cache: torch.Tensor ): - if not shared_metadata.only_cast_fixed: + if not shared_metadata.needs_aabb_update: rigid_solver_decomp.kernel_update_all_verts( geoms_state=shared_metadata.solver.geoms_state, verts_info=shared_metadata.solver.verts_info, @@ -601,16 +589,13 @@ def _draw_debug(self, context: "RasterizerContext"): color=self._options.debug_ray_start_color, ) ) - - for i_point in range(points.shape[0]): - self.debug_objects.append( - context.draw_debug_sphere( - points[i_point], - radius=self._options.debug_sphere_radius, - color=self._options.debug_ray_hit_color, - ) + self.debug_objects.append( + context.draw_debug_spheres( + points, + radius=self._options.debug_sphere_radius, + color=self._options.debug_ray_hit_color, ) - + ) 
else: context.update_debug_objects([self.debug_objects[0]], trans_to_T(ray_starts).unsqueeze(0)) - context.update_debug_objects(self.debug_objects[1:], trans_to_T(points).unsqueeze(0)) + context.update_debug_objects([self.debug_objects[1]], trans_to_T(points).unsqueeze(0)) diff --git a/test_sensor_draw.py b/test_sensor_draw.py new file mode 100644 index 000000000..ea3c9395d --- /dev/null +++ b/test_sensor_draw.py @@ -0,0 +1,94 @@ +import genesis as gs + + +def test_sensors_draw_debug(png_snapshot, n_envs): + """Test that sensor debug drawing works correctly and renders visible debug elements.""" + CAM_RES = (640, 480) + + scene = gs.Scene( + viewer_options=gs.options.ViewerOptions( + camera_pos=(2.0, 2.0, 2.0), + camera_lookat=(0.0, 0.0, 0.2), + camera_fov=30, + res=CAM_RES, + ), + profiling_options=gs.options.ProfilingOptions(show_FPS=False), + show_viewer=True, + ) + + scene.add_entity(gs.morphs.Plane()) + + floating_box = scene.add_entity( + gs.morphs.Box( + size=(0.1, 0.1, 0.1), + pos=(0.0, 0.0, 0.5), + fixed=True, + ) + ) + scene.add_sensor( + gs.sensors.IMU( + entity_idx=floating_box.idx, + pos_offset=(0.0, 0.0, 0.1), + draw_debug=True, + ) + ) + + ground_box = scene.add_entity( + gs.morphs.Box( + size=(0.4, 0.2, 0.1), + pos=(-0.25, 0.0, 0.05), + ) + ) + scene.add_sensor( + gs.sensors.Contact( + entity_idx=ground_box.idx, + draw_debug=True, + debug_sphere_radius=0.08, + ) + ) + scene.add_sensor( + gs.sensors.ContactForce( + entity_idx=ground_box.idx, + draw_debug=True, + debug_scale=0.01, + ) + ) + scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.GridPattern( + resolution=0.2, + size=(0.4, 0.4), + direction=(0.0, 0.0, -1.0), + ), + entity_idx=floating_box.idx, + pos_offset=(0.2, 0.0, -0.1), + return_world_frame=True, + draw_debug=True, + ) + ) + scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.SphericalPattern( + n_points=(6, 6), + fov=(60.0, (-120.0, -60.0)), + ), + entity_idx=floating_box.idx, + pos_offset=(0.0, 0.5, 0.0), + return_world_frame=False, + draw_debug=True, + debug_sphere_radius=0.01, + debug_ray_start_color=(1.0, 1.0, 0.5, 1.0), + debug_ray_hit_color=(0.5, 1.0, 1.0, 1.0), + ) + ) + + scene.build(n_envs=n_envs) + + for _ in range(5): + scene.step() + + +if __name__ == "__main__": + gs.init(backend=gs.cpu) + # test_sensors_draw_debug(None, 0) + test_sensors_draw_debug(None, 2) diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 8c7283384..846ecbd40 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -252,10 +252,9 @@ def test_rigid_tactile_sensors_gravity_force(show_viewer, tol, n_envs): @pytest.mark.required @pytest.mark.parametrize("n_envs", [0, 2]) -@pytest.mark.parametrize("only_cast_fixed", [False, True]) -def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): +def test_raycaster_hits(show_viewer, tol, n_envs): """Test if the Raycaster sensor with GridPattern rays pointing to ground returns the correct distance.""" - EXPECTED_DISTANCE = 0.7 + EXPECTED_DISTANCE = 1.2 NUM_RAYS_XY = 3 BOX_HEIGHT = 0.2 SPHERE_POS = (4.0, 0.0, 1.0) @@ -292,7 +291,6 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): entity_idx=grid_sensor_box.idx, pos_offset=(0.0, 0.0, -BOX_HEIGHT), return_world_frame=True, - only_cast_fixed=only_cast_fixed, draw_debug=True, ) ) @@ -311,7 +309,6 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): ), entity_idx=spherical_sensor.idx, return_world_frame=False, - only_cast_fixed=only_cast_fixed, ) ) @@ -327,11 +324,10 @@ def 
test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): assert grid_distances.shape == spherical_distances.shape == expected_shape grid_distance_min = grid_distances.min() - if not only_cast_fixed: - assert grid_distances.min() < EXPECTED_DISTANCE - tol, "Raycaster grid pattern should have hit obstacle" - ground_hit_mask = grid_distances > grid_distance_min + tol - grid_hits = grid_hits[ground_hit_mask] - grid_distances = grid_distances[ground_hit_mask] + assert grid_distances.min() < EXPECTED_DISTANCE - tol, "Raycaster grid pattern should have hit obstacle" + ground_hit_mask = grid_distances > grid_distance_min + tol + grid_hits = grid_hits[ground_hit_mask] + grid_distances = grid_distances[ground_hit_mask] assert_allclose( grid_hits[..., 2], @@ -352,15 +348,9 @@ def test_raycaster_hits(show_viewer, tol, n_envs, only_cast_fixed): err_msg=f"Raycaster spherical pattern should measure {EXPECTED_DISTANCE}m to the sphere around it", ) - if not only_cast_fixed: - for _ in range(5): - scene.step() - - assert grid_raycaster.read().distances.min() > grid_distance_min, "Raycaster should hit falling obstacle" - @pytest.mark.required -@pytest.mark.parametrize("n_envs", [2]) +@pytest.mark.parametrize("n_envs", [0, 2]) def test_sensors_draw_debug(png_snapshot, n_envs): """Test that sensor debug drawing works correctly and renders visible debug elements.""" CAM_RES = (640, 480) @@ -423,7 +413,6 @@ def test_sensors_draw_debug(png_snapshot, n_envs): entity_idx=floating_box.idx, pos_offset=(0.2, 0.0, -0.1), return_world_frame=True, - only_cast_fixed=False, draw_debug=True, ) ) @@ -436,7 +425,6 @@ def test_sensors_draw_debug(png_snapshot, n_envs): entity_idx=floating_box.idx, pos_offset=(0.0, 0.5, 0.0), return_world_frame=False, - only_cast_fixed=True, draw_debug=True, debug_sphere_radius=0.01, debug_ray_start_color=(1.0, 1.0, 0.5, 1.0), @@ -446,7 +434,7 @@ def test_sensors_draw_debug(png_snapshot, n_envs): scene.build(n_envs=n_envs) - for _ in range(5): + for _ in range(10): scene.step() pyrender_viewer = scene.visualizer.viewer._pyrender_viewer diff --git a/tests/utils.py b/tests/utils.py index e8c0c5640..b2a875e0f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -211,7 +211,7 @@ def get_hf_dataset( continue ext = path.suffix.lower() - if not ext in (URDF_FORMAT, MJCF_FORMAT, *IMAGE_EXTENSIONS, *MESH_EXTENSIONS): + if ext not in (URDF_FORMAT, MJCF_FORMAT, *IMAGE_EXTENSIONS, *MESH_EXTENSIONS): continue has_files = True @@ -223,19 +223,19 @@ def get_hf_dataset( try: ET.parse(path) except ET.ParseError as e: - raise HTTPError(f"Impossible to parse XML file.") from e + raise HTTPError("Impossible to parse XML file.") from e elif path.suffix.lower() in IMAGE_EXTENSIONS: try: Image.open(path) except UnidentifiedImageError as e: - raise HTTPError(f"Impossible to parse Image file.") from e + raise HTTPError("Impossible to parse Image file.") from e elif path.suffix.lower() in MESH_EXTENSIONS: # TODO: Validating mesh files is more tricky. Ignoring them for now. pass if not has_files: raise HTTPError("No file downloaded.") - except (HTTPError, FileNotFoundError) as e: + except (HTTPError, FileNotFoundError): if i == num_retry - 1: raise print(f"Failed to download assets from HuggingFace dataset. 
Trying again in {retry_delay}s...") From 76fb95e309e71c7155df90643e87815174cba536 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Mon, 6 Oct 2025 15:55:33 -0400 Subject: [PATCH 16/28] minor fixes --- genesis/sensors/contact_force.py | 8 ++++++-- genesis/sensors/raycaster/raycaster.py | 2 +- tests/test_sensors.py | 5 +++-- tests/utils.py | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py index 48a801ea1..30a55c6ab 100644 --- a/genesis/sensors/contact_force.py +++ b/genesis/sensors/contact_force.py @@ -234,7 +234,7 @@ class ContactForceSensorMetadata(RigidSensorMetadataMixin, NoisySensorMetadataMi min_force: torch.Tensor = make_tensor_field((0, 3)) max_force: torch.Tensor = make_tensor_field((0, 3)) - output_forces: torch.Tensor = make_tensor_field((0, 0)) + output_forces: torch.Tensor = make_tensor_field((0, 0)) # FIXME: remove once we have contiguous cache slices @register_sensor(ContactForceSensorOptions, ContactForceSensorMetadata, tuple) @@ -299,7 +299,11 @@ def _update_shared_ground_truth_cache( all_contacts = shared_metadata.solver.collider.get_contacts(as_tensor=True, to_torch=True) force, link_a, link_b = all_contacts["force"], all_contacts["link_a"], all_contacts["link_b"] - shared_ground_truth_cache.fill_(0.0) + if not shared_ground_truth_cache.is_contiguous(): + shared_metadata.output_forces.fill_(0.0) + else: + shared_ground_truth_cache.fill_(0.0) + if link_a.shape[-1] == 0: return # no contacts diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 9f054e29d..170dc2bf0 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -373,7 +373,7 @@ class RaycasterSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata): sensor_cache_offsets: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) sensor_point_offsets: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) sensor_point_counts: torch.Tensor = make_tensor_field((0,), dtype_factory=lambda: gs.tc_int) - output_hits: torch.Tensor = make_tensor_field((0, 0)) + output_hits: torch.Tensor = make_tensor_field((0, 0)) # FIXME: remove once we have contiguous cache slices class RaycasterData(NamedTuple): diff --git a/tests/test_sensors.py b/tests/test_sensors.py index 846ecbd40..d4f92072a 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -394,6 +394,7 @@ def test_sensors_draw_debug(png_snapshot, n_envs): entity_idx=ground_box.idx, draw_debug=True, debug_sphere_radius=0.08, + debug_color=(1.0, 0.5, 1.0, 1.0), ) ) scene.add_sensor( @@ -427,14 +428,14 @@ def test_sensors_draw_debug(png_snapshot, n_envs): return_world_frame=False, draw_debug=True, debug_sphere_radius=0.01, - debug_ray_start_color=(1.0, 1.0, 0.5, 1.0), + debug_ray_start_color=(1.0, 1.0, 0.0, 1.0), debug_ray_hit_color=(0.5, 1.0, 1.0, 1.0), ) ) scene.build(n_envs=n_envs) - for _ in range(10): + for _ in range(5): scene.step() pyrender_viewer = scene.visualizer.viewer._pyrender_viewer diff --git a/tests/utils.py b/tests/utils.py index b2a875e0f..83243d850 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -32,7 +32,7 @@ DEFAULT_BRANCH_NAME = "main" HUGGINGFACE_ASSETS_REVISION = "16e4eae0024312b84518f4b555dd630d6b34095a" -HUGGINGFACE_SNAPSHOT_REVISION = "0db0ca5941d6b64c58d9e9711abe62e3a50738ac" +HUGGINGFACE_SNAPSHOT_REVISION = "0113767941691c02205350b89bf3a087d8a2b617" MESH_EXTENSIONS = (".mtl", *MESH_FORMATS, *GLTF_FORMATS, *USD_FORMATS) 
IMAGE_EXTENSIONS = (".png", ".jpg") From 565fc8f3dfc6c8d4adbe74becefd259361bb33c7 Mon Sep 17 00:00:00 2001 From: Trinity Chung Date: Mon, 6 Oct 2025 17:34:11 -0400 Subject: [PATCH 17/28] rm examples from ignore list --- tests/test_examples.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/test_examples.py b/tests/test_examples.py index 9dbc0229b..ecaf27fc9 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -1,8 +1,6 @@ import os import sys import subprocess -import shutil -import shlex from pathlib import Path import pytest @@ -27,8 +25,6 @@ "multi_gpu.py", "fem_cube_linked_with_arm.py", # FIXME: segfault on exit "single_franka_batch_render.py", # FIXME: segfault on exit - "imu_franka.py", # FIXME: broken - "contact_force_go2.py", # FIXME: broken "cut_dragon.py", # FIXME: Only supported on Linux } From f7f58a0397f87a345253c92e1ab5dcdcc0f34a38 Mon Sep 17 00:00:00 2001 From: Alexis Duburcq Date: Tue, 7 Oct 2025 09:37:38 +0200 Subject: [PATCH 18/28] cleanup rendering unit tests. --- test_sensor_draw.py | 94 ----------------------- tests/test_render.py | 174 ++++++++++++++++++++++++++++++++++++++---- tests/test_sensors.py | 98 ------------------------ tests/utils.py | 2 +- 4 files changed, 162 insertions(+), 206 deletions(-) delete mode 100644 test_sensor_draw.py diff --git a/test_sensor_draw.py b/test_sensor_draw.py deleted file mode 100644 index ea3c9395d..000000000 --- a/test_sensor_draw.py +++ /dev/null @@ -1,94 +0,0 @@ -import genesis as gs - - -def test_sensors_draw_debug(png_snapshot, n_envs): - """Test that sensor debug drawing works correctly and renders visible debug elements.""" - CAM_RES = (640, 480) - - scene = gs.Scene( - viewer_options=gs.options.ViewerOptions( - camera_pos=(2.0, 2.0, 2.0), - camera_lookat=(0.0, 0.0, 0.2), - camera_fov=30, - res=CAM_RES, - ), - profiling_options=gs.options.ProfilingOptions(show_FPS=False), - show_viewer=True, - ) - - scene.add_entity(gs.morphs.Plane()) - - floating_box = scene.add_entity( - gs.morphs.Box( - size=(0.1, 0.1, 0.1), - pos=(0.0, 0.0, 0.5), - fixed=True, - ) - ) - scene.add_sensor( - gs.sensors.IMU( - entity_idx=floating_box.idx, - pos_offset=(0.0, 0.0, 0.1), - draw_debug=True, - ) - ) - - ground_box = scene.add_entity( - gs.morphs.Box( - size=(0.4, 0.2, 0.1), - pos=(-0.25, 0.0, 0.05), - ) - ) - scene.add_sensor( - gs.sensors.Contact( - entity_idx=ground_box.idx, - draw_debug=True, - debug_sphere_radius=0.08, - ) - ) - scene.add_sensor( - gs.sensors.ContactForce( - entity_idx=ground_box.idx, - draw_debug=True, - debug_scale=0.01, - ) - ) - scene.add_sensor( - gs.sensors.Raycaster( - pattern=gs.sensors.raycaster.GridPattern( - resolution=0.2, - size=(0.4, 0.4), - direction=(0.0, 0.0, -1.0), - ), - entity_idx=floating_box.idx, - pos_offset=(0.2, 0.0, -0.1), - return_world_frame=True, - draw_debug=True, - ) - ) - scene.add_sensor( - gs.sensors.Raycaster( - pattern=gs.sensors.raycaster.SphericalPattern( - n_points=(6, 6), - fov=(60.0, (-120.0, -60.0)), - ), - entity_idx=floating_box.idx, - pos_offset=(0.0, 0.5, 0.0), - return_world_frame=False, - draw_debug=True, - debug_sphere_radius=0.01, - debug_ray_start_color=(1.0, 1.0, 0.5, 1.0), - debug_ray_hit_color=(0.5, 1.0, 1.0, 1.0), - ) - ) - - scene.build(n_envs=n_envs) - - for _ in range(5): - scene.step() - - -if __name__ == "__main__": - gs.init(backend=gs.cpu) - # test_sensors_draw_debug(None, 0) - test_sensors_draw_debug(None, 2) diff --git a/tests/test_render.py b/tests/test_render.py index f69fff0f3..36a66d5af 100644 --- a/tests/test_render.py +++ 
b/tests/test_render.py @@ -33,12 +33,11 @@ class RENDERER_TYPE(enum.IntEnum): def renderer(renderer_type): if renderer_type == RENDERER_TYPE.RASTERIZER: return gs.renderers.Rasterizer() - elif renderer_type == RENDERER_TYPE.RAYTRACER: + if renderer_type == RENDERER_TYPE.RAYTRACER: return gs.renderers.RayTracer() - else: - return gs.renderers.BatchRenderer( - use_rasterizer=renderer_type == RENDERER_TYPE.BATCHRENDER_RASTERIZER, - ) + return gs.renderers.BatchRenderer( + use_rasterizer=renderer_type == RENDERER_TYPE.BATCHRENDER_RASTERIZER, + ) @pytest.fixture(scope="function") @@ -114,15 +113,33 @@ def test_render_api(show_viewer, renderer_type, renderer): "renderer_type", [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER], ) -def test_deterministic(tmp_path, show_viewer, tol): +def test_deterministic(tmp_path, renderer_type, renderer, show_viewer, tol): scene = gs.Scene( vis_options=gs.options.VisOptions( # rendered_envs_idx=(0, 1, 2), env_separate_rigid=False, ), + renderer=renderer, show_viewer=show_viewer, show_FPS=False, ) + if renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER): + scene.add_light( + pos=(0.0, 0.0, 1.5), + dir=(1.0, 1.0, -2.0), + directional=True, + castshadow=True, + cutoff=45.0, + intensity=0.5, + ) + scene.add_light( + pos=(4.0, -4.0, 4.0), + dir=(-1.0, 1.0, -1.0), + directional=False, + castshadow=True, + cutoff=45.0, + intensity=0.5, + ) plane = scene.add_entity( morph=gs.morphs.Plane(), surface=gs.surfaces.Aluminium( @@ -264,7 +281,7 @@ def test_deterministic(tmp_path, show_viewer, tol): rgb_array, *_ = cam.render( rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False, force_render=True ) - assert np.max(np.std(rgb_array.reshape((-1, 3)), axis=0)) > 10.0 + assert tensor_to_array(rgb_array).reshape((-1, 3)).astype(np.float32).std(axis=0).max() > 10.0 robots_rgb_arrays.append(rgb_array) steps_rgb_arrays.append(robots_rgb_arrays) @@ -296,6 +313,8 @@ def test_render_api_advanced(tmp_path, n_envs, show_viewer, png_snapshot, render shadow=(renderer_type != RENDERER_TYPE.RASTERIZER), ), renderer=renderer, + show_viewer=False, + show_FPS=False, ) plane = scene.add_entity( morph=gs.morphs.Plane(), @@ -522,6 +541,7 @@ def test_segmentation_map(segmentation_level, particle_mode, renderer_type, rend ), renderer=renderer, show_viewer=False, + show_FPS=False, ) robot = scene.add_entity( @@ -589,9 +609,10 @@ def test_segmentation_map(segmentation_level, particle_mode, renderer_type, rend assert_array_equal(np.sort(np.unique(seg.flat)), np.arange(0, seg_num)) -@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER]) +@pytest.mark.required @pytest.mark.parametrize("n_envs", [0, 2]) -def test_camera_follow_entity(n_envs, show_viewer): +@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER]) +def test_camera_follow_entity(n_envs, renderer, show_viewer): CAM_RES = (100, 100) scene = gs.Scene( @@ -599,7 +620,9 @@ def test_camera_follow_entity(n_envs, show_viewer): rendered_envs_idx=[1] if n_envs else None, segmentation_level="entity", ), + renderer=renderer, show_viewer=False, + show_FPS=False, ) for pos in ((1.0, 0.0, 0.0), (-1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, -1.0, 0.0)): obj = scene.add_entity( @@ -775,18 +798,19 @@ def test_point_cloud(renderer_type, renderer, show_viewer): @pytest.mark.required @pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER]) -def test_draw_debug(show_viewer): +def test_draw_debug(renderer, 
show_viewer): if "GS_DISABLE_OFFSCREEN_MARKERS" in os.environ: pytest.skip("Offscreen rendering of markers is forcibly disabled. Skipping...") scene = gs.Scene( + renderer=renderer, show_viewer=show_viewer, + show_FPS=False, ) cam = scene.add_camera( pos=(3.5, 0.5, 2.5), lookat=(0.0, 0.0, 0.5), up=(0.0, 0.0, 1.0), - fov=40, res=(640, 640), GUI=show_viewer, ) @@ -842,10 +866,112 @@ def test_draw_debug(show_viewer): assert_allclose(np.std(rgb_array.reshape((-1, 3)), axis=0), 0.0, tol=gs.EPS) +@pytest.mark.required +@pytest.mark.parametrize("n_envs", [0, 2]) +@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER]) +@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.") +def test_sensors_draw_debug(n_envs, renderer, png_snapshot): + """Test that sensor debug drawing works correctly and renders visible debug elements.""" + CAM_RES = (640, 480) + + scene = gs.Scene( + viewer_options=gs.options.ViewerOptions( + camera_pos=(2.0, 2.0, 2.0), + camera_lookat=(0.0, 0.0, 0.2), + res=CAM_RES, + ), + profiling_options=gs.options.ProfilingOptions( + show_FPS=False, + ), + renderer=renderer, + show_viewer=True, + ) + + scene.add_entity(gs.morphs.Plane()) + + floating_box = scene.add_entity( + gs.morphs.Box( + size=(0.1, 0.1, 0.1), + pos=(0.0, 0.0, 0.5), + fixed=True, + ) + ) + scene.add_sensor( + gs.sensors.IMU( + entity_idx=floating_box.idx, + pos_offset=(0.0, 0.0, 0.1), + draw_debug=True, + ) + ) + + ground_box = scene.add_entity( + gs.morphs.Box( + size=(0.4, 0.2, 0.1), + pos=(-0.25, 0.0, 0.05), + ) + ) + scene.add_sensor( + gs.sensors.Contact( + entity_idx=ground_box.idx, + draw_debug=True, + debug_sphere_radius=0.08, + debug_color=(1.0, 0.5, 1.0, 1.0), + ) + ) + scene.add_sensor( + gs.sensors.ContactForce( + entity_idx=ground_box.idx, + draw_debug=True, + debug_scale=0.01, + ) + ) + scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.GridPattern( + resolution=0.2, + size=(0.4, 0.4), + direction=(0.0, 0.0, -1.0), + ), + entity_idx=floating_box.idx, + pos_offset=(0.2, 0.0, -0.1), + return_world_frame=True, + draw_debug=True, + ) + ) + scene.add_sensor( + gs.sensors.Raycaster( + pattern=gs.sensors.raycaster.SphericalPattern( + n_points=(6, 6), + fov=(60.0, (-120.0, -60.0)), + ), + entity_idx=floating_box.idx, + pos_offset=(0.0, 0.5, 0.0), + return_world_frame=False, + draw_debug=True, + debug_sphere_radius=0.01, + debug_ray_start_color=(1.0, 1.0, 0.0, 1.0), + debug_ray_hit_color=(0.5, 1.0, 1.0, 1.0), + ) + ) + + scene.build(n_envs=n_envs) + + for _ in range(5): + scene.step() + + pyrender_viewer = scene.visualizer.viewer._pyrender_viewer + assert pyrender_viewer.is_active + rgb_arr, *_ = pyrender_viewer.render_offscreen( + pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False + ) + + assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot + + @pytest.mark.required @pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER]) @pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.") -def test_interactive_viewer_key_press(tmp_path, monkeypatch, png_snapshot, show_viewer): +def test_interactive_viewer_key_press(tmp_path, monkeypatch, renderer, png_snapshot, show_viewer): IMAGE_FILENAME = tmp_path / "screenshot.png" # Mock 'get_save_filename' to avoid poping up an interactive dialog @@ -878,7 +1004,9 @@ def on_key_press(self, symbol: int, modifiers: int): # 'EventLoop.run() must be called from the 
same thread that imports pyglet.app'. run_in_thread=(sys.platform == "linux"), ), + renderer=renderer, show_viewer=True, + show_FPS=False, ) cube = scene.add_entity( gs.morphs.Box( @@ -922,7 +1050,7 @@ def on_key_press(self, symbol: int, modifiers: int): @pytest.mark.parametrize( "renderer_type", - [RENDERER_TYPE.RASTERIZER], + [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER], ) def test_render_planes(tmp_path, png_snapshot, renderer): CAM_RES = (256, 256) @@ -936,7 +1064,26 @@ def test_render_planes(tmp_path, png_snapshot, renderer): ): scene = gs.Scene( renderer=renderer, + show_viewer=False, + show_FPS=False, ) + if renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER): + scene.add_light( + pos=(0.0, 0.0, 1.5), + dir=(1.0, 1.0, -2.0), + directional=True, + castshadow=True, + cutoff=45.0, + intensity=0.5, + ) + scene.add_light( + pos=(4.0, -4.0, 4.0), + dir=(-1.0, 1.0, -1.0), + directional=False, + castshadow=True, + cutoff=45.0, + intensity=0.5, + ) plane = scene.add_entity( gs.morphs.Plane(plane_size=plane_size, tile_size=tile_size), ) @@ -1002,6 +1149,7 @@ def test_batch_deformable_render(tmp_path, monkeypatch, png_snapshot): visualize_sph_boundary=True, ), show_viewer=True, + show_FPS=False, ) plane = scene.add_entity( diff --git a/tests/test_sensors.py b/tests/test_sensors.py index d4f92072a..96892852f 100644 --- a/tests/test_sensors.py +++ b/tests/test_sensors.py @@ -347,101 +347,3 @@ def test_raycaster_hits(show_viewer, tol, n_envs): tol=1e-2, # since sphere mesh is discretized, we need a larger tolerance here err_msg=f"Raycaster spherical pattern should measure {EXPECTED_DISTANCE}m to the sphere around it", ) - - -@pytest.mark.required -@pytest.mark.parametrize("n_envs", [0, 2]) -def test_sensors_draw_debug(png_snapshot, n_envs): - """Test that sensor debug drawing works correctly and renders visible debug elements.""" - CAM_RES = (640, 480) - - scene = gs.Scene( - viewer_options=gs.options.ViewerOptions( - camera_pos=(2.0, 2.0, 2.0), - camera_lookat=(0.0, 0.0, 0.2), - camera_fov=30, - res=CAM_RES, - ), - profiling_options=gs.options.ProfilingOptions(show_FPS=False), - show_viewer=True, - ) - - scene.add_entity(gs.morphs.Plane()) - - floating_box = scene.add_entity( - gs.morphs.Box( - size=(0.1, 0.1, 0.1), - pos=(0.0, 0.0, 0.5), - fixed=True, - ) - ) - scene.add_sensor( - gs.sensors.IMU( - entity_idx=floating_box.idx, - pos_offset=(0.0, 0.0, 0.1), - draw_debug=True, - ) - ) - - ground_box = scene.add_entity( - gs.morphs.Box( - size=(0.4, 0.2, 0.1), - pos=(-0.25, 0.0, 0.05), - ) - ) - scene.add_sensor( - gs.sensors.Contact( - entity_idx=ground_box.idx, - draw_debug=True, - debug_sphere_radius=0.08, - debug_color=(1.0, 0.5, 1.0, 1.0), - ) - ) - scene.add_sensor( - gs.sensors.ContactForce( - entity_idx=ground_box.idx, - draw_debug=True, - debug_scale=0.01, - ) - ) - scene.add_sensor( - gs.sensors.Raycaster( - pattern=gs.sensors.raycaster.GridPattern( - resolution=0.2, - size=(0.4, 0.4), - direction=(0.0, 0.0, -1.0), - ), - entity_idx=floating_box.idx, - pos_offset=(0.2, 0.0, -0.1), - return_world_frame=True, - draw_debug=True, - ) - ) - scene.add_sensor( - gs.sensors.Raycaster( - pattern=gs.sensors.raycaster.SphericalPattern( - n_points=(6, 6), - fov=(60.0, (-120.0, -60.0)), - ), - entity_idx=floating_box.idx, - pos_offset=(0.0, 0.5, 0.0), - return_world_frame=False, - draw_debug=True, - debug_sphere_radius=0.01, - debug_ray_start_color=(1.0, 1.0, 0.0, 1.0), - debug_ray_hit_color=(0.5, 1.0, 
1.0, 1.0), - ) - ) - - scene.build(n_envs=n_envs) - - for _ in range(5): - scene.step() - - pyrender_viewer = scene.visualizer.viewer._pyrender_viewer - assert pyrender_viewer.is_active - rgb_arr, *_ = pyrender_viewer.render_offscreen( - pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False - ) - - assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot diff --git a/tests/utils.py b/tests/utils.py index 83243d850..07d9e246b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -32,7 +32,7 @@ DEFAULT_BRANCH_NAME = "main" HUGGINGFACE_ASSETS_REVISION = "16e4eae0024312b84518f4b555dd630d6b34095a" -HUGGINGFACE_SNAPSHOT_REVISION = "0113767941691c02205350b89bf3a087d8a2b617" +HUGGINGFACE_SNAPSHOT_REVISION = "fdcaa27c91a8d59abf9a0c9121767c6c4a8d4cb6" MESH_EXTENSIONS = (".mtl", *MESH_FORMATS, *GLTF_FORMATS, *USD_FORMATS) IMAGE_EXTENSIONS = (".png", ".jpg") From d4ec32b158680562727bf1fff9b0c8170c76b33a Mon Sep 17 00:00:00 2001 From: Alexis Duburcq Date: Tue, 7 Oct 2025 10:02:25 +0200 Subject: [PATCH 19/28] Fix offscreen interactive viewer. --- genesis/vis/visualizer.py | 4 +++- tests/conftest.py | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/genesis/vis/visualizer.py b/genesis/vis/visualizer.py index 1f2c14b48..761cc8449 100644 --- a/genesis/vis/visualizer.py +++ b/genesis/vis/visualizer.py @@ -1,3 +1,5 @@ +import os + import genesis as gs from genesis.repr_base import RBC @@ -42,7 +44,7 @@ def __init__(self, scene, show_viewer, vis_options, viewer_options, renderer_opt self._context = RasterizerContext(vis_options) try: - screen_height, _, screen_scale = gs.utils.try_get_display_size() + screen_height, _screen_width, screen_scale = gs.utils.try_get_display_size() self._has_display = True except Exception as e: if show_viewer: diff --git a/tests/conftest.py b/tests/conftest.py index e6cca7734..968084149 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -45,7 +45,6 @@ # It is necessary to configure pyglet in headless mode if necessary before importing Genesis. # Note that environment variables are used instead of global options to ease option propagation to subprocesses. os.environ["PYGLET_HEADLESS"] = "1" - os.environ["GS_VIEWER_ALLOW_OFFSCREEN"] = "1" IS_INTERACTIVE_VIEWER_AVAILABLE = has_display or has_egl From 46ae857c49e04f7dfeadb8a2adb80f128bf3b477 Mon Sep 17 00:00:00 2001 From: Alexis Duburcq Date: Tue, 7 Oct 2025 10:20:17 +0200 Subject: [PATCH 20/28] Fix viewer deadlock in case of missing display. 
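When no display is available but EGL is, pyglet has to be switched to headless mode before Genesis (and therefore pyrender) is imported, otherwise opening the interactive viewer blocks waiting for a display that will never appear. A minimal sketch of the configuration added to tests/conftest.py below, assuming pyglet is installed; the has_display/has_egl flags stand in for whatever probing the test suite already performs:

import os

import pyglet


def configure_headless(has_display: bool, has_egl: bool) -> None:
    if not has_display and has_egl:
        # Global pyglet option for the current process...
        pyglet.options["headless"] = True
        # ...plus an environment variable so that spawned subprocesses inherit it.
        os.environ["PYGLET_HEADLESS"] = "1"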
--- .github/workflows/production.yml | 2 +- genesis/ext/pyrender/viewer.py | 4 ++-- genesis/sensors/raycaster/raycaster.py | 2 +- genesis/vis/visualizer.py | 2 -- tests/conftest.py | 1 + 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/production.yml b/.github/workflows/production.yml index 449daac00..3927e9718 100644 --- a/.github/workflows/production.yml +++ b/.github/workflows/production.yml @@ -24,7 +24,7 @@ jobs: WANDB_API_KEY: ${{ secrets.WANDB_API_KEY }} HF_TOKEN: ${{ secrets.HF_TOKEN }} HF_HUB_DOWNLOAD_TIMEOUT: 60 - GENESIS_IMAGE_VER: "1_3" + GENESIS_IMAGE_VER: "1_4" TIMEOUT_MINUTES: 180 FORCE_COLOR: 1 PY_COLORS: 1 diff --git a/genesis/ext/pyrender/viewer.py b/genesis/ext/pyrender/viewer.py index 8dc1ac515..88d56a317 100644 --- a/genesis/ext/pyrender/viewer.py +++ b/genesis/ext/pyrender/viewer.py @@ -1225,8 +1225,8 @@ def get_program(self, vertex_shader, fragment_shader, geometry_shader=None, defi def start(self, auto_refresh=True): import pyglet # For some reason, this is necessary if 'pyglet.window.xlib' fails to import... try: - import pyglet.window.xlib - xlib_exceptions = (pyglet.window.xlib.XlibException,) + import pyglet.window.xlib, pyglet.display.xlib + xlib_exceptions = (pyglet.window.xlib.XlibException, pyglet.display.xlib.NoSuchDisplayException) except ImportError: xlib_exceptions = () diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py index 170dc2bf0..f2aaf553d 100644 --- a/genesis/sensors/raycaster/raycaster.py +++ b/genesis/sensors/raycaster/raycaster.py @@ -232,7 +232,7 @@ def kernel_cast_rays( if node.left == -1: # is leaf node # A leaf node corresponds to one of the sorted triangles. Find the original triangle index. sorted_leaf_idx = node_idx - (n_triangles - 1) - original_tri_idx = bvh_morton_codes[0, sorted_leaf_idx][1] + original_tri_idx = ti.cast(bvh_morton_codes[0, sorted_leaf_idx][1], ti.i32) i_f = map_faces[original_tri_idx] is_fixed = verts_info.is_fixed[faces_info.verts_idx[i_f][0]] diff --git a/genesis/vis/visualizer.py b/genesis/vis/visualizer.py index 761cc8449..0f88295af 100644 --- a/genesis/vis/visualizer.py +++ b/genesis/vis/visualizer.py @@ -1,5 +1,3 @@ -import os - import genesis as gs from genesis.repr_base import RBC diff --git a/tests/conftest.py b/tests/conftest.py index 968084149..3327e9a6e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -44,6 +44,7 @@ if not has_display and has_egl: # It is necessary to configure pyglet in headless mode if necessary before importing Genesis. # Note that environment variables are used instead of global options to ease option propagation to subprocesses. + pyglet.options["headless"] = True os.environ["PYGLET_HEADLESS"] = "1" IS_INTERACTIVE_VIEWER_AVAILABLE = has_display or has_egl From fa36ee9f9c926d65d19beff713b1815658dd3026 Mon Sep 17 00:00:00 2001 From: Alexis Duburcq Date: Tue, 7 Oct 2025 10:58:55 +0200 Subject: [PATCH 21/28] Fix example unit tests. 
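The examples now cap their run length when driven by the test suite, keyed on the PYTEST_VERSION environment variable that recent pytest versions export. A minimal sketch of the pattern applied to the examples below; the function name is an assumption made for the sketch:

import os


def n_steps(seconds: float, timestep: float) -> int:
    # Full duration when run by hand, only a handful of steps under pytest so the
    # example-based tests stay fast (5 is the cap used in this patch).
    if "PYTEST_VERSION" in os.environ:
        return 5
    return int(seconds / timestep)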
--- examples/keyboard_teleop.py | 25 ------------------------- examples/sensors/contact_force_go2.py | 23 +++++++++++++++-------- examples/sensors/imu_franka.py | 15 +++++++++++---- examples/sensors/lidar_teleop.py | 9 ++++++++- tests/test_render.py | 2 +- tests/utils.py | 2 +- 6 files changed, 36 insertions(+), 40 deletions(-) diff --git a/examples/keyboard_teleop.py b/examples/keyboard_teleop.py index 9b80c5d74..a8c5c05a2 100644 --- a/examples/keyboard_teleop.py +++ b/examples/keyboard_teleop.py @@ -23,31 +23,6 @@ from scipy.spatial.transform import Rotation as R -class KeyboardDevice: - def __init__(self): - self.pressed_keys = set() - self.lock = threading.Lock() - self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release) - - def start(self): - self.listener.start() - - def stop(self): - self.listener.stop() - self.listener.join() - - def on_press(self, key: keyboard.Key): - with self.lock: - self.pressed_keys.add(key) - - def on_release(self, key: keyboard.Key): - with self.lock: - self.pressed_keys.discard(key) - - def get_cmd(self): - return self.pressed_keys - - class KeyboardDevice: def __init__(self): self.pressed_keys = set() diff --git a/examples/sensors/contact_force_go2.py b/examples/sensors/contact_force_go2.py index 9a9178822..7895f926a 100644 --- a/examples/sensors/contact_force_go2.py +++ b/examples/sensors/contact_force_go2.py @@ -1,4 +1,5 @@ import argparse +import os from tqdm import tqdm @@ -8,11 +9,11 @@ def main(): parser = argparse.ArgumentParser() - parser.add_argument("-dt", "--timestep", type=float, default=1e-2, help="Simulation time step") + parser.add_argument("-dt", "--timestep", type=float, default=0.01, help="Simulation time step") parser.add_argument("-v", "--vis", action="store_true", default=True, help="Show visualization GUI") parser.add_argument("-nv", "--no-vis", action="store_false", dest="vis", help="Disable visualization GUI") parser.add_argument("-c", "--cpu", action="store_true", help="Use CPU instead of GPU") - parser.add_argument("-t", "--seconds", type=float, default=2, help="Number of seconds to simulate") + parser.add_argument("-t", "--seconds", type=float, default=2.0, help="Number of seconds to simulate") parser.add_argument("-f", "--force", action="store_true", default=True, help="Use ContactForceSensor (xyz float)") parser.add_argument("-nf", "--no-force", action="store_false", dest="force", help="Use ContactSensor (boolean)") @@ -23,19 +24,25 @@ def main(): ########################## scene setup ########################## scene = gs.Scene( - sim_options=gs.options.SimOptions(dt=args.timestep), + sim_options=gs.options.SimOptions( + dt=args.timestep, + ), rigid_options=gs.options.RigidOptions( - use_gjk_collision=True, constraint_timeconst=max(0.01, 2 * args.timestep), + use_gjk_collision=True, + ), + vis_options=gs.options.VisOptions( + show_world_frame=True, + ), + profiling_options=gs.options.ProfilingOptions( + show_FPS=False, ), - vis_options=gs.options.VisOptions(show_world_frame=True), - profiling_options=gs.options.ProfilingOptions(show_FPS=False), show_viewer=args.vis, ) scene.add_entity(gs.morphs.Plane()) - foot_link_names = ["FR_foot", "FL_foot", "RR_foot", "RL_foot"] + foot_link_names = ("FR_foot", "FL_foot", "RR_foot", "RL_foot") go2 = scene.add_entity( gs.morphs.URDF( file="urdf/go2/urdf/go2.urdf", @@ -79,7 +86,7 @@ def main(): scene.build() try: - steps = int(args.seconds / args.timestep) + steps = int(args.seconds / args.timestep) if "PYTEST_VERSION" not in os.environ else 5 for _ in 
tqdm(range(steps)): scene.step() except KeyboardInterrupt: diff --git a/examples/sensors/imu_franka.py b/examples/sensors/imu_franka.py index ab142feb1..6cca4e250 100644 --- a/examples/sensors/imu_franka.py +++ b/examples/sensors/imu_franka.py @@ -1,4 +1,5 @@ import argparse +import os import numpy as np from tqdm import tqdm @@ -21,14 +22,20 @@ def main(): ########################## create a scene ########################## scene = gs.Scene( - sim_options=gs.options.SimOptions(dt=args.timestep), - vis_options=gs.options.VisOptions(show_world_frame=False), + sim_options=gs.options.SimOptions( + dt=args.timestep, + ), + vis_options=gs.options.VisOptions( + show_world_frame=False, + ), viewer_options=gs.options.ViewerOptions( camera_pos=(3.5, 0.0, 2.5), camera_lookat=(0.0, 0.0, 0.5), camera_fov=40, ), - profiling_options=gs.options.ProfilingOptions(show_FPS=False), + profiling_options=gs.options.ProfilingOptions( + show_FPS=False, + ), show_viewer=args.vis, ) @@ -112,7 +119,7 @@ def data_func(): rate = np.deg2rad(2.0) try: - steps = int(args.seconds / args.timestep) + steps = int(args.seconds / args.timestep) if "PYTEST_VERSION" not in os.environ else 5 for i in tqdm(range(steps)): scene.step() diff --git a/examples/sensors/lidar_teleop.py b/examples/sensors/lidar_teleop.py index d90dd83ef..dc71ac908 100644 --- a/examples/sensors/lidar_teleop.py +++ b/examples/sensors/lidar_teleop.py @@ -1,4 +1,5 @@ import argparse +import os import threading import numpy as np @@ -40,7 +41,11 @@ def start(self): self.listener.start() def stop(self): - self.listener.stop() + try: + self.listener.stop() + except NotImplementedError: + # Dummy backend does not implement stop + pass self.listener.join() def on_press(self, key: "keyboard.Key"): @@ -225,6 +230,8 @@ def apply_pose_to_all_envs(pos_np: np.ndarray, quat_np: np.ndarray): apply_pose_to_all_envs(target_pos, euler_to_quat(target_euler)) scene.step() + if "PYTEST_VERSION" in os.environ: + break except KeyboardInterrupt: gs.logger.info("Simulation interrupted, exiting.") finally: diff --git a/tests/test_render.py b/tests/test_render.py index 36a66d5af..9670ea7c4 100644 --- a/tests/test_render.py +++ b/tests/test_render.py @@ -1052,7 +1052,7 @@ def on_key_press(self, symbol: int, modifiers: int): "renderer_type", [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER], ) -def test_render_planes(tmp_path, png_snapshot, renderer): +def test_render_planes(tmp_path, png_snapshot, renderer_type, renderer): CAM_RES = (256, 256) for test_idx, (plane_size, tile_size) in enumerate( diff --git a/tests/utils.py b/tests/utils.py index 07d9e246b..0d452e1c9 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -32,7 +32,7 @@ DEFAULT_BRANCH_NAME = "main" HUGGINGFACE_ASSETS_REVISION = "16e4eae0024312b84518f4b555dd630d6b34095a" -HUGGINGFACE_SNAPSHOT_REVISION = "fdcaa27c91a8d59abf9a0c9121767c6c4a8d4cb6" +HUGGINGFACE_SNAPSHOT_REVISION = "c5ba0d9d8eccbccb211295dd1a447c20a84299b9" MESH_EXTENSIONS = (".mtl", *MESH_FORMATS, *GLTF_FORMATS, *USD_FORMATS) IMAGE_EXTENSIONS = (".png", ".jpg") From e8224d644fe72196799adf167590b42b2c9ddb8b Mon Sep 17 00:00:00 2001 From: Alexis Duburcq Date: Tue, 7 Oct 2025 12:16:46 +0200 Subject: [PATCH 22/28] Increase tolerance unit test to avoid flakiness. 
---
 tests/test_integration.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_integration.py b/tests/test_integration.py
index 1f8dc5f1a..cf25a121e 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -314,4 +314,4 @@ def test_franka_panda_grasp_fem_entity(primitive_type, show_viewer):
         franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
         scene.step()
     box_pos_post = obj.get_state().pos.mean(dim=-2)
-    assert_allclose(box_pos_f, box_pos_post, atol=2e-4)
+    assert_allclose(box_pos_f, box_pos_post, atol=5e-4)

From dbba03e0fe82110b5f219dec4abafca6049a493e Mon Sep 17 00:00:00 2001
From: Alexis Duburcq
Date: Tue, 7 Oct 2025 18:29:51 +0200
Subject: [PATCH 23/28] Add Hugging Face Xet deps.

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 42fab79f7..5ef0f6c9b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -79,7 +79,7 @@ dev = [
     # - 16.0 is causing pytest-xdist to crash in case of failure or skipped tests
     "pytest-rerunfailures<16.0",
     "syrupy",
-    "huggingface_hub",
+    "huggingface_hub[hf_xet]",
     "wandb",
     "ipython",
     # * Mujoco 3.3.6 made contact islands an opt-out option instead of opt-in,

From d37c5daf08a43f56341fa5ed89a3cee7af48ed6f Mon Sep 17 00:00:00 2001
From: Alexis Duburcq
Date: Tue, 7 Oct 2025 18:30:47 +0200
Subject: [PATCH 24/28] Cleanup sensor draw debug unit test.

---
 tests/test_render.py | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/tests/test_render.py b/tests/test_render.py
index 9670ea7c4..956ca539c 100644
--- a/tests/test_render.py
+++ b/tests/test_render.py
@@ -872,13 +872,14 @@ def test_draw_debug(renderer, show_viewer):
 @pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
 def test_sensors_draw_debug(n_envs, renderer, png_snapshot):
     """Test that sensor debug drawing works correctly and renders visible debug elements."""
-    CAM_RES = (640, 480)
-
     scene = gs.Scene(
         viewer_options=gs.options.ViewerOptions(
             camera_pos=(2.0, 2.0, 2.0),
             camera_lookat=(0.0, 0.0, 0.2),
-            res=CAM_RES,
+            # Force screen-independent low-quality resolution when running unit tests for consistency
+            res=(640, 480),
+            # Enable running in background thread if supported by the platform
+            run_in_thread=(sys.platform == "linux"),
         ),
         profiling_options=gs.options.ProfilingOptions(
             show_FPS=False,
@@ -962,7 +963,13 @@ def test_sensors_draw_debug(n_envs, renderer, png_snapshot):
     pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
     assert pyrender_viewer.is_active
     rgb_arr, *_ = pyrender_viewer.render_offscreen(
-        pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False
+        pyrender_viewer._camera_node,
+        pyrender_viewer._renderer,
+        rgb=True,
+        depth=False,
+        seg=False,
+        normal=False,
+        force_render=True,
     )
 
     assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot

From 1956f04491aee204c20cb2daac0254b6295431ef Mon Sep 17 00:00:00 2001
From: Alexis Duburcq
Date: Tue, 7 Oct 2025 18:53:20 +0200
Subject: [PATCH 25/28] Fix draw debug sensor update for raycaster.
---
 genesis/sensors/base_sensor.py         |  2 +-
 genesis/sensors/contact_force.py       |  8 +++---
 genesis/sensors/imu.py                 |  2 +-
 genesis/sensors/raycaster/raycaster.py | 34 ++++++++++++++------------
 genesis/sensors/sensor_manager.py      |  5 ++--
 genesis/vis/rasterizer_context.py      | 19 ++++++++------
 pyproject.toml                         |  2 +-
 tests/test_render.py                   |  1 -
 8 files changed, 41 insertions(+), 32 deletions(-)

diff --git a/genesis/sensors/base_sensor.py b/genesis/sensors/base_sensor.py
index 031c19ccd..5d39f2117 100644
--- a/genesis/sensors/base_sensor.py
+++ b/genesis/sensors/base_sensor.py
@@ -212,7 +212,7 @@ def _get_cache_dtype(cls) -> torch.dtype:
         """
         raise NotImplementedError(f"{cls.__name__} has not implemented `get_cache_dtype()`.")
 
-    def _draw_debug(self, context: "RasterizerContext"):
+    def _draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
        """
         Draw debug shapes for the sensor in the scene.
         """
diff --git a/genesis/sensors/contact_force.py b/genesis/sensors/contact_force.py
index 30a55c6ab..c1283daaf 100644
--- a/genesis/sensors/contact_force.py
+++ b/genesis/sensors/contact_force.py
@@ -159,7 +159,7 @@ def _update_shared_cache(
             buffered_data.append(shared_ground_truth_cache)
         cls._apply_delay_to_shared_cache(shared_metadata, shared_cache, buffered_data)
 
-    def _draw_debug(self, context: "RasterizerContext"):
+    def _draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
         """
         Draw debug sphere when the sensor detects contact.
 
@@ -168,7 +168,7 @@ def _draw_debug(self, context: "RasterizerContext"):
         env_idx = context.rendered_envs_idx[0] if self._manager._sim.n_envs > 0 else None
 
         pos = self._link.get_pos(envs_idx=env_idx).squeeze(0)
-        is_contact = self.read(envs_idx=env_idx).squeeze(0).item()
+        is_contact = self.read(envs_idx=env_idx).item()
 
         if is_contact:
             if self.debug_object is None:
@@ -176,7 +176,7 @@ def _draw_debug(self, context: "RasterizerContext"):
                     pos=pos, radius=self._options.debug_sphere_radius, color=self._options.debug_color
                 )
             else:
-                context.update_debug_objects([self.debug_object], trans_to_T(pos).unsqueeze(0))
+                buffer_updates.update(context.get_buffer_debug_objects([self.debug_object], [trans_to_T(pos)]))
         elif self.debug_object is not None:
             context.clear_debug_object(self.debug_object)
             self.debug_object = None
@@ -350,7 +350,7 @@ def _update_shared_cache(
             shared_cache_per_sensor[torch.abs(shared_cache_per_sensor) < shared_metadata.min_force] = 0.0
         cls._quantize_to_resolution(shared_metadata.resolution, shared_cache)
 
-    def _draw_debug(self, context: "RasterizerContext"):
+    def _draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
         """
         Draw debug arrow representing the contact force.
diff --git a/genesis/sensors/imu.py b/genesis/sensors/imu.py
index 3f67c64e9..52c2793e4 100644
--- a/genesis/sensors/imu.py
+++ b/genesis/sensors/imu.py
@@ -321,7 +321,7 @@ def _update_shared_cache(
         cls._add_noise_drift_bias(shared_metadata, shared_cache)
         cls._quantize_to_resolution(shared_metadata.resolution, shared_cache)
 
-    def _draw_debug(self, context: "RasterizerContext"):
+    def _draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
         """
         Draw debug arrow for the IMU acceleration.
diff --git a/genesis/sensors/raycaster/raycaster.py b/genesis/sensors/raycaster/raycaster.py
index f2aaf553d..b8e6df31b 100644
--- a/genesis/sensors/raycaster/raycaster.py
+++ b/genesis/sensors/raycaster/raycaster.py
@@ -3,6 +3,7 @@
 from typing import TYPE_CHECKING, Any, NamedTuple, Type
 
 import gstaichi as ti
+import numpy as np
 import torch
 from pydantic import Field
 
@@ -563,7 +564,7 @@ def _update_shared_cache(
             buffered_data.append(shared_ground_truth_cache)
         cls._apply_delay_to_shared_cache(shared_metadata, shared_cache, buffered_data)
 
-    def _draw_debug(self, context: "RasterizerContext"):
+    def _draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
         """
         Draw hit points as spheres in the scene.
 
@@ -582,20 +583,23 @@ def _draw_debug(self, context: "RasterizerContext"):
         points = transform_by_trans_quat(points + self.ray_starts, pos, quat)
 
         if not self.debug_objects:
-            self.debug_objects.append(
-                context.draw_debug_spheres(
-                    ray_starts,
-                    radius=self._options.debug_sphere_radius,
-                    color=self._options.debug_ray_start_color,
+            for ray_start in ray_starts:
+                self.debug_objects.append(
+                    context.draw_debug_sphere(
+                        ray_start,
+                        radius=self._options.debug_sphere_radius,
+                        color=self._options.debug_ray_start_color,
+                    )
                 )
-            )
-            self.debug_objects.append(
-                context.draw_debug_spheres(
-                    points,
-                    radius=self._options.debug_sphere_radius,
-                    color=self._options.debug_ray_hit_color,
+            for point in points:
+                self.debug_objects.append(
+                    context.draw_debug_sphere(
+                        point,
+                        radius=self._options.debug_sphere_radius,
+                        color=self._options.debug_ray_hit_color,
+                    )
                 )
-            )
         else:
-            context.update_debug_objects([self.debug_objects[0]], trans_to_T(ray_starts).unsqueeze(0))
-            context.update_debug_objects([self.debug_objects[1]], trans_to_T(points).unsqueeze(0))
+            buffer_updates.update(
+                context.get_buffer_debug_objects(self.debug_objects, trans_to_T(torch.cat([ray_starts, points], dim=0)))
+            )
diff --git a/genesis/sensors/sensor_manager.py b/genesis/sensors/sensor_manager.py
index a7d20531b..834f99419 100644
--- a/genesis/sensors/sensor_manager.py
+++ b/genesis/sensors/sensor_manager.py
@@ -1,5 +1,6 @@
 from typing import TYPE_CHECKING, Type
 
+import numpy as np
 import torch
 
 from genesis.utils.ring_buffer import TensorRingBuffer
@@ -86,10 +87,10 @@ def step(self):
                 self._buffered_data[dtype][:, cache_slice],
             )
 
-    def draw_debug(self, context: "RasterizerContext"):
+    def draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
         for sensor in self.sensors:
             if sensor._options.draw_debug:
-                sensor._draw_debug(context)
+                sensor._draw_debug(context, buffer_updates)
 
     def reset(self, envs_idx=None):
         envs_idx = self._sim._scene._sanitize_envs_idx(envs_idx)
diff --git a/genesis/vis/rasterizer_context.py b/genesis/vis/rasterizer_context.py
index c3fa3a856..6ede33367 100644
--- a/genesis/vis/rasterizer_context.py
+++ b/genesis/vis/rasterizer_context.py
@@ -803,8 +803,8 @@ def update_fem(self, buffer_updates):
                 update_data = self._scene.reorder_vertices(node, vertices)
                 buffer_updates[self._scene.get_buffer_id(node, "pos")] = update_data
 
-    def update_sensors(self):
-        self.sim._sensor_manager.draw_debug(self)
+    def update_sensors(self, buffer_updates):
+        self.sim._sensor_manager.draw_debug(self, buffer_updates)
 
     def on_lights(self):
         for light in self.lights:
@@ -907,15 +907,20 @@ def draw_debug_points(self, poss, colors=(1.0, 0.0, 0.0, 0.5)):
         self.add_external_node(node)
         return node
 
-    def update_debug_objects(self, objs, poses):
-        poses = tensor_to_array(poses)
+    def get_buffer_debug_objects(self, objs, poses):
         buffer_updates = {}
         for obj, pose in zip(objs, poses):
+            pose = tensor_to_array(pose)
+            if pose.ndim != 3:
+                pose = np.tile(pose[np.newaxis], (max(self.scene.n_envs, 1), 1, 1))
             obj._bounds = None
             obj.primitives[0].poses = pose
             node = self.external_nodes[obj.name]
-            buffer_updates[self._scene.get_buffer_id(node, "model")] = poses.swapaxes(-2, -1)
-        self.jit.update_buffer(buffer_updates)
+            buffer_updates[self._scene.get_buffer_id(node, "model")] = pose.transpose((0, 2, 1))
+        return buffer_updates
+
+    def update_debug_objects(self, objs, poses):
+        self.jit.update_buffer(self.get_buffer_debug_objects(objs, poses))
 
     def clear_debug_object(self, obj):
         self.clear_external_node(obj)
@@ -949,7 +954,7 @@ def update(self, force_render: bool = False):
         self.update_sph(self.buffer)
         self.update_pbd(self.buffer)
         self.update_fem(self.buffer)
-        self.update_sensors()
+        self.update_sensors(self.buffer)
 
     def add_light(self, light):
         # light direction is light pose's -z frame
diff --git a/pyproject.toml b/pyproject.toml
index 5ef0f6c9b..89026c6ec 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -154,7 +154,7 @@ addopts = [
     "--random-order-seed=0",
     # "--max-worker-restart=0",
     "--durations=0",
-    "--durations-min=40.0",
+    "--durations-min=100.0",
     "-m not (benchmarks or examples)",
 ]
 filterwarnings = [
diff --git a/tests/test_render.py b/tests/test_render.py
index 956ca539c..4e7fab892 100644
--- a/tests/test_render.py
+++ b/tests/test_render.py
@@ -969,7 +969,6 @@ def test_sensors_draw_debug(n_envs, renderer, png_snapshot):
         depth=False,
         seg=False,
         normal=False,
-        force_render=True,
     )
 
     assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot

From cc25f4666d9c8e616f2428f441919ea114ce9ca8 Mon Sep 17 00:00:00 2001
From: Alexis Duburcq
Date: Tue, 7 Oct 2025 20:30:58 +0200
Subject: [PATCH 26/28] Expose option to force refreshing rasterizer context.

---
 genesis/vis/viewer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/genesis/vis/viewer.py b/genesis/vis/viewer.py
index a15f80b45..4eba3af82 100644
--- a/genesis/vis/viewer.py
+++ b/genesis/vis/viewer.py
@@ -142,7 +142,7 @@ def setup_camera(self):
         pose = gu.pos_lookat_up_to_T(self._camera_init_pos, self._camera_init_lookat, self._camera_up)
         self._camera_node = self.context.add_node(pyrender.PerspectiveCamera(yfov=yfov), pose=pose)
 
-    def update(self, auto_refresh=None):
+    def update(self, auto_refresh=None, force=False):
         if self._followed_entity is not None:
             self.update_following()
 
@@ -150,7 +150,7 @@ def update(self, auto_refresh=None):
 
         with self.lock:
             # Update context
-            self.context.update()
+            self.context.update(force)
 
             # Refresh viewer by default if and if this is possible
             if auto_refresh is None:

From b4c20b532755fbc0f7bbeef804915b38276360d1 Mon Sep 17 00:00:00 2001
From: Alexis Duburcq
Date: Tue, 7 Oct 2025 18:30:24 +0200
Subject: [PATCH 27/28] Force headless window if supported and '--vis' is not specified in unit tests.

---
 tests/conftest.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/conftest.py b/tests/conftest.py
index 3327e9a6e..4d721159d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -92,6 +92,10 @@ def pytest_cmdline_main(config: pytest.Config) -> None:
     if show_viewer:
         config.option.numprocesses = 0
 
+    # Force headless rendering if available and the interactive viewer is disabled
+    if not show_viewer and has_egl:
+        pyglet.options["headless"] = True
+
     # Disable low-level parallelization if distributed framework is enabled.
     # FIXME: It should be set to `max(int(physical_core_count / num_workers), 1)`, but 'num_workers' may be unknown.
     if not is_benchmarks and config.option.numprocesses != 0:

From bf4abf712cb5f1ea6c4948f595d0e3f3be15a2bb Mon Sep 17 00:00:00 2001
From: Alexis Duburcq
Date: Wed, 8 Oct 2025 00:21:41 +0200
Subject: [PATCH 28/28] Fix rasterizer rendering discrepancy across platforms.

---
 genesis/ext/pyrender/viewer.py | 2 ++
 tests/test_render.py           | 6 ++++++
 tests/utils.py                 | 2 +-
 3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/genesis/ext/pyrender/viewer.py b/genesis/ext/pyrender/viewer.py
index 88d56a317..979addbc6 100644
--- a/genesis/ext/pyrender/viewer.py
+++ b/genesis/ext/pyrender/viewer.py
@@ -1235,12 +1235,14 @@ def start(self, auto_refresh=True):
         confs = [
             pyglet.gl.Config(
                 depth_size=24,
+                alpha_size=8,  # This parameter is essential to ensure proper pixel matching across platforms
                 double_buffer=True,  # Double buffering to avoid flickering
                 major_version=TARGET_OPEN_GL_MAJOR,
                 minor_version=TARGET_OPEN_GL_MINOR,
             ),
             pyglet.gl.Config(
                 depth_size=24,
+                alpha_size=8,
                 double_buffer=True,
                 major_version=MIN_OPEN_GL_MAJOR,
                 minor_version=MIN_OPEN_GL_MINOR,
             ),
diff --git a/tests/test_render.py b/tests/test_render.py
index 4e7fab892..dad31f8bb 100644
--- a/tests/test_render.py
+++ b/tests/test_render.py
@@ -971,6 +971,12 @@ def test_sensors_draw_debug(n_envs, renderer, png_snapshot):
         normal=False,
     )
 
+    if sys.platform == "darwin":
+        glinfo = pyrender_viewer.context.get_info()
+        renderer = glinfo.get_renderer()
+        if renderer == "Apple Software Renderer":
+            pytest.xfail("Tile ground colors are altered on Apple Software Renderer.")
+
     assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot
 
diff --git a/tests/utils.py b/tests/utils.py
index 0d452e1c9..baf1ae4a1 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -32,7 +32,7 @@
 DEFAULT_BRANCH_NAME = "main"
 
 HUGGINGFACE_ASSETS_REVISION = "16e4eae0024312b84518f4b555dd630d6b34095a"
-HUGGINGFACE_SNAPSHOT_REVISION = "c5ba0d9d8eccbccb211295dd1a447c20a84299b9"
+HUGGINGFACE_SNAPSHOT_REVISION = "15e836c732972cd8ddf57e43136986b34653b279"
 
 MESH_EXTENSIONS = (".mtl", *MESH_FORMATS, *GLTF_FORMATS, *USD_FORMATS)
 IMAGE_EXTENSIONS = (".png", ".jpg")