diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000000..55fe8e95b1 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,84 @@ +# This is a basic workflow to help you get started with Actions + +name: CI + +# Controls when the workflow will run +on: + # Triggers the workflow on push or pull request events but only for the ros1-legacy branch + push: + branches: [ ros1-legacy ] + pull_request: + branches: [ ros1-legacy ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + name: Build on ros ${{ matrix.ros_distro }} and ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04] + include: + - os: ubuntu-20.04 + ros_distro: 'noetic' + _python: 'python3' + + steps: + - uses: ros-tooling/setup-ros@v0.7 + with: + required-ros-distributions: ${{ matrix.ros_distro }} + + - name: Install dependencies + run: | + echo _python:${{ matrix._python }} + echo ros_distro:${{ matrix.ros_distro }} + sudo apt-get update + sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com --recv-key C8B3A55A6F3EFCDE + sudo add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" + sudo apt-get update -qq + sudo apt-get install librealsense2-dev --allow-unauthenticated -y + + - name: Setup catkin workspace + run: | + echo "source /opt/ros/${{ matrix.ros_distro }}/setup.bash" >> ${{github.workspace}}/.bashrc + source ${{github.workspace}}/.bashrc + mkdir -p ${{github.workspace}}/catkin_ws/src + cd ${{github.workspace}}/catkin_ws/src + catkin_init_workspace + cd .. 
+ + - uses: actions/checkout@v2 + with: + path: 'catkin_ws/src/realsense-ros' + + - name: Build + run: | + source ${{github.workspace}}/.bashrc + cd ${{github.workspace}}/catkin_ws + rosdep update + rosdep install -i --from-path src --rosdistro ${{ matrix.ros_distro }} -y + + catkin_make -DCATKIN_ENABLE_TESTING=False -DCMAKE_BUILD_TYPE=Release + catkin_make install + echo "source ${{github.workspace}}/catkin_ws/devel/setup.bash" >> ${{github.workspace}}/.bashrc + source ${{github.workspace}}/.bashrc + + - name: Download data + run: | + cd ${{github.workspace}}/catkin_ws + bag_filename="https://librealsense.intel.com/rs-tests/TestData/outdoors_1color.bag"; + wget $bag_filename -P "records/" + bag_filename="https://librealsense.intel.com/rs-tests/D435i_Depth_and_IMU_Stands_still.bag"; + wget $bag_filename -P "records/" + + - name: Run tests + run: | + cd ${{github.workspace}}/catkin_ws + source ${{github.workspace}}/.bashrc + ${{ matrix._python }} src/realsense-ros/realsense2_camera/scripts/rs2_test.py non_existent_file + diff --git a/.travis.yml b/.travis.yml index 889acc1931..6cd0f49fc9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,7 @@ before_install: - echo _python:$_python - echo _ros_dist:$_ros_dist - sudo apt-key adv --keyserver hkp://keys.gnupg.net:80 --recv-key C8B3A55A6F3EFCDE - - sudo add-apt-repository "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo $(lsb_release -sc) main" + - sudo add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" - sudo apt-get update -qq - sudo apt-get install librealsense2-dkms --allow-unauthenticated -y - sudo apt-get install librealsense2-dev --allow-unauthenticated -y @@ -28,11 +28,6 @@ install: - sudo rosdep init - rosdep update - echo "source /opt/ros/$_ros_dist/setup.bash" >> ~/.bashrc - - sudo apt-get install ros-$_ros_dist-cv-bridge -y - - sudo apt-get install ros-$_ros_dist-image-transport - - sudo apt-get install ros-$_ros_dist-tf -y - - sudo apt-get 
install ros-$_ros_dist-diagnostic-updater -y - - sudo apt-get install ros-$_ros_dist-ddynamic-reconfigure -y - if [[ $(lsb_release -sc) == "bionic" ]] || [[ $(lsb_release -sc) == "focal" ]]; then sudo apt-get install libeigen3-dev; fi @@ -44,6 +39,8 @@ install: - cd ~/catkin_ws/src/ - catkin_init_workspace - cd .. + - rosdep install --from-paths src --ignore-src -r -y + - sudo apt purge ros-$_ros_dist-librealsense2 - catkin_make clean - catkin_make -DCATKIN_ENABLE_TESTING=False -DCMAKE_BUILD_TYPE=Release - catkin_make install @@ -51,9 +48,9 @@ install: - source ~/.bashrc # download data: - - bag_filename="http://realsense-hw-public.s3.amazonaws.com/rs-tests/TestData/outdoors_1color.bag"; + - bag_filename="https://librealsense.intel.com/rs-tests/TestData/outdoors_1color.bag"; - wget $bag_filename -P "records/" - - bag_filename="http://realsense-hw-public.s3-eu-west-1.amazonaws.com/rs-tests/D435i_Depth_and_IMU_Stands_still.bag"; + - bag_filename="https://librealsense.intel.com/rs-tests/D435i_Depth_and_IMU_Stands_still.bag"; - wget $bag_filename -P "records/" # Run test: diff --git a/README.md b/README.md index 73a5fe8f66..e71d646d5e 100644 --- a/README.md +++ b/README.md @@ -3,11 +3,9 @@ These are packages for using Intel RealSense cameras (D400 series SR300 camera a This version supports Kinetic, Melodic and Noetic distributions. -For running in ROS2-Eloquent environment please switch to the [eloquent branch](https://github.com/IntelRealSense/realsense-ros/tree/eloquent).
-For running in ROS2-Foxy environment please switch to the [foxy branch](https://github.com/IntelRealSense/realsense-ros/tree/foxy). +For running in ROS2 environment please switch to the [ros2-development branch](https://github.com/IntelRealSense/realsense-ros/tree/ros2-development).
- -LibRealSense supported version: v2.41.0 (see [realsense2_camera release notes](https://github.com/IntelRealSense/realsense-ros/releases)) +LibRealSense2 supported version: v2.50.0 (see [realsense2_camera release notes](https://github.com/IntelRealSense/realsense-ros/releases)) ## Installation Instructions @@ -28,44 +26,32 @@ LibRealSense supported version: v2.41.0 (see [realsense2_camera release notes](h realsense2_camera is available as a debian package of ROS distribution. It can be installed by typing: - ``` - export ROS_VER=kinetic - ``` - or - ``` - export ROS_VER=melodic - ``` - or - ``` - export ROS_VER=noetic - ``` - - Then install the ros packages using the environment variable created above: - - ```sudo apt-get install ros-$ROS_VER-realsense2-camera``` + ```sudo apt-get install ros-$ROS_DISTRO-realsense2-camera``` - This will install both realsense2_camera and its dependents, including librealsense2 library. + This will install both realsense2_camera and its dependents, including librealsense2 library and matching udev-rules. Notice: - * The version of librealsense2 is almost always behind the one availeable in RealSense™ official repository. + * The version of librealsense2 is almost always behind the one available in the RealSense™ official repository. * librealsense2 is not built to use native v4l2 driver but the less stable RS-USB protocol. That is because the last is more general and operational on a larger variety of platforms. * realsense2_description is available as a separate debian package of ROS distribution. It includes the 3D-models of the devices and is necessary for running launch files that include these models (i.e. rs_d435_camera_with_model.launch). 
It can be installed by typing: - `sudo apt-get install ros-$ROS_VER-realsense2-description` + `sudo apt-get install ros-$ROS_DISTRO-realsense2-description` *Windows* **Chocolatey distribution Coming soon** * ### Method 2: The RealSense™ distribution: - > This option is demonstrated in the [.travis.yml](https://github.com/intel-ros/realsense/blob/development/.travis.yml) file. It basically summerize the elaborate instructions in the following 2 steps: + > This option is demonstrated in the [.travis.yml](https://github.com/intel-ros/realsense/blob/ros1-legacy/.travis.yml) file. It basically summarizes the elaborate instructions in the following 2 steps: ### Step 1: Install the latest Intel® RealSense™ SDK 2.0 *Ubuntu* - - * Install from [Debian Package](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md#installing-the-packages) - - In that case treat yourself as a developer. Make sure you follow the instructions to also install librealsense2-dev and librealsense-dkms packages. + + Install librealsense2 debian package: + * Jetson users - use the [Jetson Installation Guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation_jetson.md) + * Otherwise, install from [Linux Debian Installation Guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md#installing-the-packages) + - In that case treat yourself as a developer. Make sure you follow the instructions to also install librealsense2-dev and librealsense2-dkms packages. 
*Windows* Install using vcpkg @@ -73,7 +59,7 @@ LibRealSense supported version: v2.41.0 (see [realsense2_camera release notes](h `vcpkg install realsense2:x64-windows` #### OR - - #### Build from sources by downloading the latest [Intel® RealSense™ SDK 2.0](https://github.com/IntelRealSense/librealsense/releases/tag/v2.41.0) and follow the instructions under [Linux Installation](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) + - #### Build from sources by downloading the latest [Intel® RealSense™ SDK 2.0](https://github.com/IntelRealSense/librealsense/releases/tag/v2.50.0) and follow the instructions under [Linux Installation](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) ### Step 2: Install Intel® RealSense™ ROS from Sources @@ -137,8 +123,10 @@ The published topics differ according to the device and parameters. After running the above command with D435i attached, the following list of topics will be available (This is a partial list. For full one type `rostopic list`): - /camera/color/camera_info - /camera/color/image_raw +- /camera/color/metadata - /camera/depth/camera_info - /camera/depth/image_rect_raw +- /camera/depth/metadata - /camera/extrinsics/depth_to_color - /camera/extrinsics/depth_to_infra1 - /camera/extrinsics/depth_to_infra2 @@ -147,8 +135,10 @@ After running the above command with D435i attached, the following list of topic - /camera/infra2/camera_info - /camera/infra2/image_rect_raw - /camera/gyro/imu_info +- /camera/gyro/metadata - /camera/gyro/sample - /camera/accel/imu_info +- /camera/accel/metadata - /camera/accel/sample - /diagnostics @@ -172,12 +162,18 @@ The following parameters are available by the wrapper: - **rosbag_filename**: Will publish topics from rosbag file. - **initial_reset**: On occasions the device was not closed properly and due to firmware issues needs to reset. If set to true, the device will reset prior to usage. 
-- **align_depth**: If set to true, will publish additional topics with the all the images aligned to the depth image.
-The topics are of the form: ```/camera/aligned_depth_to_color/image_raw``` etc. +- **reconnect_timeout**: When the driver cannot connect to the device try to reconnect after this timeout (in seconds). +- **align_depth**: If set to true, will publish additional topics for the "aligned depth to color" image.: ```/camera/aligned_depth_to_color/image_raw```, ```/camera/aligned_depth_to_color/camera_info```.
+The pointcloud, if enabled, will be built based on the aligned_depth_to_color image.
- **filters**: any of the following options, separated by commas:
- ```colorizer```: will color the depth image. On the depth topic an RGB image will be published, instead of the 16bit depth values . - - ```pointcloud```: will add a pointcloud topic `/camera/depth/color/points`. The texture of the pointcloud can be modified in rqt_reconfigure (see below) or using the parameters: `pointcloud_texture_stream` and `pointcloud_texture_index`. Run rqt_reconfigure to see available values for these parameters.
- The depth FOV and the texture FOV are not similar. By default, pointcloud is limited to the section of depth containing the texture. You can have a full depth to pointcloud, coloring the regions beyond the texture with zeros, by setting `allow_no_texture_points` to true. + - ```pointcloud```: will add a pointcloud topic `/camera/depth/color/points`. + * The texture of the pointcloud can be modified in rqt_reconfigure (see below) or using the parameters: `pointcloud_texture_stream` and `pointcloud_texture_index`. Run rqt_reconfigure to see available values for these parameters.
+ * The depth FOV and the texture FOV are not similar. By default, pointcloud is limited to the section of depth containing the texture. You can have a full depth to pointcloud, coloring the regions beyond the texture with zeros, by setting `allow_no_texture_points` to true. + * pointcloud is of an unordered format by default. This can be changed by setting `ordered_pc` to true. +- ```hdr_merge```: Allows depth image to be created by merging the information from 2 consecutive frames, taken with different exposure and gain values. The way to set exposure and gain values for each sequence in runtime is by first selecting the sequence id, using rqt_reconfigure `stereo_module/sequence_id` parameter and then modifying the `stereo_module/gain`, and `stereo_module/exposure`.
To view the effect on the infrared image for each sequence id use the `sequence_id_filter/sequence_id` parameter.
To initialize these parameters in start time use the following parameters:
+ `stereo_module/exposure/1`, `stereo_module/gain/1`, `stereo_module/exposure/2`, `stereo_module/gain/2`
+ \* For in-depth review of the subject please read the accompanying [white paper](https://dev.intelrealsense.com/docs/high-dynamic-range-with-stereoscopic-depth-cameras). - The following filters have detailed descriptions in : https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md - ```disparity``` - convert depth to disparity before applying other filters and back. @@ -186,9 +182,10 @@ The topics are of the form: ```/camera/aligned_depth_to_color/image_raw``` etc. - ```hole_filling``` - apply hole-filling filter. - ```decimation``` - reduces depth scene complexity. - **enable_sync**: gathers closest frames of different sensors, infra red, color and depth, to be sent with the same timetag. This happens automatically when such filters as pointcloud are enabled. -- ****_width**, ****_height**, ****_fps**: can be any of *infra, color, fisheye, depth, gyro, accel, pose*. Sets the required format of the device. If the specified combination of parameters is not available by the device, the stream will not be published. Setting a value to 0, will choose the first format in the inner list. (i.e. consistent between runs but not defined). Note: for gyro accel and pose, only _fps option is meaningful. -- **enable_****: Choose whether to enable a specified stream or not. Default is true. can be any of *infra1, infra2, color, depth, fisheye, fisheye1, fisheye2, gyro, accel, pose*. +- ****_width**, ****_height**, ****_fps**: can be any of *infra, color, fisheye, depth, gyro, accel, pose, confidence*. Sets the required format of the device. If the specified combination of parameters is not available by the device, the stream will be replaced with the default for that stream. Setting a value to 0, will choose the first format in the inner list. (i.e. consistent between runs but not defined).
*Note: for gyro accel and pose, only _fps option is meaningful. +- **enable_****: Choose whether to enable a specified stream or not. Default is true for images and false for orientation streams. Can be any of *infra1, infra2, color, depth, fisheye, fisheye1, fisheye2, gyro, accel, pose, confidence*. +- **tf_prefix**: By default all frame's ids have the same prefix - `camera_`. This allows changing it per camera. +- ****_frame_id**, ****_optical_frame_id**, **aligned_depth_to_**_frame_id**: Specify the different frame_id for the different frames. Especially important when using multiple cameras. - **base_frame_id**: defines the frame_id all static transformations refers to. - **odom_frame_id**: defines the origin coordinate system in ROS convention (X-Forward, Y-Left, Z-Up). pose topic defines the pose relative to that system. - **All the rest of the frame_ids can be found in the template launch file: [nodelet.launch.xml](./realsense2_camera/launch/includes/nodelet.launch.xml)** @@ -209,6 +206,10 @@ Setting *unite_imu_method* creates a new topic, *imu*, that replaces the default - **NOTE** To enable the Infrared stream, you should enable `enable_infra:=true` NOT `enable_infra1:=true` nor `enable_infra2:=true` - **NOTE** This feature is only supported by Realsense sensors with RGB streams available from the `infra` cameras, which can be checked by observing the output of `rs-enumerate-devices` +### Available services: +- reset : Cause a hardware reset of the device. Usage: `rosservice call /camera/realsense2_camera/reset` +- enable : Start/Stop all streaming sensors. Usage example: `rosservice call /camera/enable False` +- device_info : retrieve information about the device - serial_number, firmware_version etc. Type `rosservice type /camera/realsense2_camera/device_info | rossrv show` for the full list. 
Call example: `rosservice call /camera/realsense2_camera/device_info` ### Point Cloud Here is an example of how to start the camera node and make it publish the point cloud using the pointcloud option. @@ -299,8 +300,8 @@ roslaunch realsense2_description view_d415_model.launch Unit-tests are based on bag files saved on S3 server. These can be downloaded using the following commands: ```bash cd catkin_ws -wget "http://realsense-hw-public.s3.amazonaws.com/rs-tests/TestData/outdoors.bag" -P "records/" -wget "http://realsense-hw-public.s3-eu-west-1.amazonaws.com/rs-tests/D435i_Depth_and_IMU_Stands_still.bag" -P "records/" +wget "https://librealsense.intel.com/rs-tests/TestData/outdoors.bag" -P "records/" +wget "https://librealsense.intel.com/rs-tests/D435i_Depth_and_IMU_Stands_still.bag" -P "records/" ``` Then, unit-tests can be run using the following command (use either python or python3): ```bash diff --git a/realsense2_camera/CHANGELOG.rst b/realsense2_camera/CHANGELOG.rst index 4e68cf344c..b6f04985ce 100644 --- a/realsense2_camera/CHANGELOG.rst +++ b/realsense2_camera/CHANGELOG.rst @@ -2,6 +2,59 @@ Changelog for package realsense2_camera ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +2.3.2 (2021-11-15) +------------------ +* publish metadata +* Add service: device_info +* add wait_for_device_timeout parameter +* Add reconnect_timeout parameter +* show warning when requested profile cannot be selected. +* send only 4 distortion coeffs when using equidistant +* fixed missing std namespace +* Removing spaces when iterating filters +* Contributors: Collin Avidano, Gintaras, Jacco van der Spek, doronhi + +2.3.1 (2021-07-01) +------------------ +* add respawn option +* add udev rules to debian installation +* Add support for L535 +* Fix occasional missing diagnostic messages +* Contributors: Alex Fernandes Neves, doronhi + +2.3.0 (2021-05-05) +------------------ +* Fix pointcloud message size when no texture is added. 
+* Added filling correct Tx, Ty values in projection matrix of right camera. +* Fixed frame_id of right sensor to match left sensor in a stereo pair. +* Contributors: Pavlo Kolomiiets, doronhi + +2.2.24 (2021-04-21) +------------------- +* Enabling pointcloud while align_depth is set to true creates a pointcloud aligned to color image. +* Removed option to align depth to other streams other than color. +* Contributors: doronhi + +2.2.23 (2021-03-24) +------------------- +* Remove the following tests for known playback issue with librealsense2 version 2.43.0: points_cloud_1, align_depth_color_1, align_depth_ir1_1, align_depth_ir1_decimation_1. +* Add filter: HDR_merge +* add default values to infra stream in rs_camera.launch as none are defined in librealsense2. +* fix bug: selection of profile disregarded stream index. +* fix initialization of colorizer inner image +* Contributors: doronhi + +2.2.22 (2021-02-18) +------------------- +* Add reset service. +* fix timestamp domain issues + - Add offset to ros_time only if device uses hardware-clock. Otherwise use device time - either system_time or global_time. + - Warn of a hardware timestamp possible loop. +* Choose the default profile in case of an invalid request. +* Avoid aligning confidence image. +* Add an option for an Ordered PointCloud. +* Contributors: Isaac I.Y. 
Saito, Itamar Eliakim, Marc Alban, doronhi + 2.2.21 (2020-12-31) ------------------- * Publish depth confidence image for supporting devices (L515) diff --git a/realsense2_camera/CMakeLists.txt b/realsense2_camera/CMakeLists.txt index 0a6e1ec581..2e516b7e22 100644 --- a/realsense2_camera/CMakeLists.txt +++ b/realsense2_camera/CMakeLists.txt @@ -1,6 +1,5 @@ cmake_minimum_required(VERSION 2.8.3) project(realsense2_camera) -add_compile_options(-std=c++11) option(BUILD_WITH_OPENMP "Use OpenMP" OFF) option(SET_USER_BREAK_AT_STARTUP "Set user wait point in startup (for debug)" OFF) @@ -14,6 +13,7 @@ find_package(catkin REQUIRED COMPONENTS roscpp sensor_msgs std_msgs + std_srvs nodelet cv_bridge image_transport @@ -40,7 +40,7 @@ endif() if (WIN32) find_package(realsense2 CONFIG REQUIRED) else() -find_package(realsense2 2.41.0) +find_package(realsense2 2.50.0) endif() if(NOT realsense2_FOUND) @@ -61,15 +61,21 @@ endif() if (WIN32) else() -set(CMAKE_CXX_FLAGS "-fPIE -fPIC -std=c++11 -D_FORTIFY_SOURCE=2 -fstack-protector -Wformat -Wformat-security -Wall ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_FLAGS "-fPIE -fPIC -D_FORTIFY_SOURCE=2 -fstack-protector -Wformat -Wformat-security -Wall ${CMAKE_CXX_FLAGS}") endif() add_message_files( FILES IMUInfo.msg Extrinsics.msg + Metadata.msg ) +add_service_files( + FILES + DeviceInfo.srv +) + generate_messages( DEPENDENCIES sensor_msgs @@ -85,6 +91,7 @@ include_directories( # RealSense ROS Node catkin_package( + INCLUDE_DIRS include LIBRARIES ${PROJECT_NAME} CATKIN_DEPENDS message_runtime roscpp sensor_msgs std_msgs nodelet @@ -95,10 +102,10 @@ catkin_package( ) add_library(${PROJECT_NAME} - include/constants.h - include/realsense_node_factory.h - include/base_realsense_node.h - include/t265_realsense_node.h + include/realsense2_camera/constants.h + include/realsense2_camera/realsense_node_factory.h + include/realsense2_camera/base_realsense_node.h + include/realsense2_camera/t265_realsense_node.h src/realsense_node_factory.cpp 
src/base_realsense_node.cpp src/t265_realsense_node.cpp @@ -133,7 +140,7 @@ install(TARGETS ${PROJECT_NAME} ) # Install header files -install(DIRECTORY include/ +install(DIRECTORY include/realsense2_camera DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} ) diff --git a/realsense2_camera/debian/udev.em b/realsense2_camera/debian/udev.em new file mode 100644 index 0000000000..ca9dd16dcf --- /dev/null +++ b/realsense2_camera/debian/udev.em @@ -0,0 +1,81 @@ +##Version=1.1## +# Device rules for Intel RealSense devices (R200, F200, SR300 LR200, ZR300, D400, L500, T200) +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0a80", MODE:="0666", GROUP:="plugdev", RUN+="/usr/local/bin/usb-R200-in_udev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0a66", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0aa3", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0aa2", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0aa5", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0abf", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0acb", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad0", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="04b4", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad1", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad2", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad3", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad4", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", 
ATTRS{idProduct}=="0ad5", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad6", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0af2", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0af6", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0afe", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0aff", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b00", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b01", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b03", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b07", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b0c", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b0d", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b3a", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b3d", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b48", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b49", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b4b", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b4d", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b52", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b5b", MODE:="0666", GROUP:="plugdev" 
+SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b5c", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b64", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b68", MODE:="0666", GROUP:="plugdev" + +# Intel RealSense recovery devices (DFU) +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ab3", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0adb", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0adc", MODE:="0666", GROUP:="plugdev" +SUBSYSTEMS=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b55", MODE:="0666", GROUP:="plugdev" + +# Intel RealSense devices (Movidius, T265) +SUBSYSTEMS=="usb", ENV{DEVTYPE}=="usb_device", ATTRS{idVendor}=="8087", ATTRS{idProduct}=="0af3", MODE="0666", GROUP="plugdev" +SUBSYSTEMS=="usb", ENV{DEVTYPE}=="usb_device", ATTRS{idVendor}=="8087", ATTRS{idProduct}=="0b37", MODE="0666", GROUP="plugdev" +SUBSYSTEMS=="usb", ENV{DEVTYPE}=="usb_device", ATTRS{idVendor}=="03e7", ATTRS{idProduct}=="2150", MODE="0666", GROUP="plugdev" + +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad5", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor_custom", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad5", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0af2", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0af2", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0afe", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor_custom", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0afe", RUN+="/bin/sh -c 
'chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0aff", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor_custom", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0aff", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b00", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor_custom", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b00", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b01", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor_custom", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b01", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b3a", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b3a", RUN+="/bin/sh -c ' chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b3d", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b3d", RUN+="/bin/sh -c ' chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b4b", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b4b", RUN+="/bin/sh -c ' chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b4d", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b4d", RUN+="/bin/sh -c ' chmod -R 
0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b5b", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b5b", RUN+="/bin/sh -c ' chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b5c", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b5c", RUN+="/bin/sh -c ' chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b64", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b64", RUN+="/bin/sh -c ' chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" +KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b68", MODE:="0777", GROUP:="plugdev", RUN+="/bin/sh -c 'chmod -R 0777 /sys/%p'" +DRIVER=="hid_sensor*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b68", RUN+="/bin/sh -c ' chmod -R 0777 /sys/%p && chmod 0777 /dev/%k'" + +# For products with motion_module, if (kernels is 4.15 and up) and (device name is "accel_3d") wait, in another process, until (enable flag is set to 1 or 200 mSec passed) and then set it to 0. 
+KERNEL=="iio*", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0ad5|0afe|0aff|0b00|0b01|0b3a|0b3d|0b64|0b68", RUN+="/bin/sh -c '(major=`uname -r | cut -d \".\" -f1` && minor=`uname -r | cut -d \".\" -f2` && (([ $major -eq 4 ] && [ $minor -ge 15 ]) || [ $major -ge 5 ])) && (enamefile=/sys/%p/name && [ `cat $enamefile` = \"accel_3d\" ]) && enfile=/sys/%p/buffer/enable && echo \"COUNTER=0; while [ \$COUNTER -lt 20 ] && grep -q 0 $enfile; do sleep 0.01; COUNTER=\$((COUNTER+1)); done && echo 0 > $enfile\" | at now'" diff --git a/realsense2_camera/include/base_realsense_node.h b/realsense2_camera/include/realsense2_camera/base_realsense_node.h similarity index 91% rename from realsense2_camera/include/base_realsense_node.h rename to realsense2_camera/include/realsense2_camera/base_realsense_node.h index 3836ff1d76..de1364eca5 100644 --- a/realsense2_camera/include/base_realsense_node.h +++ b/realsense2_camera/include/realsense2_camera/base_realsense_node.h @@ -3,7 +3,9 @@ #pragma once -#include "../include/realsense_node_factory.h" +#include "realsense2_camera/realsense_node_factory.h" +#include +#include "realsense2_camera/Metadata.h" #include #include @@ -163,8 +165,11 @@ namespace realsense2_camera ros::NodeHandle& _node_handle, _pnh; bool _align_depth; std::vector _monitor_options; + std::shared_ptr _device_info_srv; virtual void calcAndPublishStaticTransform(const stream_index_pair& stream, const rs2::stream_profile& base_profile); + bool getDeviceInfo(realsense2_camera::DeviceInfo::Request& req, + realsense2_camera::DeviceInfo::Response& res); rs2::stream_profile getAProfile(const stream_index_pair& stream); tf::Quaternion rotationMatrixToQuaternion(const float rotation[9]) const; void publish_static_tf(const ros::Time& t, @@ -198,11 +203,11 @@ namespace realsense2_camera void enable_devices(); void setupFilters(); void setupStreams(); - void setBaseTime(double frame_time, bool warn_no_metadata); + bool setBaseTime(double frame_time, rs2_timestamp_domain 
time_domain); + double frameSystemTimeSec(rs2::frame frame); cv::Mat& fix_depth_scale(const cv::Mat& from_image, cv::Mat& to_image); void clip_depth(rs2::depth_frame depth_frame, float clipping_dist); void updateStreamCalibData(const rs2::video_stream_profile& video_profile); - void updateExtrinsicsCalibData(const rs2::video_stream_profile& left_video_profile, const rs2::video_stream_profile& right_video_profile); void SetBaseStream(); void publishStaticTransforms(); void publishDynamicTransforms(); @@ -217,11 +222,12 @@ namespace realsense2_camera std::map& images, const std::map& info_publishers, const std::map& image_publishers, + const bool is_publishMetadata, std::map& seq, std::map& camera_info, - const std::map& optical_frame_id, const std::map& encoding, bool copy_data_from_frame = true); + void publishMetadata(rs2::frame f, const ros::Time& header_time, const std::string& frame_id); bool getEnabledProfile(const stream_index_pair& stream_index, rs2::stream_profile& profile); void publishAlignedDepthToOthers(rs2::frameset frames, const ros::Time& t); @@ -236,6 +242,9 @@ namespace realsense2_camera void multiple_message_callback(rs2::frame frame, imu_sync_method sync_method); void frame_callback(rs2::frame frame); void registerDynamicOption(ros::NodeHandle& nh, rs2::options sensor, std::string& module_name); + void registerHDRoptions(); + void set_sensor_parameter_to_ros(const std::string& module_name, rs2::options sensor, rs2_option option); + void monitor_update_functions(); void readAndSetDynamicParam(ros::NodeHandle& nh1, std::shared_ptr ddynrec, const std::string option_name, const int min_val, const int max_val, rs2::sensor sensor, int* option_value); void registerAutoExposureROIOptions(ros::NodeHandle& nh); void set_auto_exposure_roi(const std::string option_name, rs2::sensor sensor, int new_value); @@ -244,6 +253,7 @@ namespace realsense2_camera void startMonitoring(); void publish_temperature(); void publish_frequency_update(); + void 
publishServices(); rs2::device _dev; std::map _sensors; @@ -255,6 +265,8 @@ namespace realsense2_camera float _depth_scale_meters; float _clipping_distance; bool _allow_no_texture_points; + bool _ordered_pc; + double _linear_accel_cov; double _angular_velocity_cov; @@ -272,13 +284,14 @@ namespace realsense2_camera tf2_ros::StaticTransformBroadcaster _static_tf_broadcaster; tf2_ros::TransformBroadcaster _dynamic_tf_broadcaster; std::vector _static_tf_msgs; - std::shared_ptr _tf_t; + std::shared_ptr _tf_t, _update_functions_t; std::map _image_publishers; std::map _imu_publishers; std::shared_ptr _synced_imu_publisher; std::map _image_format; std::map _info_publisher; + std::map> _metadata_publishers; std::map _image; std::map _encoding; @@ -299,8 +312,8 @@ namespace realsense2_camera stream_index_pair _pointcloud_texture; PipelineSyncer _syncer; std::vector _filters; + std::shared_ptr _colorizer, _pointcloud_filter; std::vector _dev_sensors; - std::map> _align; std::map _depth_aligned_image; std::map _depth_scaled_image; @@ -318,14 +331,14 @@ namespace realsense2_camera typedef std::pair> OptionTemperatureDiag; std::vector< OptionTemperatureDiag > _temperature_nodes; std::shared_ptr _monitoring_t; - mutable std::condition_variable _cv; + std::vector > _update_functions_v; + mutable std::condition_variable _cv_monitoring, _cv_tf, _update_functions_cv; stream_index_pair _base_stream; const std::string _namespace; sensor_msgs::PointCloud2 _msg_pointcloud; std::vector< unsigned int > _valid_pc_indices; - };//end class } diff --git a/realsense2_camera/include/constants.h b/realsense2_camera/include/realsense2_camera/constants.h similarity index 92% rename from realsense2_camera/include/constants.h rename to realsense2_camera/include/realsense2_camera/constants.h index 272d846db6..12c3b43d38 100644 --- a/realsense2_camera/include/constants.h +++ b/realsense2_camera/include/realsense2_camera/constants.h @@ -6,8 +6,8 @@ #include #define REALSENSE_ROS_MAJOR_VERSION 2 -#define 
REALSENSE_ROS_MINOR_VERSION 2 -#define REALSENSE_ROS_PATCH_VERSION 21 +#define REALSENSE_ROS_MINOR_VERSION 3 +#define REALSENSE_ROS_PATCH_VERSION 2 #define STRINGIFY(arg) #arg #define VAR_ARG_STRING(arg) STRINGIFY(arg) @@ -34,17 +34,19 @@ namespace realsense2_camera const uint16_t RS435i_RGB_PID = 0x0B3A; // AWGC_MM const uint16_t RS465_PID = 0x0b4d; // D465 const uint16_t RS416_RGB_PID = 0x0B52; // F416 RGB - const uint16_t RS405_PID = 0x0b0c; // DS5U + const uint16_t RS405_PID = 0x0B5B; // DS5U const uint16_t RS455_PID = 0x0B5C; // D455 const uint16_t RS_T265_PID = 0x0b37; // const uint16_t RS_L515_PID_PRE_PRQ = 0x0B3D; // const uint16_t RS_L515_PID = 0x0B64; // + const uint16_t RS_L535_PID = 0x0b68; - const bool ALIGN_DEPTH = false; - const bool POINTCLOUD = false; + const bool ALIGN_DEPTH = false; + const bool POINTCLOUD = false; const bool ALLOW_NO_TEXTURE_POINTS = false; - const bool SYNC_FRAMES = false; + const bool ORDERED_POINTCLOUD = false; + const bool SYNC_FRAMES = false; const bool PUBLISH_TF = true; const double TF_PUBLISH_RATE = 0; // Static transform diff --git a/realsense2_camera/include/realsense_node_factory.h b/realsense2_camera/include/realsense2_camera/realsense_node_factory.h similarity index 88% rename from realsense2_camera/include/realsense_node_factory.h rename to realsense2_camera/include/realsense2_camera/realsense_node_factory.h index ff9f9c01bc..2fc6664602 100644 --- a/realsense2_camera/include/realsense_node_factory.h +++ b/realsense2_camera/include/realsense2_camera/realsense_node_factory.h @@ -14,13 +14,14 @@ #include #include #include -#include +#include #include #include #include #include #include #include +#include namespace realsense2_camera { @@ -65,12 +66,15 @@ namespace realsense2_camera void change_device_callback(rs2::event_information& info); void getDevice(rs2::device_list list); virtual void onInit() override; + void initialize(const ros::WallTimerEvent &ignored); void tryGetLogSeverity(rs2_log_severity& severity) 
const; + void reset(); + bool handleReset(std_srvs::Empty::Request& request, std_srvs::Empty::Response& response); static std::string parse_usb_port(std::string line); bool toggle_sensor_callback(std_srvs::SetBool::Request &req, std_srvs::SetBool::Response &res); rs2::device _device; - std::unique_ptr _realSenseNode; + std::shared_ptr _realSenseNode; rs2::context _ctx; std::string _serial_no; std::string _usb_port_id; @@ -79,6 +83,8 @@ namespace realsense2_camera std::thread _query_thread; bool _is_alive; ros::ServiceServer toggle_sensor_srv; + ros::WallTimer _init_timer; + ros::ServiceServer _reset_srv; }; }//end namespace diff --git a/realsense2_camera/include/t265_realsense_node.h b/realsense2_camera/include/realsense2_camera/t265_realsense_node.h similarity index 93% rename from realsense2_camera/include/t265_realsense_node.h rename to realsense2_camera/include/realsense2_camera/t265_realsense_node.h index beb8253f6b..543d8584d9 100644 --- a/realsense2_camera/include/t265_realsense_node.h +++ b/realsense2_camera/include/realsense2_camera/t265_realsense_node.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace realsense2_camera { diff --git a/realsense2_camera/launch/includes/nodelet.launch.xml b/realsense2_camera/launch/includes/nodelet.launch.xml index 9c458d3ed0..8452b4259d 100644 --- a/realsense2_camera/launch/includes/nodelet.launch.xml +++ b/realsense2_camera/launch/includes/nodelet.launch.xml @@ -9,6 +9,7 @@ + @@ -49,6 +50,8 @@ + + @@ -91,16 +94,24 @@ + + + + + + + + - + - - + + @@ -110,6 +121,9 @@ + + + @@ -187,12 +201,18 @@ + + + + + + + - diff --git a/realsense2_camera/launch/rs_camera.launch b/realsense2_camera/launch/rs_camera.launch index c85dd1ce48..75b0026bef 100644 --- a/realsense2_camera/launch/rs_camera.launch +++ b/realsense2_camera/launch/rs_camera.launch @@ -8,43 +8,46 @@ + - - + + - - + + - - + + - + - + - - + + - - + + - - - + + + + + @@ -56,11 +59,19 @@ + + - + + + + + + + @@ -68,6 +79,7 @@ + @@ -119,11 +131,20 @@ + + + + + + + + 
+ diff --git a/realsense2_camera/launch/rs_d400_and_t265.launch b/realsense2_camera/launch/rs_d400_and_t265.launch index 11d0557b70..3400d2e5db 100644 --- a/realsense2_camera/launch/rs_d400_and_t265.launch +++ b/realsense2_camera/launch/rs_d400_and_t265.launch @@ -8,6 +8,7 @@ + @@ -23,6 +24,7 @@ + @@ -37,6 +39,7 @@ + diff --git a/realsense2_camera/launch/rs_d435_camera_with_model.launch b/realsense2_camera/launch/rs_d435_camera_with_model.launch index 4e41790f1f..2c20de72ba 100644 --- a/realsense2_camera/launch/rs_d435_camera_with_model.launch +++ b/realsense2_camera/launch/rs_d435_camera_with_model.launch @@ -43,6 +43,7 @@ + @@ -93,6 +94,7 @@ + diff --git a/realsense2_camera/launch/rs_multiple_devices.launch b/realsense2_camera/launch/rs_multiple_devices.launch index 20ef3cf243..84414edba1 100644 --- a/realsense2_camera/launch/rs_multiple_devices.launch +++ b/realsense2_camera/launch/rs_multiple_devices.launch @@ -9,12 +9,14 @@ + + @@ -23,6 +25,7 @@ + @@ -31,6 +34,7 @@ + diff --git a/realsense2_camera/launch/rs_rgbd.launch b/realsense2_camera/launch/rs_rgbd.launch index 3232b43d21..94e97670ad 100644 --- a/realsense2_camera/launch/rs_rgbd.launch +++ b/realsense2_camera/launch/rs_rgbd.launch @@ -52,29 +52,29 @@ Processing enabled by this node: - - + + - - + + - - + + - - + + - - - - - - + + + + + + diff --git a/realsense2_camera/launch/rs_t265.launch b/realsense2_camera/launch/rs_t265.launch index 9ab3073e33..c042a6be55 100644 --- a/realsense2_camera/launch/rs_t265.launch +++ b/realsense2_camera/launch/rs_t265.launch @@ -12,15 +12,15 @@ https://github.com/IntelRealSense/librealsense/blob/master/third-party/libtm/lib - - + + - + - - + + @@ -29,6 +29,7 @@ https://github.com/IntelRealSense/librealsense/blob/master/third-party/libtm/lib + @@ -57,6 +58,7 @@ https://github.com/IntelRealSense/librealsense/blob/master/third-party/libtm/lib + diff --git a/realsense2_camera/msg/Metadata.msg b/realsense2_camera/msg/Metadata.msg new file mode 100644 index 0000000000..d837c3b9d3 
--- /dev/null +++ b/realsense2_camera/msg/Metadata.msg @@ -0,0 +1,2 @@ +std_msgs/Header header +string json_data \ No newline at end of file diff --git a/realsense2_camera/package.xml b/realsense2_camera/package.xml index d973577ddc..937bbd66cb 100644 --- a/realsense2_camera/package.xml +++ b/realsense2_camera/package.xml @@ -1,9 +1,8 @@ realsense2_camera - 2.2.21 + 2.3.2 RealSense Camera package allowing access to Intel T265 Tracking module and SR300 and D400 3D cameras - Sergey Dorodnicov Doron Hirshberg Apache 2.0 @@ -13,6 +12,7 @@ Sergey Dorodnicov Doron Hirshberg catkin + message_generation eigen image_transport cv_bridge @@ -22,6 +22,7 @@ roscpp sensor_msgs std_msgs + std_srvs message_runtime tf ddynamic_reconfigure diff --git a/realsense2_camera/scripts/echo_metadada.py b/realsense2_camera/scripts/echo_metadada.py new file mode 100644 index 0000000000..1b0c31b6aa --- /dev/null +++ b/realsense2_camera/scripts/echo_metadada.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +import os +import sys +import rospy +from realsense2_camera.msg import Metadata +import json + +def metadata_cb(msg): + aa = json.loads(msg.json_data) + os.system('clear') + print('header:', msg.header) + print('\n'.join(['%10s:%-10s' % (key, str(value)) for key, value in aa.items()])) + +def main(): + if len(sys.argv) < 2 or '--help' in sys.argv or '/?' 
in sys.argv: + print ('USAGE:') + print('echo_metadata.py ') + print('Demo for listening on given metadata topic.') + print('App subscribes on given topic') + print('App then prints metadata from messages') + print('') + print('Example: echo_metadata.py /camera/depth/metadata') + print('') + exit(-1) + + topic = sys.argv[1] + + rospy.init_node('metadata_tester', anonymous=True) + + depth_sub = rospy.Subscriber(topic, Metadata, metadata_cb) + + rospy.spin() + +if __name__ == '__main__': + main() diff --git a/realsense2_camera/scripts/rs2_listener.py b/realsense2_camera/scripts/rs2_listener.py index 8b576c19aa..3ac35723a5 100644 --- a/realsense2_camera/scripts/rs2_listener.py +++ b/realsense2_camera/scripts/rs2_listener.py @@ -2,6 +2,7 @@ import time import rospy from sensor_msgs.msg import Image as msg_Image +from sensor_msgs.msg import CompressedImage as msg_CompressedImage from sensor_msgs.msg import PointCloud2 as msg_PointCloud2 import sensor_msgs.point_cloud2 as pc2 from sensor_msgs.msg import Imu as msg_Imu @@ -38,7 +39,7 @@ def __init__(self, params={}): self.result = None self.break_timeout = False - self.timeout = params.get('timeout_secs', -1) + self.timeout = params.get('timeout_secs', -1) * 1e-3 self.seq = params.get('seq', -1) self.time = params.get('time', None) self.node_name = params.get('node_name', 'rs2_listener') @@ -216,7 +217,8 @@ def callback(self, data): self.prev_msg_data = data self.prev_time = time.time() - if any([self.seq > 0 and data.header.seq >= self.seq, + if any([self.seq < 0 and self.time is None, + self.seq > 0 and data.header.seq >= self.seq, self.time and data.header.stamp.secs == self.time['secs'] and data.header.stamp.nsecs == self.time['nsecs']]): self.result = data self.sub.unregister() @@ -256,6 +258,8 @@ def main(): except NameError as e: print ('theora_image_transport is not installed. 
\nType "sudo apt-get install ros-kinetic-theora-image-transport" to enable registering on messages of type theora.') raise + elif 'compressed' in wanted_topic: + msg_type = msg_CompressedImage else: msg_type = msg_Image @@ -274,6 +278,10 @@ def main(): msg_params.setdefault('topic', wanted_topic) res = msg_retriever.wait_for_message(msg_params, msg_type) rospy.loginfo('Got message: %s' % res.header) + if (hasattr(res, 'encoding')): + print ('res.encoding:', res.encoding) + if (hasattr(res, 'format')): + print ('res.format:', res.format) else: themes = [wanted_topic] res = msg_retriever.wait_for_messages(themes) diff --git a/realsense2_camera/scripts/rs2_test.py b/realsense2_camera/scripts/rs2_test.py index 1398cd8e2d..9498bd8cb0 100644 --- a/realsense2_camera/scripts/rs2_test.py +++ b/realsense2_camera/scripts/rs2_test.py @@ -346,11 +346,11 @@ def main(): {'name': 'vis_avg_2', 'type': 'vis_avg', 'params': {'rosbag_filename': outdoors_filename}}, {'name': 'depth_avg_1', 'type': 'depth_avg', 'params': {'rosbag_filename': outdoors_filename}}, {'name': 'depth_w_cloud_1', 'type': 'depth_avg', 'params': {'rosbag_filename': outdoors_filename, 'enable_pointcloud': 'true'}}, - {'name': 'points_cloud_1', 'type': 'pointscloud_avg', 'params': {'rosbag_filename': outdoors_filename, 'enable_pointcloud': 'true'}}, - {'name': 'align_depth_color_1', 'type': 'align_depth_color', 'params': {'rosbag_filename': outdoors_filename, 'align_depth': 'true'}}, - {'name': 'align_depth_ir1_1', 'type': 'align_depth_ir1', 'params': {'rosbag_filename': outdoors_filename, 'align_depth': 'true'}}, + # {'name': 'points_cloud_1', 'type': 'pointscloud_avg', 'params': {'rosbag_filename': outdoors_filename, 'enable_pointcloud': 'true'}}, + # {'name': 'align_depth_color_1', 'type': 'align_depth_color', 'params': {'rosbag_filename': outdoors_filename, 'align_depth': 'true'}}, + # {'name': 'align_depth_ir1_1', 'type': 'align_depth_ir1', 'params': {'rosbag_filename': outdoors_filename, 'align_depth': 
'true'}}, {'name': 'depth_avg_decimation_1', 'type': 'depth_avg_decimation', 'params': {'rosbag_filename': outdoors_filename, 'filters': 'decimation'}}, - {'name': 'align_depth_ir1_decimation_1', 'type': 'align_depth_ir1_decimation', 'params': {'rosbag_filename': outdoors_filename, 'filters': 'decimation', 'align_depth': 'true'}}, + # {'name': 'align_depth_ir1_decimation_1', 'type': 'align_depth_ir1_decimation', 'params': {'rosbag_filename': outdoors_filename, 'filters': 'decimation', 'align_depth': 'true'}}, # {'name': 'static_tf_1', 'type': 'static_tf', 'params': {'rosbag_filename': outdoors_filename}}, # Not working in Travis... # {'name': 'accel_up_1', 'type': 'accel_up', 'params': {'rosbag_filename': './records/D435i_Depth_and_IMU_Stands_still.bag'}}, # Keeps failing on Travis CI. See https://github.com/IntelRealSense/realsense-ros/pull/1504#issuecomment-744226704 ] diff --git a/realsense2_camera/src/base_realsense_node.cpp b/realsense2_camera/src/base_realsense_node.cpp index 54533f42db..274b6282d1 100644 --- a/realsense2_camera/src/base_realsense_node.cpp +++ b/realsense2_camera/src/base_realsense_node.cpp @@ -1,10 +1,14 @@ -#include "../include/base_realsense_node.h" +#include "realsense2_camera/base_realsense_node.h" #include "assert.h" #include #include #include #include +#include +#include +#include + using namespace realsense2_camera; using namespace ddynamic_reconfigure; @@ -92,14 +96,12 @@ BaseRealSenseNode::BaseRealSenseNode(ros::NodeHandle& nodeHandle, _encoding[RS2_STREAM_DEPTH] = sensor_msgs::image_encodings::TYPE_16UC1; // ROS message type _unit_step_size[RS2_STREAM_DEPTH] = sizeof(uint16_t); // sensor_msgs::ImagePtr row step size _stream_name[RS2_STREAM_DEPTH] = "depth"; - _depth_aligned_encoding[RS2_STREAM_DEPTH] = sensor_msgs::image_encodings::TYPE_16UC1; // Types for confidence stream _image_format[RS2_STREAM_CONFIDENCE] = CV_8UC1; // CVBridge type _encoding[RS2_STREAM_CONFIDENCE] = sensor_msgs::image_encodings::MONO8; // ROS message type 
_unit_step_size[RS2_STREAM_CONFIDENCE] = sizeof(uint8_t); // sensor_msgs::ImagePtr row step size _stream_name[RS2_STREAM_CONFIDENCE] = "confidence"; - _depth_aligned_encoding[RS2_STREAM_CONFIDENCE] = sensor_msgs::image_encodings::TYPE_16UC1; // Infrared stream _format[RS2_STREAM_INFRARED] = RS2_FORMAT_Y8; @@ -108,7 +110,6 @@ BaseRealSenseNode::BaseRealSenseNode(ros::NodeHandle& nodeHandle, _encoding[RS2_STREAM_INFRARED] = sensor_msgs::image_encodings::MONO8; // ROS message type _unit_step_size[RS2_STREAM_INFRARED] = sizeof(uint8_t); // sensor_msgs::ImagePtr row step size _stream_name[RS2_STREAM_INFRARED] = "infra"; - _depth_aligned_encoding[RS2_STREAM_INFRARED] = sensor_msgs::image_encodings::TYPE_16UC1; // Types for color stream _image_format[RS2_STREAM_COLOR] = CV_8UC3; // CVBridge type @@ -122,7 +123,6 @@ BaseRealSenseNode::BaseRealSenseNode(ros::NodeHandle& nodeHandle, _encoding[RS2_STREAM_FISHEYE] = sensor_msgs::image_encodings::MONO8; // ROS message type _unit_step_size[RS2_STREAM_FISHEYE] = sizeof(uint8_t); // sensor_msgs::ImagePtr row step size _stream_name[RS2_STREAM_FISHEYE] = "fisheye"; - _depth_aligned_encoding[RS2_STREAM_FISHEYE] = sensor_msgs::image_encodings::TYPE_16UC1; // Types for Motion-Module streams _stream_name[RS2_STREAM_GYRO] = "gyro"; @@ -137,11 +137,14 @@ BaseRealSenseNode::BaseRealSenseNode(ros::NodeHandle& nodeHandle, BaseRealSenseNode::~BaseRealSenseNode() { // Kill dynamic transform thread - if (_tf_t) + _is_running = false; + _cv_tf.notify_one(); + if (_tf_t && _tf_t->joinable()) _tf_t->join(); + if (_update_functions_t && _update_functions_t->joinable()) + _update_functions_t->join(); - _is_running = false; - _cv.notify_one(); + _cv_monitoring.notify_one(); if (_monitoring_t && _monitoring_t->joinable()) { _monitoring_t->join(); @@ -150,18 +153,19 @@ BaseRealSenseNode::~BaseRealSenseNode() std::set module_names; for (const std::pair>& profile : _enabled_profiles) { - std::string module_name = 
_sensors[profile.first].get_info(RS2_CAMERA_INFO_NAME); - std::pair< std::set::iterator, bool> res = module_names.insert(module_name); - if (res.second) + try { - try + std::string module_name = _sensors[profile.first].get_info(RS2_CAMERA_INFO_NAME); + std::pair< std::set::iterator, bool> res = module_names.insert(module_name); + if (res.second) { _sensors[profile.first].stop(); _sensors[profile.first].close(); } - catch(const rs2::wrong_api_call_sequence_error& e) - { - } + } + catch (const rs2::error& e) + { + ROS_ERROR_STREAM("Exception: " << e.what()); } } } @@ -223,7 +227,7 @@ void BaseRealSenseNode::setupErrorCallback() { ROS_WARN_STREAM("Hardware Notification:" << n.get_description() << "," << n.get_timestamp() << "," << n.get_severity() << "," << n.get_category()); } - if (error_strings.end() != find_if(error_strings.begin(), error_strings.end(), [&n] (std::string err) + if (error_strings.end() != std::find_if(error_strings.begin(), error_strings.end(), [&n] (std::string err) {return (n.get_description().find(err) != std::string::npos); })) { ROS_ERROR_STREAM("Performing Hardware Reset."); @@ -238,6 +242,7 @@ void BaseRealSenseNode::publishTopics() getParameters(); setupDevice(); setupFilters(); + registerHDRoptions(); registerDynamicReconfigCb(_node_handle); setupErrorCallback(); enable_devices(); @@ -248,6 +253,7 @@ void BaseRealSenseNode::publishTopics() publishStaticTransforms(); publishIntrinsics(); startMonitoring(); + publishServices(); ROS_INFO_STREAM("RealSense Node Is Up!"); } @@ -552,10 +558,28 @@ void BaseRealSenseNode::registerDynamicOption(ros::NodeHandle& nh, rs2::options << ". 
Removing this parameter from dynamic reconfigure options."); continue; } - ddynrec->registerEnumVariable( - option_name, option_value, - [option, sensor](int new_value) { sensor.set_option(option, new_value); }, - sensor.get_option_description(option), enum_dict); + if (module_name == "stereo_module" && i == RS2_OPTION_SEQUENCE_ID) + { + ddynrec->registerEnumVariable( + option_name, option_value, + [this, option, sensor, module_name](int new_value) + { + sensor.set_option(option, new_value); + _update_functions_v.push_back([this, module_name, sensor]() + {set_sensor_parameter_to_ros(module_name, sensor, RS2_OPTION_EXPOSURE);}); + _update_functions_v.push_back([this, module_name, sensor]() + {set_sensor_parameter_to_ros(module_name, sensor, RS2_OPTION_GAIN);}); + _update_functions_cv.notify_one(); + }, + sensor.get_option_description(option), enum_dict); + } + else + { + ddynrec->registerEnumVariable( + option_name, option_value, + [option, sensor](int new_value) { sensor.set_option(option, new_value); }, + sensor.get_option_description(option), enum_dict); + } } } catch(const rs2::backend_error& e) @@ -593,6 +617,88 @@ void BaseRealSenseNode::registerDynamicReconfigCb(ros::NodeHandle& nh) ROS_INFO("Done Setting Dynamic reconfig parameters."); } +void BaseRealSenseNode::registerHDRoptions() +{ + if (std::find_if(std::begin(_filters), std::end(_filters), [](NamedFilter f){return f._name == "hdr_merge";}) == std::end(_filters)) + return; + + std::string module_name; + std::vector options{RS2_OPTION_EXPOSURE, RS2_OPTION_GAIN}; + for(rs2::sensor sensor : _dev_sensors) + { + if (!sensor.is()) continue; + std::string module_name = create_graph_resource_name(sensor.get_info(RS2_CAMERA_INFO_NAME)); + // Read initialization parameters and initialize hdr_merge filter: + unsigned int seq_size = sensor.get_option(RS2_OPTION_SEQUENCE_SIZE); + for (unsigned int seq_id = 1; seq_id <= seq_size; seq_id++ ) + { + sensor.set_option(RS2_OPTION_SEQUENCE_ID, seq_id); + for (rs2_option& 
option : options) + { + std::stringstream param_name_str; + param_name_str << module_name << "/" << create_graph_resource_name(rs2_option_to_string(option)) << "/" << seq_id; + std::string param_name(param_name_str.str()); + ROS_INFO_STREAM("Reading option: " << param_name); + int option_value = sensor.get_option(option); + + int user_set_option_value; + _pnh.param(param_name, user_set_option_value, option_value); + _pnh.deleteParam(param_name); + if (option_value != user_set_option_value) + { + ROS_INFO_STREAM("Set " << rs2_option_to_string(option) << " to " << user_set_option_value); + sensor.set_option(option, user_set_option_value); + } + } + } + sensor.set_option(RS2_OPTION_SEQUENCE_ID, 0); // Set back to default. + sensor.set_option(RS2_OPTION_HDR_ENABLED, true); + + monitor_update_functions(); // Start parameters update thread + + break; + } +} + +void BaseRealSenseNode::set_sensor_parameter_to_ros(const std::string& module_name, rs2::options sensor, rs2_option option) +{ + dynamic_reconfigure::ReconfigureRequest srv_req; + dynamic_reconfigure::ReconfigureResponse srv_resp; + dynamic_reconfigure::IntParameter int_param; + dynamic_reconfigure::Config conf; + + int_param.name = create_graph_resource_name(rs2_option_to_string(option)); //"sequence_id" + int_param.value = sensor.get_option(option); + conf.ints.push_back(int_param); + + srv_req.config = conf; + + std::string service_name = module_name + "/set_parameters"; + if (ros::service::call(service_name, srv_req, srv_resp)) { + ROS_INFO_STREAM( "call to set " << service_name << "/" << int_param.name << " to " << int_param.value << " succeeded"); + } else { + ROS_ERROR_STREAM( "call to set " << service_name << "/" << int_param.name << " to " << int_param.value << " failed"); + } +} + +void BaseRealSenseNode::monitor_update_functions() +{ + int time_interval(1000); + std::function func = [this, time_interval](){ + std::mutex mu; + std::unique_lock lock(mu); + while(_is_running) { + 
_update_functions_cv.wait_for(lock, std::chrono::milliseconds(time_interval), [&]{return !_is_running || !_update_functions_v.empty();}); + while (!_update_functions_v.empty()) + { + _update_functions_v.back()(); + _update_functions_v.pop_back(); + } + } + }; + _update_functions_t = std::make_shared(func); +} + rs2_stream BaseRealSenseNode::rs2_string_to_stream(std::string str) { if (str == "RS2_STREAM_ANY") @@ -645,17 +751,17 @@ void BaseRealSenseNode::getParameters() for (auto& stream : IMAGE_STREAMS) { std::string param_name(_stream_name[stream.first] + "_width"); - ROS_DEBUG_STREAM("reading parameter:" << param_name); _pnh.param(param_name, _width[stream], IMAGE_WIDTH); + ROS_DEBUG_STREAM("parameter:" << param_name << " = " << _width[stream]); param_name = _stream_name[stream.first] + "_height"; - ROS_DEBUG_STREAM("reading parameter:" << param_name); _pnh.param(param_name, _height[stream], IMAGE_HEIGHT); + ROS_DEBUG_STREAM("parameter:" << param_name << " = " << _height[stream]); param_name = _stream_name[stream.first] + "_fps"; - ROS_DEBUG_STREAM("reading parameter:" << param_name); _pnh.param(param_name, _fps[stream], IMAGE_FPS); + ROS_DEBUG_STREAM("parameter:" << param_name << " = " << _fps[stream]); param_name = "enable_" + STREAM_NAME(stream); - ROS_DEBUG_STREAM("reading parameter:" << param_name); _pnh.param(param_name, _enable[stream], true); + ROS_DEBUG_STREAM("parameter:" << param_name << " = " << _enable[stream]); } for (auto& stream : HID_STREAMS) @@ -696,15 +802,14 @@ void BaseRealSenseNode::getParameters() _pnh.param("imu_optical_frame_id", _optical_frame_id[GYRO], DEFAULT_IMU_OPTICAL_FRAME_ID); } - for (auto& stream : IMAGE_STREAMS) { - if (stream == DEPTH || stream == CONFIDENCE) continue; - if (stream.second > 1) continue; + stream_index_pair stream(COLOR); std::string param_name(static_cast(std::ostringstream() << "aligned_depth_to_" << STREAM_NAME(stream) << "_frame_id").str()); _pnh.param(param_name, _depth_aligned_frame_id[stream], 
ALIGNED_DEPTH_TO_FRAME_ID(stream)); } _pnh.param("allow_no_texture_points", _allow_no_texture_points, ALLOW_NO_TEXTURE_POINTS); + _pnh.param("ordered_pc", _ordered_pc, ORDERED_POINTCLOUD); _pnh.param("clip_distance", _clipping_distance, static_cast(-1.0)); _pnh.param("linear_accel_cov", _linear_accel_cov, static_cast(0.01)); _pnh.param("angular_velocity_cov", _angular_velocity_cov, static_cast(0.01)); @@ -828,7 +933,7 @@ void BaseRealSenseNode::setupDevice() ros::shutdown(); exit(1); } - ROS_INFO_STREAM(std::string(sensor.get_info(RS2_CAMERA_INFO_NAME)) << " was found."); + ROS_INFO_STREAM(module_name << " was found."); } // Update "enable" map @@ -863,7 +968,7 @@ void BaseRealSenseNode::setupPublishers() { if (_enable[stream]) { - std::stringstream image_raw, camera_info; + std::stringstream image_raw, camera_info, topic_metadata; bool rectified_image = false; if (stream == DEPTH || stream == CONFIDENCE || stream == INFRA1 || stream == INFRA2) rectified_image = true; @@ -871,12 +976,14 @@ void BaseRealSenseNode::setupPublishers() std::string stream_name(STREAM_NAME(stream)); image_raw << stream_name << "/image_" << ((rectified_image)?"rect_":"") << "raw"; camera_info << stream_name << "/camera_info"; + topic_metadata << stream_name << "/metadata"; std::shared_ptr frequency_diagnostics(new FrequencyDiagnostics(_fps[stream], stream_name, _serial_no)); _image_publishers[stream] = {image_transport.advertise(image_raw.str(), 1), frequency_diagnostics}; _info_publisher[stream] = _node_handle.advertise(camera_info.str(), 1); + _metadata_publishers[stream] = std::make_shared(_node_handle.advertise(topic_metadata.str(), 1)); - if (_align_depth && (stream != DEPTH) && (stream != CONFIDENCE) && stream.second < 2) + if (_align_depth && stream == COLOR) { std::stringstream aligned_image_raw, aligned_camera_info; aligned_image_raw << "aligned_depth_to_" << stream_name << "/image_raw"; @@ -907,16 +1014,19 @@ void BaseRealSenseNode::setupPublishers() if (_enable[GYRO]) { 
_imu_publishers[GYRO] = _node_handle.advertise("gyro/sample", 100); + _metadata_publishers[GYRO] = std::make_shared(_node_handle.advertise("gyro/metadata", 1)); } if (_enable[ACCEL]) { _imu_publishers[ACCEL] = _node_handle.advertise("accel/sample", 100); + _metadata_publishers[ACCEL] = std::make_shared(_node_handle.advertise("accel/metadata", 1)); } } if (_enable[POSE]) { _imu_publishers[POSE] = _node_handle.advertise("odom/sample", 100); + _metadata_publishers[POSE] = std::make_shared(_node_handle.advertise("odom/metadata", 1)); } @@ -945,59 +1055,6 @@ void BaseRealSenseNode::setupPublishers() } } -void BaseRealSenseNode::publishAlignedDepthToOthers(rs2::frameset frames, const ros::Time& t) -{ - for (auto it = frames.begin(); it != frames.end(); ++it) - { - auto frame = (*it); - auto stream_type = frame.get_profile().stream_type(); - - if (RS2_STREAM_DEPTH == stream_type) - continue; - - auto stream_index = frame.get_profile().stream_index(); - if (stream_index > 1) - { - continue; - } - stream_index_pair sip{stream_type, stream_index}; - auto& info_publisher = _depth_aligned_info_publisher.at(sip); - auto& image_publisher = _depth_aligned_image_publishers.at(sip); - - if(0 != info_publisher.getNumSubscribers() || - 0 != image_publisher.first.getNumSubscribers()) - { - std::shared_ptr align; - try{ - align = _align.at(stream_type); - } - catch(const std::out_of_range& e) - { - ROS_DEBUG_STREAM("Allocate align filter for:" << rs2_stream_to_string(sip.first) << sip.second); - align = (_align[stream_type] = std::make_shared(stream_type)); - } - rs2::frameset processed = frames.apply_filter(*align); - std::vector frames_to_publish; - frames_to_publish.push_back(processed.get_depth_frame()); // push_back(aligned_depth_frame) - for (std::vector::const_iterator filter_it = _filters.begin(); filter_it != _filters.end(); filter_it++) - { - if (filter_it->_name == "colorizer") - { - frames_to_publish.push_back(filter_it->_filter->process(frames_to_publish.back())); 
//push_back(colorized) - break; - } - } - - publishFrame(frames_to_publish.back(), t, sip, - _depth_aligned_image, - _depth_aligned_info_publisher, - _depth_aligned_image_publishers, _depth_aligned_seq, - _depth_aligned_camera_info, _optical_frame_id, - _depth_aligned_encoding); - } - } -} - void BaseRealSenseNode::enable_devices() { for (auto& elem : IMAGE_STREAMS) @@ -1006,6 +1063,7 @@ void BaseRealSenseNode::enable_devices() { auto& sens = _sensors[elem]; auto profiles = sens.get_stream_profiles(); + rs2::stream_profile default_profile, selected_profile; for (auto& profile : profiles) { auto video_profile = profile.as(); @@ -1016,34 +1074,50 @@ void BaseRealSenseNode::enable_devices() ", Height: " << video_profile.height() << ", FPS: " << video_profile.fps()); - if ((video_profile.stream_type() == elem.first) && - (_width[elem] == 0 || video_profile.width() == _width[elem]) && - (_height[elem] == 0 || video_profile.height() == _height[elem]) && - (_fps[elem] == 0 || video_profile.fps() == _fps[elem]) && - (_format.find(elem.first) == _format.end() || video_profile.format() == _format[elem.first] ) && - video_profile.stream_index() == elem.second) + if (profile.stream_type() == elem.first && profile.stream_index() == elem.second) { - _width[elem] = video_profile.width(); - _height[elem] = video_profile.height(); - _fps[elem] = video_profile.fps(); - - _enabled_profiles[elem].push_back(profile); - - _image[elem] = cv::Mat(_height[elem], _width[elem], _image_format[elem.first], cv::Scalar(0, 0, 0)); - - ROS_INFO_STREAM(STREAM_NAME(elem) << " stream is enabled - width: " << _width[elem] << ", height: " << _height[elem] << ", fps: " << _fps[elem] << ", " << "Format: " << video_profile.format()); - break; + if (profile.is_default()) + { + default_profile = profile; + } + if ((_width[elem] == 0 || video_profile.width() == _width[elem]) && + (_height[elem] == 0 || video_profile.height() == _height[elem]) && + (_fps[elem] == 0 || video_profile.fps() == _fps[elem]) && + 
(_format.find(elem.first) == _format.end() || video_profile.format() == _format[elem.first] )) + { + selected_profile = profile; + break; + } } + } - if (_enabled_profiles.find(elem) == _enabled_profiles.end()) + if (!selected_profile) { - ROS_WARN_STREAM("Given stream configuration is not supported by the device! " << + ROS_WARN_STREAM_COND((_width[elem]!=-1 || _height[elem]!=-1 || _fps[elem]!=-1), "Given stream configuration is not supported by the device! " << " Stream: " << rs2_stream_to_string(elem.first) << ", Stream Index: " << elem.second << ", Width: " << _width[elem] << ", Height: " << _height[elem] << ", FPS: " << _fps[elem] << ", Format: " << ((_format.find(elem.first) == _format.end())? "None":rs2_format_to_string(rs2_format(_format[elem.first])))); + if (default_profile) + { + ROS_WARN_STREAM_COND((_width[elem]!=-1 || _height[elem]!=-1 || _fps[elem]!=-1), "Using default profile instead."); + selected_profile = default_profile; + } + } + if (selected_profile) + { + auto video_profile = selected_profile.as(); + _width[elem] = video_profile.width(); + _height[elem] = video_profile.height(); + _fps[elem] = video_profile.fps(); + _enabled_profiles[elem].push_back(selected_profile); + _image[elem] = cv::Mat(_height[elem], _width[elem], _image_format[elem.first], cv::Scalar(0, 0, 0)); + ROS_INFO_STREAM(STREAM_NAME(elem) << " stream is enabled - width: " << _width[elem] << ", height: " << _height[elem] << ", fps: " << _fps[elem] << ", " << "Format: " << video_profile.format()); + } + else + { _enable[elem] = false; } } @@ -1064,6 +1138,7 @@ void BaseRealSenseNode::enable_devices() { auto& sens = _sensors[elem]; auto profiles = sens.get_stream_profiles(); + rs2::stream_profile default_profile, selected_profile; ROS_DEBUG_STREAM("Available profiles:"); for (rs2::stream_profile& profile : profiles) { @@ -1072,24 +1147,35 @@ void BaseRealSenseNode::enable_devices() } for (rs2::stream_profile& profile : profiles) { - if (profile.stream_type() == elem.first && - 
(_fps[elem] == 0 || profile.fps() == _fps[elem])) + if (profile.stream_type() == elem.first) { - _fps[elem] = profile.fps(); - _enabled_profiles[elem].push_back(profile); - break; + if (profile.is_default()) + default_profile = profile; + if (_fps[elem] == 0 || profile.fps() == _fps[elem]) + { + selected_profile = profile; + break; + } } } - if (_enabled_profiles.find(elem) == _enabled_profiles.end()) + if (!selected_profile) { std::string stream_name(STREAM_NAME(elem)); - ROS_WARN_STREAM("No mathcing profile found for " << stream_name << " with fps=" << _fps[elem]); - ROS_WARN_STREAM("profiles found for " <::const_iterator s_iter=filters_str.begin(); s_iter!=filters_str.end(); s_iter++) + bool use_hdr_filter(false); + for (std::vector::iterator s_iter=filters_str.begin(); s_iter!=filters_str.end(); s_iter++) { + (*s_iter).erase(std::remove_if((*s_iter).begin(), (*s_iter).end(), isspace), (*s_iter).end()); // Remove spaces + if ((*s_iter) == "colorizer") { use_colorizer_filter = true; @@ -1136,6 +1225,10 @@ void BaseRealSenseNode::setupFilters() { assert(_pointcloud); // For now, it is set in getParameters().. 
} + else if ((*s_iter) == "hdr_merge") + { + use_hdr_filter = true; + } else if ((*s_iter).size() > 0) { ROS_ERROR_STREAM("Unknown Filter: " << (*s_iter)); @@ -1149,32 +1242,42 @@ void BaseRealSenseNode::setupFilters() _filters.push_back(NamedFilter("disparity_end", std::make_shared(false))); ROS_INFO("Done Add Filter: disparity"); } + if (use_hdr_filter) + { + ROS_INFO("Add Filter: hdr_merge"); + _filters.insert(_filters.begin(),NamedFilter("hdr_merge", std::make_shared())); + ROS_INFO("Add Filter: sequence_id_filter"); + _filters.insert(_filters.begin(),NamedFilter("sequence_id_filter", std::make_shared())); + } if (use_decimation_filter) { ROS_INFO("Add Filter: decimation"); _filters.insert(_filters.begin(),NamedFilter("decimation", std::make_shared())); } + if (_align_depth) + { + _filters.push_back(NamedFilter("align_to_color", std::make_shared(RS2_STREAM_COLOR))); + } if (use_colorizer_filter) { ROS_INFO("Add Filter: colorizer"); - _filters.push_back(NamedFilter("colorizer", std::make_shared())); - + _colorizer = std::make_shared(); + _filters.push_back(NamedFilter("colorizer", _colorizer)); // Types for depth stream _image_format[DEPTH.first] = _image_format[COLOR.first]; // CVBridge type _encoding[DEPTH.first] = _encoding[COLOR.first]; // ROS message type _unit_step_size[DEPTH.first] = _unit_step_size[COLOR.first]; // sensor_msgs::ImagePtr row step size - _depth_aligned_encoding[RS2_STREAM_INFRARED] = _encoding[COLOR.first]; // ROS message type _depth_aligned_encoding[RS2_STREAM_COLOR] = _encoding[COLOR.first]; // ROS message type - _depth_aligned_encoding[RS2_STREAM_FISHEYE] = _encoding[COLOR.first]; // ROS message type _width[DEPTH] = _width[COLOR]; _height[DEPTH] = _height[COLOR]; - _image[DEPTH] = cv::Mat(_height[DEPTH], _width[DEPTH], _image_format[DEPTH.first], cv::Scalar(0, 0, 0)); + _image[DEPTH] = cv::Mat(std::max(0, _height[DEPTH]), std::max(0, _width[DEPTH]), _image_format[DEPTH.first], cv::Scalar(0, 0, 0)); } if (_pointcloud) { ROS_INFO("Add 
Filter: pointcloud"); - _filters.push_back(NamedFilter("pointcloud", std::make_shared(_pointcloud_texture.first, _pointcloud_texture.second))); + _pointcloud_filter = std::make_shared(_pointcloud_texture.first, _pointcloud_texture.second); + _filters.push_back(NamedFilter("pointcloud", _pointcloud_filter)); } ROS_INFO("num_filters: %d", static_cast(_filters.size())); } @@ -1353,17 +1456,16 @@ void BaseRealSenseNode::imu_callback_sync(rs2::frame frame, imu_sync_method sync bool placeholder_false(false); if (_is_initialized_time_base.compare_exchange_strong(placeholder_false, true) ) { - setBaseTime(frame_time, RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME == frame.get_frame_timestamp_domain()); + _is_initialized_time_base = setBaseTime(frame_time, frame.get_frame_timestamp_domain()); } seq += 1; - double elapsed_camera_ms = (/*ms*/ frame_time - /*ms*/ _camera_time_base) / 1000.0; if (0 != _synced_imu_publisher->getNumSubscribers()) { auto crnt_reading = *(reinterpret_cast(frame.get_data())); Eigen::Vector3d v(crnt_reading.x, crnt_reading.y, crnt_reading.z); - CimuData imu_data(stream_index, v, elapsed_camera_ms); + CimuData imu_data(stream_index, v, frameSystemTimeSec(frame)); std::deque imu_msgs; switch (sync_method) { @@ -1378,9 +1480,7 @@ void BaseRealSenseNode::imu_callback_sync(rs2::frame frame, imu_sync_method sync while (imu_msgs.size()) { sensor_msgs::Imu imu_msg = imu_msgs.front(); - ros::Time t(_ros_time_base.toSec() + imu_msg.header.stamp.toSec()); imu_msg.header.seq = seq; - imu_msg.header.stamp = t; ImuMessage_AddDefaultValues(imu_msg); _synced_imu_publisher->Publish(imu_msg); ROS_DEBUG("Publish united %s stream", rs2_stream_to_string(frame.get_profile().stream_type())); @@ -1388,7 +1488,7 @@ void BaseRealSenseNode::imu_callback_sync(rs2::frame frame, imu_sync_method sync } } m_mutex.unlock(); -}; +} void BaseRealSenseNode::imu_callback(rs2::frame frame) { @@ -1397,7 +1497,7 @@ void BaseRealSenseNode::imu_callback(rs2::frame frame) bool placeholder_false(false); if 
(_is_initialized_time_base.compare_exchange_strong(placeholder_false, true) ) { - setBaseTime(frame_time, RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME == frame.get_frame_timestamp_domain()); + _is_initialized_time_base = setBaseTime(frame_time, frame.get_frame_timestamp_domain()); } ROS_DEBUG("Frame arrived: stream: %s ; index: %d ; Timestamp Domain: %s", @@ -1406,11 +1506,9 @@ void BaseRealSenseNode::imu_callback(rs2::frame frame) rs2_timestamp_domain_to_string(frame.get_frame_timestamp_domain())); auto stream_index = (stream == GYRO.first)?GYRO:ACCEL; + ros::Time t(frameSystemTimeSec(frame)); if (0 != _imu_publishers[stream_index].getNumSubscribers()) { - double elapsed_camera_ms = (/*ms*/ frame_time - /*ms*/ _camera_time_base) / 1000.0; - ros::Time t(_ros_time_base.toSec() + elapsed_camera_ms); - auto imu_msg = sensor_msgs::Imu(); ImuMessage_AddDefaultValues(imu_msg); imu_msg.header.frame_id = _optical_frame_id[stream_index]; @@ -1434,6 +1532,7 @@ void BaseRealSenseNode::imu_callback(rs2::frame frame) _imu_publishers[stream_index].publish(imu_msg); ROS_DEBUG("Publish %s stream", rs2_stream_to_string(frame.get_profile().stream_type())); } + publishMetadata(frame, t, _optical_frame_id[stream_index]); } void BaseRealSenseNode::pose_callback(rs2::frame frame) @@ -1442,7 +1541,7 @@ void BaseRealSenseNode::pose_callback(rs2::frame frame) bool placeholder_false(false); if (_is_initialized_time_base.compare_exchange_strong(placeholder_false, true) ) { - setBaseTime(frame_time, RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME == frame.get_frame_timestamp_domain()); + _is_initialized_time_base = setBaseTime(frame_time, frame.get_frame_timestamp_domain()); } ROS_DEBUG("Frame arrived: stream: %s ; index: %d ; Timestamp Domain: %s", @@ -1451,8 +1550,7 @@ void BaseRealSenseNode::pose_callback(rs2::frame frame) rs2_timestamp_domain_to_string(frame.get_frame_timestamp_domain())); const auto& stream_index(POSE); rs2_pose pose = frame.as().get_pose_data(); - double elapsed_camera_ms = (/*ms*/ frame_time - 
/*ms*/ _camera_time_base) / 1000.0; - ros::Time t(_ros_time_base.toSec() + elapsed_camera_ms); + ros::Time t(frameSystemTimeSec(frame)); geometry_msgs::PoseStamped pose_msg; pose_msg.pose.position.x = -pose.translation.z; @@ -1527,6 +1625,7 @@ void BaseRealSenseNode::pose_callback(rs2::frame frame) _imu_publishers[stream_index].publish(odom_msg); ROS_DEBUG("Publish %s stream", rs2_stream_to_string(frame.get_profile().stream_type())); } + publishMetadata(frame, t, _frame_id[POSE]); } void BaseRealSenseNode::frame_callback(rs2::frame frame) @@ -1542,23 +1641,13 @@ void BaseRealSenseNode::frame_callback(rs2::frame frame) bool placeholder_false(false); if (_is_initialized_time_base.compare_exchange_strong(placeholder_false, true) ) { - setBaseTime(frame_time, RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME == frame.get_frame_timestamp_domain()); - } - - ros::Time t; - if (_sync_frames) - { - t = ros::Time::now(); - } - else - { - t = ros::Time(_ros_time_base.toSec()+ (/*ms*/ frame_time - /*ms*/ _camera_time_base) / /*ms to seconds*/ 1000); + _is_initialized_time_base = setBaseTime(frame_time, frame.get_frame_timestamp_domain()); } + ros::Time t(frameSystemTimeSec(frame)); if (frame.is()) { ROS_DEBUG("Frameset arrived."); - bool is_depth_arrived = false; auto frameset = frame.as(); ROS_DEBUG("List of frameset before applying filters: size: %d", static_cast(frameset.size())); for (auto it = frameset.begin(); it != frameset.end(); ++it) @@ -1574,103 +1663,88 @@ void BaseRealSenseNode::frame_callback(rs2::frame frame) runFirstFrameInitialization(stream_type); } // Clip depth_frame for max range: - rs2::depth_frame depth_frame = frameset.get_depth_frame(); - if (depth_frame && _clipping_distance > 0) + rs2::depth_frame original_depth_frame = frameset.get_depth_frame(); + bool is_color_frame(frameset.get_color_frame()); + if (original_depth_frame && _clipping_distance > 0) { - clip_depth(depth_frame, _clipping_distance); + clip_depth(original_depth_frame, _clipping_distance); } 
ROS_DEBUG("num_filters: %d", static_cast(_filters.size())); for (std::vector::const_iterator filter_it = _filters.begin(); filter_it != _filters.end(); filter_it++) { ROS_DEBUG("Applying filter: %s", filter_it->_name.c_str()); + if ((filter_it->_name == "pointcloud") && (!original_depth_frame)) + continue; + if ((filter_it->_name == "align_to_color") && (!is_color_frame)) + continue; frameset = filter_it->_filter->process(frameset); } ROS_DEBUG("List of frameset after applying filters: size: %d", static_cast(frameset.size())); + bool sent_depth_frame(false); for (auto it = frameset.begin(); it != frameset.end(); ++it) { auto f = (*it); auto stream_type = f.get_profile().stream_type(); auto stream_index = f.get_profile().stream_index(); auto stream_format = f.get_profile().format(); - auto stream_unique_id = f.get_profile().unique_id(); - - ROS_DEBUG("Frameset contain (%s, %d, %s %d) frame. frame_number: %llu ; frame_TS: %f ; ros_TS(NSec): %lu", - rs2_stream_to_string(stream_type), stream_index, rs2_format_to_string(stream_format), stream_unique_id, frame.get_frame_number(), frame_time, t.toNSec()); - } - ROS_DEBUG("END OF LIST"); - ROS_DEBUG_STREAM("Remove streams with same type and index:"); - // TODO - Fix the following issue: - // Currently publishers are set using a map of stream type and index only. - // It means that colorized depth image and colorized depth image - // use the same publisher. - // As a workaround we remove the earlier one, the original one, assuming that if colorizer filter is - // set it means that that's what the client wants. 
- // - bool points_in_set(false); - std::vector frames_to_publish; - std::vector is_in_set; - for (auto it = frameset.begin(); it != frameset.end(); ++it) - { - auto f = (*it); - auto stream_type = f.get_profile().stream_type(); - auto stream_index = f.get_profile().stream_index(); - auto stream_format = f.get_profile().format(); - if (f.is()) - { - if (!points_in_set) - { - points_in_set = true; - frames_to_publish.push_back(f); - } - continue; - } stream_index_pair sip{stream_type,stream_index}; - if (std::find(is_in_set.begin(), is_in_set.end(), sip) == is_in_set.end()) - { - is_in_set.push_back(sip); - frames_to_publish.push_back(f); - } - if (_align_depth && stream_type == RS2_STREAM_DEPTH && stream_format == RS2_FORMAT_Z16) - { - is_depth_arrived = true; - } - } - - for (auto it = frames_to_publish.begin(); it != frames_to_publish.end(); ++it) - { - auto f = (*it); - auto stream_type = f.get_profile().stream_type(); - auto stream_index = f.get_profile().stream_index(); - auto stream_format = f.get_profile().format(); ROS_DEBUG("Frameset contain (%s, %d, %s) frame. 
frame_number: %llu ; frame_TS: %f ; ros_TS(NSec): %lu", - rs2_stream_to_string(stream_type), stream_index, rs2_format_to_string(stream_format), frame.get_frame_number(), frame_time, t.toNSec()); + rs2_stream_to_string(stream_type), stream_index, rs2_format_to_string(stream_format), f.get_frame_number(), frame_time, t.toNSec()); + if (f.is()) + ROS_DEBUG_STREAM("frame: " << f.as().get_width() << " x " << f.as().get_height()); if (f.is()) { - if (0 != _pointcloud_publisher.getNumSubscribers()) + publishPointCloud(f.as(), t, frameset); + continue; + } + if (stream_type == RS2_STREAM_DEPTH) + { + if (sent_depth_frame) continue; + sent_depth_frame = true; + if (_align_depth && is_color_frame) { - ROS_DEBUG("Publish pointscloud"); - publishPointCloud(f.as(), t, frameset); + publishFrame(f, t, COLOR, + _depth_aligned_image, + _depth_aligned_info_publisher, + _depth_aligned_image_publishers, + false, + _depth_aligned_seq, + _depth_aligned_camera_info, + _depth_aligned_encoding); + continue; } - continue; } - stream_index_pair sip{stream_type,stream_index}; publishFrame(f, t, sip, _image, _info_publisher, - _image_publishers, _seq, - _camera_info, _optical_frame_id, + _image_publishers, + true, + _seq, + _camera_info, _encoding); } - - if (_align_depth && is_depth_arrived) + if (original_depth_frame && _align_depth) { - ROS_DEBUG("publishAlignedDepthToOthers(...)"); - publishAlignedDepthToOthers(frameset, t); + rs2::frame frame_to_send; + if (_colorizer) + frame_to_send = _colorizer->process(original_depth_frame); + else + frame_to_send = original_depth_frame; + + publishFrame(frame_to_send, t, + DEPTH, + _image, + _info_publisher, + _image_publishers, + true, + _seq, + _camera_info, + _encoding); } } else if (frame.is()) @@ -1693,8 +1767,10 @@ void BaseRealSenseNode::frame_callback(rs2::frame frame) sip, _image, _info_publisher, - _image_publishers, _seq, - _camera_info, _optical_frame_id, + _image_publishers, + true, + _seq, + _camera_info, _encoding); } } @@ -1703,7 
+1779,7 @@ void BaseRealSenseNode::frame_callback(rs2::frame frame) ROS_ERROR_STREAM("An error has occurred during frame callback: " << ex.what()); } _synced_imu_publisher->Resume(); -}; // frame_callback +} // frame_callback void BaseRealSenseNode::multiple_message_callback(rs2::frame frame, imu_sync_method sync_method) { @@ -1723,21 +1799,36 @@ void BaseRealSenseNode::multiple_message_callback(rs2::frame frame, imu_sync_met } } -void BaseRealSenseNode::setBaseTime(double frame_time, bool warn_no_metadata) +bool BaseRealSenseNode::setBaseTime(double frame_time, rs2_timestamp_domain time_domain) { - ROS_WARN_COND(warn_no_metadata, "Frame metadata isn't available! (frame_timestamp_domain = RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME)"); + ROS_WARN_ONCE(time_domain == RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME ? "Frame metadata isn't available! (frame_timestamp_domain = RS2_TIMESTAMP_DOMAIN_SYSTEM_TIME)" : ""); + if (time_domain == RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK) + { + ROS_WARN("frame's time domain is HARDWARE_CLOCK. 
Timestamps may reset periodically."); + _ros_time_base = ros::Time::now(); + _camera_time_base = frame_time; + return true; + } + return false; +} - _ros_time_base = ros::Time::now(); - _camera_time_base = frame_time; +double BaseRealSenseNode::frameSystemTimeSec(rs2::frame frame) +{ + if (frame.get_frame_timestamp_domain() == RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK) + { + double elapsed_camera_ms = (/*ms*/ frame.get_timestamp() - /*ms*/ _camera_time_base) / 1000.0; + return (_ros_time_base.toSec() + elapsed_camera_ms); + } + else + { + return (frame.get_timestamp() / 1000.0); + } } void BaseRealSenseNode::setupStreams() { ROS_INFO("setupStreams..."); try{ - std::shared_ptr left_profile; - std::shared_ptr right_profile; - // Publish image stream info for (auto& profiles : _enabled_profiles) { @@ -1747,25 +1838,17 @@ void BaseRealSenseNode::setupStreams() { auto video_profile = profile.as(); updateStreamCalibData(video_profile); - - // stream index: 1=left, 2=right - if (video_profile.stream_index() == 1) { left_profile = std::make_shared(video_profile); } - if (video_profile.stream_index() == 2) { right_profile = std::make_shared(video_profile); } } } } - if (left_profile && right_profile) { - updateExtrinsicsCalibData(*left_profile, *right_profile); - } - // Streaming IMAGES std::map > profiles; std::map active_sensors; for (const std::pair>& profile : _enabled_profiles) { std::string module_name = _sensors[profile.first].get_info(RS2_CAMERA_INFO_NAME); - ROS_INFO_STREAM("insert " << rs2_stream_to_string(profile.second.begin()->stream_type()) + ROS_DEBUG_STREAM("insert " << rs2_stream_to_string(profile.second.begin()->stream_type()) << " to " << module_name); profiles[module_name].insert(profiles[module_name].begin(), profile.second.begin(), @@ -1825,11 +1908,17 @@ void BaseRealSenseNode::updateStreamCalibData(const rs2::video_stream_profile& v _camera_info[stream_index].P.at(10) = 1; _camera_info[stream_index].P.at(11) = 0; - if (intrinsic.model == 
RS2_DISTORTION_KANNALA_BRANDT4) + // Set Tx, Ty for right camera + if (stream_index.second == 2) { - _camera_info[stream_index].distortion_model = "equidistant"; - } else { - _camera_info[stream_index].distortion_model = "plumb_bob"; + stream_index_pair sip1{stream_index.first, 1}; + if (_enable[sip1]) + { + const auto& ex = getAProfile(stream_index).get_extrinsics_to(getAProfile(sip1)); + _camera_info[stream_index].header.frame_id = _optical_frame_id[sip1]; + _camera_info[stream_index].P.at(3) = -intrinsic.fx * ex.translation[0] + 0.0; // Tx - avoid -0.0 values. + _camera_info[stream_index].P.at(7) = -intrinsic.fy * ex.translation[1] + 0.0; // Ty - avoid -0.0 values. + } } // set R (rotation matrix) values to identity matrix @@ -1843,8 +1932,16 @@ void BaseRealSenseNode::updateStreamCalibData(const rs2::video_stream_profile& v _camera_info[stream_index].R.at(7) = 0.0; _camera_info[stream_index].R.at(8) = 1.0; - _camera_info[stream_index].D.resize(5); - for (int i = 0; i < 5; i++) + int coeff_size(5); + if (intrinsic.model == RS2_DISTORTION_KANNALA_BRANDT4) + { + _camera_info[stream_index].distortion_model = "equidistant"; + coeff_size = 4; + } else { + _camera_info[stream_index].distortion_model = "plumb_bob"; + } + _camera_info[stream_index].D.resize(coeff_size); + for (int i = 0; i < coeff_size; i++) { _camera_info[stream_index].D.at(i) = intrinsic.coeffs[i]; } @@ -1869,59 +1966,6 @@ void BaseRealSenseNode::updateStreamCalibData(const rs2::video_stream_profile& v } } -void BaseRealSenseNode::updateExtrinsicsCalibData(const rs2::video_stream_profile& left_video_profile, const rs2::video_stream_profile& right_video_profile) -{ - stream_index_pair left{left_video_profile.stream_type(), left_video_profile.stream_index()}; - stream_index_pair right{right_video_profile.stream_type(), right_video_profile.stream_index()}; - - // Get the relative extrinsics between the left and right camera - auto LEFT_T_RIGHT = right_video_profile.get_extrinsics_to(left_video_profile); 
- - auto R = Eigen::Map>(LEFT_T_RIGHT.rotation); - auto T = Eigen::Map>(LEFT_T_RIGHT.translation); - - // force y- and z-axis components to be 0 (but do we also need to force P(0,3) and P(1,3) to be 0?) - T[1] = 0; - T[2] = 0; - - Eigen::Matrix RT; - RT << R, T; - - auto K_right = Eigen::Map>(_camera_info[right].K.data()); - - // Compute Projection matrix for the right camera - auto P_right = K_right.cast() * RT; - - // Note that all matrices are stored in row-major format - // 1. Leave the left rotation matrix as identity - // 2. Set the right rotation matrix - _camera_info[right].R.at(0) = LEFT_T_RIGHT.rotation[0]; - _camera_info[right].R.at(1) = LEFT_T_RIGHT.rotation[1]; - _camera_info[right].R.at(2) = LEFT_T_RIGHT.rotation[2]; - _camera_info[right].R.at(3) = LEFT_T_RIGHT.rotation[3]; - _camera_info[right].R.at(4) = LEFT_T_RIGHT.rotation[4]; - _camera_info[right].R.at(5) = LEFT_T_RIGHT.rotation[5]; - _camera_info[right].R.at(6) = LEFT_T_RIGHT.rotation[6]; - _camera_info[right].R.at(7) = LEFT_T_RIGHT.rotation[7]; - _camera_info[right].R.at(8) = LEFT_T_RIGHT.rotation[8]; - - // 3. Leave the left projection matrix - // 4. 
Set the right projection matrix - _camera_info[right].P.at(0) = P_right(0,0); - _camera_info[right].P.at(1) = P_right(0,1); - _camera_info[right].P.at(2) = P_right(0,2); - _camera_info[right].P.at(3) = P_right(0,3); - _camera_info[right].P.at(4) = P_right(1,0); - _camera_info[right].P.at(5) = P_right(1,1); - _camera_info[right].P.at(6) = P_right(1,2); - _camera_info[right].P.at(7) = P_right(1,3); - _camera_info[right].P.at(8) = P_right(2,0); - _camera_info[right].P.at(9) = P_right(2,1); - _camera_info[right].P.at(10) = P_right(2,2); - _camera_info[right].P.at(11) = P_right(2,3); - -} - tf::Quaternion BaseRealSenseNode::rotationMatrixToQuaternion(const float rotation[9]) const { Eigen::Matrix3f m; @@ -2018,7 +2062,6 @@ void BaseRealSenseNode::SetBaseStream() void BaseRealSenseNode::publishStaticTransforms() { rs2::stream_profile base_profile = getAProfile(_base_stream); - // Publish static transforms if (_publish_tf) { @@ -2081,18 +2124,17 @@ void BaseRealSenseNode::publishDynamicTransforms() // Publish transforms for the cameras ROS_WARN("Publishing dynamic camera transforms (/tf) at %g Hz", _tf_publish_rate); - ros::Rate loop_rate(_tf_publish_rate); - - while (ros::ok()) + std::mutex mu; + std::unique_lock lock(mu); + while (ros::ok() && _is_running) { - // Update the time stamp for publication - ros::Time t = ros::Time::now(); - for(auto& msg : _static_tf_msgs) - msg.header.stamp = t; - - _dynamic_tf_broadcaster.sendTransform(_static_tf_msgs); - - loop_rate.sleep(); + _cv_tf.wait_for(lock, std::chrono::milliseconds((int)(1000.0/_tf_publish_rate)), [&]{return (!(_is_running));}); + { + ros::Time t = ros::Time::now(); + for(auto& msg : _static_tf_msgs) + msg.header.stamp = t; + _dynamic_tf_broadcaster.sendTransform(_static_tf_msgs); + } } } @@ -2124,8 +2166,11 @@ void reverse_memcpy(unsigned char* dst, const unsigned char* src, size_t n) void BaseRealSenseNode::publishPointCloud(rs2::points pc, const ros::Time& t, const rs2::frameset& frameset) { - 
std::vector::iterator pc_filter = find_if(_filters.begin(), _filters.end(), [] (NamedFilter s) { return s._name == "pointcloud"; } ); - rs2_stream texture_source_id = static_cast((int)pc_filter->_filter->get_option(rs2_option::RS2_OPTION_STREAM_FILTER)); + if (0 == _pointcloud_publisher.getNumSubscribers()) + return; + ROS_INFO_STREAM_ONCE("publishing " << (_ordered_pc ? "" : "un") << "ordered pointcloud."); + + rs2_stream texture_source_id = static_cast(_pointcloud_filter->get_option(rs2_option::RS2_OPTION_STREAM_FILTER)); bool use_texture = texture_source_id != RS2_STREAM_ANY; static int warn_count(0); static const int DISPLAY_WARN_NUMBER(5); @@ -2133,14 +2178,14 @@ void BaseRealSenseNode::publishPointCloud(rs2::points pc, const ros::Time& t, co if (use_texture) { std::set available_formats{ rs2_format::RS2_FORMAT_RGB8, rs2_format::RS2_FORMAT_Y8 }; - - texture_frame_itr = find_if(frameset.begin(), frameset.end(), [&texture_source_id, &available_formats] (rs2::frame f) + + texture_frame_itr = std::find_if(frameset.begin(), frameset.end(), [&texture_source_id, &available_formats] (rs2::frame f) {return (rs2_stream(f.get_profile().stream_type()) == texture_source_id) && (available_formats.find(f.get_profile().format()) != available_formats.end()); }); if (texture_frame_itr == frameset.end()) { warn_count++; - std::string texture_source_name = pc_filter->_filter->get_option_value_description(rs2_option::RS2_OPTION_STREAM_FILTER, static_cast(texture_source_id)); + std::string texture_source_name = _pointcloud_filter->get_option_value_description(rs2_option::RS2_OPTION_STREAM_FILTER, static_cast(texture_source_id)); ROS_WARN_STREAM_COND(warn_count == DISPLAY_WARN_NUMBER, "No stream match for pointcloud chosen texture " << texture_source_name); return; } @@ -2153,30 +2198,20 @@ void BaseRealSenseNode::publishPointCloud(rs2::points pc, const ros::Time& t, co const rs2::vertex* vertex = pc.get_vertices(); const rs2::texture_coordinate* color_point = 
pc.get_texture_coordinates(); - _valid_pc_indices.clear(); - for (size_t point_idx=0; point_idx < pc.size(); point_idx++, vertex++, color_point++) - { - if (static_cast(vertex->z) > 0) - { - float i = static_cast(color_point->u); - float j = static_cast(color_point->v); - if (_allow_no_texture_points || (i >= 0.f && i <= 1.f && j >= 0.f && j <= 1.f)) - { - _valid_pc_indices.push_back(point_idx); - } - } - } - - _msg_pointcloud.header.stamp = t; - _msg_pointcloud.header.frame_id = _optical_frame_id[DEPTH]; - _msg_pointcloud.width = _valid_pc_indices.size(); - _msg_pointcloud.height = 1; - _msg_pointcloud.is_dense = true; + rs2_intrinsics depth_intrin = pc.get_profile().as().get_intrinsics(); sensor_msgs::PointCloud2Modifier modifier(_msg_pointcloud); - modifier.setPointCloud2FieldsByString(1, "xyz"); + modifier.setPointCloud2FieldsByString(1, "xyz"); + modifier.resize(pc.size()); + if (_ordered_pc) + { + _msg_pointcloud.width = depth_intrin.width; + _msg_pointcloud.height = depth_intrin.height; + _msg_pointcloud.is_dense = false; + } vertex = pc.get_vertices(); + size_t valid_count(0); if (use_texture) { rs2::video_frame texture_frame = (*texture_frame_itr).as(); @@ -2207,58 +2242,67 @@ void BaseRealSenseNode::publishPointCloud(rs2::points pc, const ros::Time& t, co color_point = pc.get_texture_coordinates(); float color_pixel[2]; - unsigned int prev_idx(0); - for (auto idx=_valid_pc_indices.begin(); idx != _valid_pc_indices.end(); idx++) + for (size_t point_idx=0; point_idx < pc.size(); point_idx++, vertex++, color_point++) { - unsigned int idx_jump(*idx-prev_idx); - prev_idx = *idx; - vertex+=idx_jump; - color_point+=idx_jump; - - *iter_x = vertex->x; - *iter_y = vertex->y; - *iter_z = vertex->z; - float i(color_point->u); float j(color_point->v); - if (i >= 0.f && i <= 1.f && j >= 0.f && j <= 1.f) + bool valid_color_pixel(i >= 0.f && i <=1.f && j >= 0.f && j <=1.f); + bool valid_pixel(vertex->z > 0 && (valid_color_pixel || _allow_no_texture_points)); + if 
(valid_pixel || _ordered_pc) { - color_pixel[0] = i * texture_width; - color_pixel[1] = j * texture_height; - int pixx = static_cast(color_pixel[0]); - int pixy = static_cast(color_pixel[1]); - int offset = (pixy * texture_width + pixx) * num_colors; - reverse_memcpy(&(*iter_color), color_data+offset, num_colors); // PointCloud2 order of rgb is bgr. - } + *iter_x = vertex->x; + *iter_y = vertex->y; + *iter_z = vertex->z; - ++iter_x; ++iter_y; ++iter_z; - ++iter_color; + if (valid_color_pixel) + { + color_pixel[0] = i * texture_width; + color_pixel[1] = j * texture_height; + int pixx = static_cast(color_pixel[0]); + int pixy = static_cast(color_pixel[1]); + int offset = (pixy * texture_width + pixx) * num_colors; + reverse_memcpy(&(*iter_color), color_data+offset, num_colors); // PointCloud2 order of rgb is bgr. + } + ++iter_x; ++iter_y; ++iter_z; + ++iter_color; + ++valid_count; + } } } else { std::string format_str = "intensity"; - _msg_pointcloud.point_step = addPointField(_msg_pointcloud, format_str.c_str(), 1, sensor_msgs::PointField::FLOAT32, _msg_pointcloud.point_step); _msg_pointcloud.row_step = _msg_pointcloud.width * _msg_pointcloud.point_step; _msg_pointcloud.data.resize(_msg_pointcloud.height * _msg_pointcloud.row_step); sensor_msgs::PointCloud2Iteratoriter_x(_msg_pointcloud, "x"); sensor_msgs::PointCloud2Iteratoriter_y(_msg_pointcloud, "y"); sensor_msgs::PointCloud2Iteratoriter_z(_msg_pointcloud, "z"); - unsigned int prev_idx(0); - for (auto idx=_valid_pc_indices.begin(); idx != _valid_pc_indices.end(); idx++) - { - unsigned int idx_jump(*idx-prev_idx); - prev_idx = *idx; - vertex+=idx_jump; - *iter_x = vertex->x; - *iter_y = vertex->y; - *iter_z = vertex->z; + for (size_t point_idx=0; point_idx < pc.size(); point_idx++, vertex++) + { + bool valid_pixel(vertex->z > 0); + if (valid_pixel || _ordered_pc) + { + *iter_x = vertex->x; + *iter_y = vertex->y; + *iter_z = vertex->z; - ++iter_x; ++iter_y; ++iter_z; + ++iter_x; ++iter_y; ++iter_z; + ++valid_count; 
+ } } } + _msg_pointcloud.header.stamp = t; + if (_align_depth) _msg_pointcloud.header.frame_id = _optical_frame_id[COLOR]; + else _msg_pointcloud.header.frame_id = _optical_frame_id[DEPTH]; + if (!_ordered_pc) + { + _msg_pointcloud.width = valid_count; + _msg_pointcloud.height = 1; + _msg_pointcloud.is_dense = true; + modifier.resize(valid_count); + } _pointcloud_publisher.publish(_msg_pointcloud); } @@ -2281,8 +2325,8 @@ rs2::stream_profile BaseRealSenseNode::getAProfile(const stream_index_pair& stre { const std::vector profiles = _sensors[stream].get_stream_profiles(); return *(std::find_if(profiles.begin(), profiles.end(), - [&stream] (const rs2::stream_profile& profile) { - return ((profile.stream_type() == stream.first) && (profile.stream_index() == stream.second)); + [&stream] (const rs2::stream_profile& profile) { + return ((profile.stream_type() == stream.first) && (profile.stream_index() == stream.second)); })); } @@ -2321,9 +2365,9 @@ void BaseRealSenseNode::publishFrame(rs2::frame f, const ros::Time& t, std::map& images, const std::map& info_publishers, const std::map& image_publishers, + const bool is_publishMetadata, std::map& seq, std::map& camera_info, - const std::map& optical_frame_id, const std::map& encoding, bool copy_data_from_frame) { @@ -2361,44 +2405,88 @@ void BaseRealSenseNode::publishFrame(rs2::frame f, const ros::Time& t, if(0 != info_publisher.getNumSubscribers() || 0 != image_publisher.first.getNumSubscribers()) { + auto& cam_info = camera_info.at(stream); + if (cam_info.width != width) + { + updateStreamCalibData(f.get_profile().as()); + } + cam_info.header.stamp = t; + cam_info.header.seq = seq[stream]; + info_publisher.publish(cam_info); + sensor_msgs::ImagePtr img; img = cv_bridge::CvImage(std_msgs::Header(), encoding.at(stream.first), image).toImageMsg(); img->width = width; img->height = height; img->is_bigendian = false; img->step = width * bpp; - img->header.frame_id = optical_frame_id.at(stream); + img->header.frame_id = 
cam_info.header.frame_id; img->header.stamp = t; img->header.seq = seq[stream]; + image_publisher.first.publish(img); + ROS_DEBUG("%s stream published", rs2_stream_to_string(f.get_profile().stream_type())); + } + if (is_publishMetadata) + { auto& cam_info = camera_info.at(stream); - if (cam_info.width != width) + publishMetadata(f, t, cam_info.header.frame_id); + } +} + +void BaseRealSenseNode::publishMetadata(rs2::frame f, const ros::Time& header_time, const std::string& frame_id) +{ + stream_index_pair stream = {f.get_profile().stream_type(), f.get_profile().stream_index()}; + if (_metadata_publishers.find(stream) != _metadata_publishers.end()) + { + auto& md_publisher = _metadata_publishers.at(stream); + if (0 != md_publisher->getNumSubscribers()) { - updateStreamCalibData(f.get_profile().as()); + realsense2_camera::Metadata msg; + msg.header.frame_id = frame_id; + msg.header.stamp = header_time; + std::stringstream json_data; + const char* separator = ","; + json_data << "{"; + // Add additional fields: + json_data << "\"" << "frame_number" << "\":" << f.get_frame_number(); + json_data << separator << "\"" << "clock_domain" << "\":" << "\"" << create_graph_resource_name(rs2_timestamp_domain_to_string(f.get_frame_timestamp_domain())) << "\""; + json_data << separator << "\"" << "frame_timestamp" << "\":" << std::fixed << f.get_timestamp(); + + for (auto i = 0; i < RS2_FRAME_METADATA_COUNT; i++) + { + if (f.supports_frame_metadata((rs2_frame_metadata_value)i)) + { + rs2_frame_metadata_value mparam = (rs2_frame_metadata_value)i; + std::string name = create_graph_resource_name(rs2_frame_metadata_to_string(mparam)); + if (RS2_FRAME_METADATA_FRAME_TIMESTAMP == i) + { + name = "hw_timestamp"; + } + rs2_metadata_type val = f.get_frame_metadata(mparam); + json_data << separator << "\"" << name << "\":" << val; + } + } + json_data << "}"; + msg.json_data = json_data.str(); + md_publisher->publish(msg); } - cam_info.header.stamp = t; - cam_info.header.seq = seq[stream]; - 
info_publisher.publish(cam_info); - - image_publisher.first.publish(img); - // ROS_INFO_STREAM("fid: " << cam_info.header.seq << ", time: " << std::setprecision (20) << t.toSec()); - ROS_DEBUG("%s stream published", rs2_stream_to_string(f.get_profile().stream_type())); } } bool BaseRealSenseNode::getEnabledProfile(const stream_index_pair& stream_index, rs2::stream_profile& profile) - { - // Assuming that all D400 SKUs have depth sensor - auto profiles = _enabled_profiles[stream_index]; - auto it = std::find_if(profiles.begin(), profiles.end(), - [&](const rs2::stream_profile& profile) - { return (profile.stream_type() == stream_index.first); }); - if (it == profiles.end()) - return false; +{ + // Assuming that all D400 SKUs have depth sensor + auto profiles = _enabled_profiles[stream_index]; + auto it = std::find_if(profiles.begin(), profiles.end(), + [&](const rs2::stream_profile& profile) + { return (profile.stream_type() == stream_index.first); }); + if (it == profiles.end()) + return false; - profile = *it; - return true; - } + profile = *it; + return true; +} void BaseRealSenseNode::startMonitoring() { @@ -2412,7 +2500,7 @@ void BaseRealSenseNode::startMonitoring() std::mutex mu; std::unique_lock lock(mu); while(_is_running) { - _cv.wait_for(lock, std::chrono::milliseconds(time_interval), [&]{return !_is_running;}); + _cv_monitoring.wait_for(lock, std::chrono::milliseconds(time_interval), [&]{return !_is_running;}); if (_is_running) { publish_temperature(); @@ -2446,9 +2534,11 @@ void BaseRealSenseNode::publish_temperature() void BaseRealSenseNode::publish_frequency_update() { + std::chrono::milliseconds timespan(1); for (auto &image_publisher : _image_publishers) { image_publisher.second.second->update(); + std::this_thread::sleep_for(timespan); } } @@ -2463,3 +2553,27 @@ void TemperatureDiagnostics::diagnostics(diagnostic_updater::DiagnosticStatusWra status.summary(0, "OK"); status.add("Index", _crnt_temp); } + +void BaseRealSenseNode::publishServices() +{ + 
_device_info_srv = std::make_shared(_pnh.advertiseService("device_info", &BaseRealSenseNode::getDeviceInfo, this)); +} + +bool BaseRealSenseNode::getDeviceInfo(realsense2_camera::DeviceInfo::Request&, + realsense2_camera::DeviceInfo::Response& res) +{ + res.device_name = _dev.supports(RS2_CAMERA_INFO_NAME) ? create_graph_resource_name(_dev.get_info(RS2_CAMERA_INFO_NAME)) : ""; + res.serial_number = _dev.supports(RS2_CAMERA_INFO_SERIAL_NUMBER) ? _dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) : ""; + res.firmware_version = _dev.supports(RS2_CAMERA_INFO_FIRMWARE_VERSION) ? _dev.get_info(RS2_CAMERA_INFO_FIRMWARE_VERSION) : ""; + res.usb_type_descriptor = _dev.supports(RS2_CAMERA_INFO_USB_TYPE_DESCRIPTOR) ? _dev.get_info(RS2_CAMERA_INFO_USB_TYPE_DESCRIPTOR) : ""; + res.firmware_update_id = _dev.supports(RS2_CAMERA_INFO_FIRMWARE_UPDATE_ID) ? _dev.get_info(RS2_CAMERA_INFO_FIRMWARE_UPDATE_ID) : ""; + + std::stringstream sensors_names; + for(auto&& sensor : _dev_sensors) + { + sensors_names << create_graph_resource_name(sensor.get_info(RS2_CAMERA_INFO_NAME)) << ","; + } + + res.sensors = sensors_names.str().substr(0, sensors_names.str().size()-1); + return true; +} diff --git a/realsense2_camera/src/realsense_node_factory.cpp b/realsense2_camera/src/realsense_node_factory.cpp index 248df921e7..282662a323 100644 --- a/realsense2_camera/src/realsense_node_factory.cpp +++ b/realsense2_camera/src/realsense_node_factory.cpp @@ -1,9 +1,9 @@ // License: Apache 2.0. See LICENSE file in root directory. // Copyright(c) 2017 Intel Corporation. 
All Rights Reserved -#include "../include/realsense_node_factory.h" -#include "../include/base_realsense_node.h" -#include "../include/t265_realsense_node.h" +#include "realsense2_camera/realsense_node_factory.h" +#include "realsense2_camera/base_realsense_node.h" +#include "realsense2_camera/t265_realsense_node.h" #include #include #include @@ -209,7 +209,6 @@ void RealSenseNodeFactory::getDevice(rs2::device_list list) ROS_INFO("Resetting device..."); _device.hardware_reset(); _device = rs2::device(); - } catch(const std::exception& ex) { @@ -223,21 +222,7 @@ void RealSenseNodeFactory::change_device_callback(rs2::event_information& info) if (info.was_removed(_device)) { ROS_ERROR("The device has been disconnected!"); - _realSenseNode.reset(nullptr); - _device = rs2::device(); - } - if (!_device) - { - rs2::device_list new_devices = info.get_new_devices(); - if (new_devices.size() > 0) - { - ROS_INFO("Checking new devices..."); - getDevice(new_devices); - if (_device) - { - StartDevice(); - } - } + reset(); } } @@ -254,6 +239,13 @@ bool RealSenseNodeFactory::toggle_sensor_callback(std_srvs::SetBool::Request &re void RealSenseNodeFactory::onInit() { + auto nh = getNodeHandle(); + _init_timer = nh.createWallTimer(ros::WallDuration(0.01), &RealSenseNodeFactory::initialize, this, true); +} + +void RealSenseNodeFactory::initialize(const ros::WallTimerEvent &ignored) +{ + _device = rs2::device(); try { #ifdef BPDEBUG @@ -264,9 +256,13 @@ void RealSenseNodeFactory::onInit() ros::NodeHandle nh = getNodeHandle(); auto privateNh = getPrivateNodeHandle(); privateNh.param("serial_no", _serial_no, std::string("")); - privateNh.param("usb_port_id", _usb_port_id, std::string("")); - privateNh.param("device_type", _device_type, std::string("")); - toggle_sensor_srv = nh.advertiseService("enable", &RealSenseNodeFactory::toggle_sensor_callback, this); + privateNh.param("usb_port_id", _usb_port_id, std::string("")); + privateNh.param("device_type", _device_type, std::string("")); + + 
if (!toggle_sensor_srv) + { + toggle_sensor_srv = nh.advertiseService("enable", &RealSenseNodeFactory::toggle_sensor_callback, this); + } std::string rosbag_filename(""); privateNh.param("rosbag_filename", rosbag_filename, std::string("")); @@ -291,9 +287,15 @@ void RealSenseNodeFactory::onInit() { privateNh.param("initial_reset", _initial_reset, false); + _is_alive = true; _query_thread = std::thread([=]() { - std::chrono::milliseconds timespan(6000); + double reconnect_timeout; + privateNh.param("reconnect_timeout", reconnect_timeout, 6.0); + double wait_for_device_timeout; + privateNh.param("wait_for_device_timeout", wait_for_device_timeout, -1.0); + std::chrono::milliseconds timespan(static_cast(reconnect_timeout*1e3)); + ros::Time first_try_time = ros::Time::now(); while (_is_alive && !_device) { getDevice(_ctx.query_devices()); @@ -305,10 +307,29 @@ void RealSenseNodeFactory::onInit() } else { - std::this_thread::sleep_for(timespan); + std::chrono::milliseconds actual_timespan(timespan); + if (wait_for_device_timeout > 0) + { + auto time_to_timeout(wait_for_device_timeout - (ros::Time::now() - first_try_time).toSec()); + if (time_to_timeout < 0) + { + ROS_ERROR_STREAM("wait for device timeout of " << wait_for_device_timeout << " secs expired"); + exit(1); + } + else + { + double max_timespan_secs(std::chrono::duration_cast(timespan).count()); + actual_timespan = std::chrono::milliseconds (static_cast(std::min(max_timespan_secs, time_to_timeout) * 1e3)); + } + } + std::this_thread::sleep_for(actual_timespan); } } }); + if (!_reset_srv) + { + _reset_srv = privateNh.advertiseService("reset", &RealSenseNodeFactory::handleReset, this); + } } } catch(const std::exception& ex) @@ -326,44 +347,83 @@ void RealSenseNodeFactory::onInit() void RealSenseNodeFactory::StartDevice() { if (_realSenseNode) _realSenseNode.reset(); - ros::NodeHandle nh = getNodeHandle(); - ros::NodeHandle privateNh = getPrivateNodeHandle(); - // TODO - std::string 
pid_str(_device.get_info(RS2_CAMERA_INFO_PRODUCT_ID)); - uint16_t pid = std::stoi(pid_str, 0, 16); - switch(pid) + try { - case SR300_PID: - case SR300v2_PID: - case RS400_PID: - case RS405_PID: - case RS410_PID: - case RS460_PID: - case RS415_PID: - case RS420_PID: - case RS420_MM_PID: - case RS430_PID: - case RS430_MM_PID: - case RS430_MM_RGB_PID: - case RS435_RGB_PID: - case RS435i_RGB_PID: - case RS455_PID: - case RS465_PID: - case RS_USB2_PID: - case RS_L515_PID_PRE_PRQ: - case RS_L515_PID: - _realSenseNode = std::unique_ptr(new BaseRealSenseNode(nh, privateNh, _device, _serial_no)); - break; - case RS_T265_PID: - _realSenseNode = std::unique_ptr(new T265RealsenseNode(nh, privateNh, _device, _serial_no)); - break; - default: - ROS_FATAL_STREAM("Unsupported device!" << " Product ID: 0x" << pid_str); - ros::shutdown(); - exit(1); + ros::NodeHandle nh = getNodeHandle(); + ros::NodeHandle privateNh = getPrivateNodeHandle(); + // TODO + std::string pid_str(_device.get_info(RS2_CAMERA_INFO_PRODUCT_ID)); + uint16_t pid = std::stoi(pid_str, 0, 16); + switch(pid) + { + case SR300_PID: + case SR300v2_PID: + case RS400_PID: + case RS405_PID: + case RS410_PID: + case RS460_PID: + case RS415_PID: + case RS420_PID: + case RS420_MM_PID: + case RS430_PID: + case RS430_MM_PID: + case RS430_MM_RGB_PID: + case RS435_RGB_PID: + case RS435i_RGB_PID: + case RS455_PID: + case RS465_PID: + case RS_USB2_PID: + case RS_L515_PID_PRE_PRQ: + case RS_L515_PID: + case RS_L535_PID: + _realSenseNode = std::shared_ptr(new BaseRealSenseNode(nh, privateNh, _device, _serial_no)); + break; + case RS_T265_PID: + _realSenseNode = std::shared_ptr(new T265RealsenseNode(nh, privateNh, _device, _serial_no)); + break; + default: + ROS_FATAL_STREAM("Unsupported device!" 
<< " Product ID: 0x" << pid_str); + ros::shutdown(); + exit(1); + } + assert(_realSenseNode); + _realSenseNode->publishTopics(); + } + catch (const rs2::error& e) + { + ROS_ERROR_STREAM("Exception: " << e.what()); + } +} + +void RealSenseNodeFactory::reset() +{ + _is_alive = false; + if (_query_thread.joinable()) + { + _query_thread.join(); } - assert(_realSenseNode); - _realSenseNode->publishTopics(); + + try + { + _realSenseNode.reset(); + if (_device) + { + _device.hardware_reset(); + _device = rs2::device(); + } + } + catch (const rs2::error& e) + { + ROS_ERROR_STREAM("Exception: " << e.what()); + } + + _init_timer = getNodeHandle().createWallTimer(ros::WallDuration(1.0), &RealSenseNodeFactory::initialize, this, true); +} + +bool RealSenseNodeFactory::handleReset(std_srvs::Empty::Request& request, std_srvs::Empty::Response& response) +{ + reset(); + return true; } void RealSenseNodeFactory::tryGetLogSeverity(rs2_log_severity& severity) const diff --git a/realsense2_camera/src/t265_realsense_node.cpp b/realsense2_camera/src/t265_realsense_node.cpp index 2a4e76db25..9741619818 100644 --- a/realsense2_camera/src/t265_realsense_node.cpp +++ b/realsense2_camera/src/t265_realsense_node.cpp @@ -1,4 +1,4 @@ -#include "../include/t265_realsense_node.h" +#include "realsense2_camera/t265_realsense_node.h" using namespace realsense2_camera; diff --git a/realsense2_camera/srv/DeviceInfo.srv b/realsense2_camera/srv/DeviceInfo.srv new file mode 100644 index 0000000000..d41600c639 --- /dev/null +++ b/realsense2_camera/srv/DeviceInfo.srv @@ -0,0 +1,7 @@ +--- +string device_name +string serial_number +string firmware_version +string usb_type_descriptor +string firmware_update_id +string sensors diff --git a/realsense2_description/CHANGELOG.rst b/realsense2_description/CHANGELOG.rst index 105e129a1e..cd922b929f 100644 --- a/realsense2_description/CHANGELOG.rst +++ b/realsense2_description/CHANGELOG.rst @@ -2,6 +2,34 @@ Changelog for package realsense2_description 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +2.3.2 (2021-11-15) +------------------ +* Add D455 description +* Add missing aluminum material to d415 model. +* Contributors: Gilad Bretter, doronhi + +2.3.1 (2021-07-01) +------------------ +* add imu frames to _l515.urdf.xacro +* Contributors: Simon Honigmann, doronhi + +2.3.0 (2021-05-05) +------------------ + +2.2.24 (2021-04-21) +------------------- +* Add conditional param use_mesh. +* Contributors: Teo Cardoso + +2.2.23 (2021-03-24) +------------------- + +2.2.22 (2021-02-18) +------------------- +* Fix mass of d415 +* Consistent add_plug in xacros and launch files +* Contributors: Manuel Stahl, Tim Übelhör, doronhi + 2.2.21 (2020-12-31) ------------------- diff --git a/realsense2_description/launch/view_d415_model.launch b/realsense2_description/launch/view_d415_model.launch index e802c95c10..670994a780 100644 --- a/realsense2_description/launch/view_d415_model.launch +++ b/realsense2_description/launch/view_d415_model.launch @@ -1,5 +1,5 @@ - + diff --git a/realsense2_description/launch/view_d435_model.launch b/realsense2_description/launch/view_d435_model.launch index d647c1078e..14f1867072 100644 --- a/realsense2_description/launch/view_d435_model.launch +++ b/realsense2_description/launch/view_d435_model.launch @@ -1,5 +1,5 @@ - + diff --git a/realsense2_description/launch/view_d455_model.launch b/realsense2_description/launch/view_d455_model.launch new file mode 100644 index 0000000000..1f6a161214 --- /dev/null +++ b/realsense2_description/launch/view_d455_model.launch @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/realsense2_description/launch/view_l515_model.launch b/realsense2_description/launch/view_l515_model.launch index 1564873977..1ba9c6d498 100644 --- a/realsense2_description/launch/view_l515_model.launch +++ b/realsense2_description/launch/view_l515_model.launch @@ -1,5 +1,5 @@ - + diff --git a/realsense2_description/meshes/d455.stl b/realsense2_description/meshes/d455.stl new file mode 100644 
index 0000000000..9b30406e53 Binary files /dev/null and b/realsense2_description/meshes/d455.stl differ diff --git a/realsense2_description/package.xml b/realsense2_description/package.xml index 75f6a958fe..96709be019 100644 --- a/realsense2_description/package.xml +++ b/realsense2_description/package.xml @@ -1,9 +1,8 @@ realsense2_description - 2.2.21 + 2.3.2 RealSense Camera description package for Intel 3D D400 cameras - Sergey Dorodnicov Doron Hirshberg Apache 2.0 diff --git a/realsense2_description/urdf/_d415.urdf.xacro b/realsense2_description/urdf/_d415.urdf.xacro index c7c743cd80..07306bfa0b 100644 --- a/realsense2_description/urdf/_d415.urdf.xacro +++ b/realsense2_description/urdf/_d415.urdf.xacro @@ -12,7 +12,7 @@ aluminum peripherial evaluation case. - + - - - + + + + + + + + + + + + + @@ -66,8 +73,8 @@ aluminum peripherial evaluation case. + - @@ -136,7 +143,7 @@ aluminum peripherial evaluation case. - + diff --git a/realsense2_description/urdf/_d435.urdf.xacro b/realsense2_description/urdf/_d435.urdf.xacro index bf6b66cf3f..92f17b9120 100644 --- a/realsense2_description/urdf/_d435.urdf.xacro +++ b/realsense2_description/urdf/_d435.urdf.xacro @@ -13,8 +13,7 @@ aluminum peripherial evaluation case. - - + - - - - - + + + + + + + + + + + + + + - + @@ -143,7 +150,7 @@ aluminum peripherial evaluation case. 
- + diff --git a/realsense2_description/urdf/_d455.urdf.xacro b/realsense2_description/urdf/_d455.urdf.xacro new file mode 100644 index 0000000000..b82094e05d --- /dev/null +++ b/realsense2_description/urdf/_d455.urdf.xacro @@ -0,0 +1,197 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/realsense2_description/urdf/_l515.urdf.xacro b/realsense2_description/urdf/_l515.urdf.xacro index c5b3c212f9..5d4da9b792 100644 --- a/realsense2_description/urdf/_l515.urdf.xacro +++ b/realsense2_description/urdf/_l515.urdf.xacro @@ -13,8 +13,7 @@ aluminum peripherial evaluation case. - - + - @@ -45,6 +44,16 @@ aluminum peripherial evaluation case. + + + + + + + + + + @@ -62,10 +71,19 @@ aluminum peripherial evaluation case. - - - - + + + + + + + + + + + + + @@ -142,8 +160,38 @@ aluminum peripherial evaluation case. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + diff --git a/realsense2_description/urdf/_materials.urdf.xacro b/realsense2_description/urdf/_materials.urdf.xacro index 6b365d3949..96239ad12a 100644 --- a/realsense2_description/urdf/_materials.urdf.xacro +++ b/realsense2_description/urdf/_materials.urdf.xacro @@ -8,11 +8,15 @@ Collection of materials to be used in other macros. This avoids the redefinition of materials in case multple cameras are imported. 
--> - - - - - - - + + + + + + + + + + + diff --git a/realsense2_description/urdf/test_d415_camera.urdf.xacro b/realsense2_description/urdf/test_d415_camera.urdf.xacro index a729f7e04e..aaecade306 100644 --- a/realsense2_description/urdf/test_d415_camera.urdf.xacro +++ b/realsense2_description/urdf/test_d415_camera.urdf.xacro @@ -1,10 +1,12 @@ + + - + diff --git a/realsense2_description/urdf/test_d435_camera.urdf.xacro b/realsense2_description/urdf/test_d435_camera.urdf.xacro index 77e2756d54..7d1da6ccef 100644 --- a/realsense2_description/urdf/test_d435_camera.urdf.xacro +++ b/realsense2_description/urdf/test_d435_camera.urdf.xacro @@ -1,10 +1,12 @@ + + - + diff --git a/realsense2_description/urdf/test_d455_camera.urdf.xacro b/realsense2_description/urdf/test_d455_camera.urdf.xacro new file mode 100644 index 0000000000..c581f756ff --- /dev/null +++ b/realsense2_description/urdf/test_d455_camera.urdf.xacro @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/realsense2_description/urdf/test_l515_camera.urdf.xacro b/realsense2_description/urdf/test_l515_camera.urdf.xacro index 98f993211f..6b1c3354a0 100644 --- a/realsense2_description/urdf/test_l515_camera.urdf.xacro +++ b/realsense2_description/urdf/test_l515_camera.urdf.xacro @@ -1,10 +1,12 @@ + + - +