Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion realsense2_camera/include/base_realsense_node.h
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,8 @@ namespace realsense2_camera
void startDynamicTf();
void publishDynamicTransforms();
void publishPointCloud(rs2::points f, const rclcpp::Time& t, const rs2::frameset& frameset);
void publishLabeledPointCloud(rs2::labeled_points points, const rclcpp::Time& t);
bool shouldPublishCameraInfo(const stream_index_pair& sip);
Extrinsics rsExtrinsicsToMsg(const rs2_extrinsics& extrinsics) const;

IMUInfo getImuInfo(const rs2::stream_profile& profile);
Expand Down Expand Up @@ -271,7 +273,7 @@ namespace realsense2_camera

bool _use_intra_process;
std::map<stream_index_pair, std::shared_ptr<image_publisher>> _image_publishers;

rclcpp::Publisher<sensor_msgs::msg::PointCloud2>::SharedPtr _labeled_pointcloud_publisher;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you explain why you have a specific publisher for lpc and not for pc?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Because the depth point cloud and the labeled point cloud can be enabled at the same time, they need two separate publishers.

std::map<stream_index_pair, rclcpp::Publisher<sensor_msgs::msg::Imu>::SharedPtr> _imu_publishers;
std::shared_ptr<rclcpp::Publisher<nav_msgs::msg::Odometry>> _odom_publisher;
std::shared_ptr<SyncedImuPublisher> _synced_imu_publisher;
Expand Down
3 changes: 3 additions & 0 deletions realsense2_camera/include/ros_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,9 @@ namespace realsense2_camera
const stream_index_pair GYRO{RS2_STREAM_GYRO, 0};
const stream_index_pair ACCEL{RS2_STREAM_ACCEL, 0};
const stream_index_pair POSE{RS2_STREAM_POSE, 0};
const stream_index_pair SAFETY{RS2_STREAM_SAFETY, 0};
const stream_index_pair LABELED_POINT_CLOUD{RS2_STREAM_LABELED_POINT_CLOUD, 0};
const stream_index_pair OCCUPANCY{RS2_STREAM_OCCUPANCY, 0};

bool isValidCharInName(char c);

Expand Down
3 changes: 3 additions & 0 deletions realsense2_camera/launch/rs_launch.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@
{'name': 'enable_gyro', 'default': 'false', 'description': "'enable gyro stream'"},
{'name': 'enable_accel', 'default': 'false', 'description': "'enable accel stream'"},
{'name': 'enable_safety', 'default': 'false', 'description': "'enable safety stream'"},
{'name': 'enable_labeled_point_cloud', 'default': 'false', 'description': "'enable labeled point cloud stream'"},
{'name': 'enable_occupancy', 'default': 'false', 'description': "'enable occupancy stream'"},
{'name': 'depth_mapping_camera.profile', 'default': '0,0,0', 'description': "'depth mapping camera profile'"},
{'name': 'enable_pose', 'default': 'true', 'description': "'enable pose stream'"},
{'name': 'pose_fps', 'default': '200', 'description': "''"},
{'name': 'pointcloud.enable', 'default': 'false', 'description': ''},
Expand Down
174 changes: 125 additions & 49 deletions realsense2_camera/src/base_realsense_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
#include <rclcpp/clock.hpp>
#include <fstream>
#include <image_publisher.h>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>

// Header files for disabling intra-process comms for static broadcaster.
#include <rclcpp/publisher_options.hpp>
Expand Down Expand Up @@ -520,29 +522,36 @@ void BaseRealSenseNode::frame_callback(rs2::frame frame)
if (f.is<rs2::video_frame>())
ROS_DEBUG_STREAM("frame: " << f.as<rs2::video_frame>().get_width() << " x " << f.as<rs2::video_frame>().get_height());

if (f.is<rs2::points>())
if (f.is<rs2::labeled_points>())
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

better change this if statement to else if - do you agree?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done

{
publishLabeledPointCloud(f.as<rs2::labeled_points>(), t);
publishMetadata(f, t, OPTICAL_FRAME_ID(sip));
}
else if (f.is<rs2::points>())
{
publishPointCloud(f.as<rs2::points>(), t, frameset);
continue;
}
if (stream_type == RS2_STREAM_DEPTH)
else
{
if (sent_depth_frame) continue;
sent_depth_frame = true;
if (_align_depth_filter->is_enabled())
if (stream_type == RS2_STREAM_DEPTH)
{
publishFrame(f, t, COLOR,
if (sent_depth_frame) continue;
sent_depth_frame = true;
if (_align_depth_filter->is_enabled())
{
publishFrame(f, t, COLOR,
_depth_aligned_image,
_depth_aligned_info_publisher,
_depth_aligned_image_publishers,
false);
continue;
continue;
}
}
publishFrame(f, t, sip,
_image,
_info_publisher,
_image_publishers);
}
publishFrame(f, t, sip,
_image,
_info_publisher,
_image_publishers);
}
if (original_depth_frame && _align_depth_filter->is_enabled())
{
Expand Down Expand Up @@ -580,6 +589,16 @@ void BaseRealSenseNode::frame_callback(rs2::frame frame)
_info_publisher,
_image_publishers);
}
else if (frame.is<rs2::labeled_points>())
{
auto stream_type = frame.get_profile().stream_type();
auto stream_index = frame.get_profile().stream_index();
stream_index_pair sip{stream_type,stream_index};
ROS_DEBUG("Single labeled point cloud frame arrived (%s, %d). frame_number: %llu ; frame_TS: %f ; ros_TS(NSec): %lu",
rs2_stream_to_string(stream_type), stream_index, frame.get_frame_number(), frame_time, t.nanoseconds());
publishLabeledPointCloud(frame.as<rs2::labeled_points>(), t);
publishMetadata(frame, t, OPTICAL_FRAME_ID(sip));
}
_synced_imu_publisher->Resume();
} // frame_callback

Expand Down Expand Up @@ -785,6 +804,59 @@ void BaseRealSenseNode::publishPointCloud(rs2::points pc, const rclcpp::Time& t,
_pc_filter->Publish(pc, t, frameset, frame_id);
}

bool BaseRealSenseNode::shouldPublishCameraInfo(const stream_index_pair& sip)
{
const rs2_stream stream = sip.first;
return (stream != RS2_STREAM_SAFETY && stream != RS2_STREAM_OCCUPANCY && stream != RS2_STREAM_LABELED_POINT_CLOUD);
}

void BaseRealSenseNode::publishLabeledPointCloud(rs2::labeled_points pc, const rclcpp::Time& t)
{
    // Convert an SDK labeled point cloud into a sensor_msgs::PointCloud2
    // (fields: x/y/z FLOAT32 plus a per-point UINT8 "label") and publish it.
    //
    // Also guard against a null publisher: stopPublishers() resets
    // _labeled_pointcloud_publisher, so a late frame arriving through the
    // callback could otherwise dereference a null SharedPtr.
    if (!_labeled_pointcloud_publisher || 0 == _labeled_pointcloud_publisher->get_subscription_count())
        return;

    ROS_DEBUG("Publishing Labeled Point Cloud Frame");

    // Create the PointCloud message.
    // UniquePtr enables intra-process zero-copy publishing when available.
    sensor_msgs::msg::PointCloud2::UniquePtr msg_pointcloud = std::make_unique<sensor_msgs::msg::PointCloud2>();

    // Define the fields of the PointCloud message
    sensor_msgs::PointCloud2Modifier modifier(*msg_pointcloud);
    modifier.setPointCloud2Fields(4, "x", 1, sensor_msgs::msg::PointField::FLOAT32,
                                     "y", 1, sensor_msgs::msg::PointField::FLOAT32,
                                     "z", 1, sensor_msgs::msg::PointField::FLOAT32,
                                     "label", 1, sensor_msgs::msg::PointField::UINT8);

    // The SDK exposes only a flat vertex/label array of pc.size() points, so
    // publish an unordered cloud: height = 1 and width = number of points
    // (per the sensor_msgs/PointCloud2 convention). This replaces the previous
    // hard-coded 320x180 layout, which produced a corrupt message (and
    // out-of-bounds writes) whenever pc.size() != 320*180. resize() updates
    // width, row_step and the data buffer consistently with point_step, so no
    // manual bookkeeping is needed.
    msg_pointcloud->height = 1;
    modifier.resize(pc.size());

    // Fill the PointCloud message with data. The iterators are created only
    // after the final size is fixed: resizing the data buffer afterwards (as
    // the old code did) could reallocate it and invalidate the iterators.
    sensor_msgs::PointCloud2Iterator<float> iter_x(*msg_pointcloud, "x");
    sensor_msgs::PointCloud2Iterator<float> iter_y(*msg_pointcloud, "y");
    sensor_msgs::PointCloud2Iterator<float> iter_z(*msg_pointcloud, "z");
    sensor_msgs::PointCloud2Iterator<uint8_t> iter_label(*msg_pointcloud, "label");
    const rs2::vertex* vertex = pc.get_vertices();
    const uint8_t* label = pc.get_labels();
    for (size_t point_idx = 0; point_idx < pc.size(); ++point_idx, ++vertex, ++label)
    {
        *iter_x = vertex->x;
        *iter_y = vertex->y;
        *iter_z = vertex->z;
        *iter_label = *label;
        ++iter_x; ++iter_y; ++iter_z; ++iter_label;
    }

    msg_pointcloud->header.stamp = t;
    msg_pointcloud->header.frame_id = OPTICAL_FRAME_ID(LABELED_POINT_CLOUD);

    // Publish the PointCloud message (ownership is moved to the middleware).
    _labeled_pointcloud_publisher->publish(std::move(msg_pointcloud));
}


Extrinsics BaseRealSenseNode::rsExtrinsicsToMsg(const rs2_extrinsics& extrinsics) const
{
Expand Down Expand Up @@ -861,50 +933,54 @@ void BaseRealSenseNode::publishFrame(rs2::frame f, const rclcpp::Time& t,
image = fix_depth_scale(image, _depth_scaled_image[stream]);
}

if (info_publishers.find(stream) == info_publishers.end() ||
image_publishers.find(stream) == image_publishers.end())
// if stream is on, and is not a SC stream, publish cameraInfo
if(shouldPublishCameraInfo(stream) && info_publishers.find(stream) != info_publishers.end())
{
auto& info_publisher = info_publishers.at(stream);
if(0 != info_publisher->get_subscription_count())
{
// Stream is already disabled.
return;
auto& cam_info = _camera_info.at(stream);
if (cam_info.width != width)
{
updateStreamCalibData(f.get_profile().as<rs2::video_stream_profile>());
}
cam_info.header.stamp = t;

info_publisher->publish(cam_info);
}
auto& info_publisher = info_publishers.at(stream);
auto& image_publisher = image_publishers.at(stream);
if(0 != info_publisher->get_subscription_count() ||
0 != image_publisher->get_subscription_count())
}

if (image_publishers.find(stream) != image_publishers.end())
{
auto& cam_info = _camera_info.at(stream);
if (cam_info.width != width)
auto &image_publisher = image_publishers.at(stream);
if (0 != image_publisher->get_subscription_count())
{
updateStreamCalibData(f.get_profile().as<rs2::video_stream_profile>());
}
cam_info.header.stamp = t;
info_publisher->publish(cam_info);
// Prepare image topic to be published
// We use UniquePtr for allow intra-process publish when subscribers of that type are available
sensor_msgs::msg::Image::UniquePtr img(new sensor_msgs::msg::Image());

// Prepare image topic to be published
// We use UniquePtr for allow intra-process publish when subscribers of that type are available
sensor_msgs::msg::Image::UniquePtr img(new sensor_msgs::msg::Image());
if (!img)
{
ROS_ERROR("sensor image message allocation failed, frame was dropped");
return;
}

if (!img)
{
ROS_ERROR("sensor image message allocation failed, frame was dropped");
return;
// Convert the CV::Mat into a ROS image message (1 copy is done here)
cv_bridge::CvImage(std_msgs::msg::Header(), _encoding.at(bpp), image).toImageMsg(*img);

// Convert OpenCV Mat to ROS Image
img->header.frame_id = OPTICAL_FRAME_ID(stream);
img->header.stamp = t;
img->height = height;
img->width = width;
img->is_bigendian = false;
img->step = width * bpp;
// Transfer the unique pointer ownership to the RMW
sensor_msgs::msg::Image *msg_address = img.get();
image_publisher->publish(std::move(img));

ROS_DEBUG_STREAM(rs2_stream_to_string(f.get_profile().stream_type()) << " stream published, message address: " << std::hex << msg_address);
}

// Convert the CV::Mat into a ROS image message (1 copy is done here)
cv_bridge::CvImage(std_msgs::msg::Header(), _encoding.at(bpp), image).toImageMsg(*img);

// Convert OpenCV Mat to ROS Image
img->header.frame_id = OPTICAL_FRAME_ID(stream);
img->header.stamp = t;
img->height = height;
img->width = width;
img->is_bigendian = false;
img->step = width * bpp;
// Transfer the unique pointer ownership to the RMW
sensor_msgs::msg::Image* msg_address = img.get();
image_publisher->publish(std::move(img));

ROS_DEBUG_STREAM(rs2_stream_to_string(f.get_profile().stream_type()) << " stream published, message address: " << std::hex << msg_address);
}
if (is_publishMetadata)
{
Expand Down
88 changes: 54 additions & 34 deletions realsense2_camera/src/rs_node_setup.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,8 @@ void BaseRealSenseNode::setAvailableSensors()
if (sensor.is<rs2::depth_sensor>() ||
sensor.is<rs2::color_sensor>() ||
sensor.is<rs2::fisheye_sensor>() ||
sensor.is<rs2::safety_sensor>())
sensor.is<rs2::safety_sensor>() ||
sensor.is<rs2::depth_mapping_sensor>())
{
ROS_DEBUG_STREAM("Set " << module_name << " as VideoSensor.");
rosSensor = std::make_unique<RosSensor>(sensor, _parameters, frame_callback_function, update_sensor_func, hardware_reset_func, _diagnostics_updater, _logger, _use_intra_process, _dev.is<playback>());
Expand Down Expand Up @@ -188,6 +189,11 @@ void BaseRealSenseNode::stopPublishers(const std::vector<stream_profile>& profil
_info_publisher.erase(sip);
_depth_aligned_image_publishers.erase(sip);
_depth_aligned_info_publisher.erase(sip);

if(_labeled_pointcloud_publisher)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In which case would this condition be false?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if it was not initialized at all (e.g., the camera doesn't support Depth Mapping Sensor)

{
_labeled_pointcloud_publisher.reset();
}
}
else if (profile.is<rs2::motion_stream_profile>())
{
Expand Down Expand Up @@ -217,52 +223,66 @@ void BaseRealSenseNode::startPublishers(const std::vector<stream_profile>& profi

if (profile.is<rs2::video_stream_profile>())
{
std::stringstream image_raw, camera_info;
bool rectified_image = false;
if (sensor.rs2::sensor::is<rs2::depth_sensor>())
rectified_image = true;

image_raw << stream_name << "/image_" << ((rectified_image)?"rect_":"") << "raw";
camera_info << stream_name << "/camera_info";

// We can use 2 types of publishers:
// Native RCL publisher that support intra-process zero-copy comunication
// image-transport package publisher that adds a commpressed image topic if package is found installed
if (_use_intra_process)
if (profile.stream_type() != RS2_STREAM_LABELED_POINT_CLOUD)
{
_image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, image_raw.str(), qos);
}
else
{
_image_publishers[sip] = std::make_shared<image_transport_publisher>(_node, image_raw.str(), qos);
ROS_DEBUG_STREAM("image transport publisher was created for topic" << image_raw.str());
}
std::stringstream image_raw, camera_info;
bool rectified_image = false;
if (sensor.rs2::sensor::is<rs2::depth_sensor>())
rectified_image = true;

_info_publisher[sip] = _node.create_publisher<sensor_msgs::msg::CameraInfo>(camera_info.str(),
rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(info_qos), info_qos));

if (_align_depth_filter->is_enabled() && (sip != DEPTH) && sip.second < 2)
{
std::stringstream aligned_image_raw, aligned_camera_info;
aligned_image_raw << "aligned_depth_to_" << stream_name << "/image_raw";
aligned_camera_info << "aligned_depth_to_" << stream_name << "/camera_info";

std::string aligned_stream_name = "aligned_depth_to_" + stream_name;
image_raw << stream_name << "/image_" << ((rectified_image)?"rect_":"") << "raw";
camera_info << stream_name << "/camera_info";

// We can use 2 types of publishers:
// Native RCL publisher that support intra-process zero-copy comunication
// image-transport package publisher that add's a commpressed image topic if the package is installed
// image-transport package publisher that adds a commpressed image topic if package is found installed
if (_use_intra_process)
{
_depth_aligned_image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, aligned_image_raw.str(), qos);
_image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, image_raw.str(), qos);
}
else
{
_depth_aligned_image_publishers[sip] = std::make_shared<image_transport_publisher>(_node, aligned_image_raw.str(), qos);
_image_publishers[sip] = std::make_shared<image_transport_publisher>(_node, image_raw.str(), qos);
ROS_DEBUG_STREAM("image transport publisher was created for topic" << image_raw.str());
}
_depth_aligned_info_publisher[sip] = _node.create_publisher<sensor_msgs::msg::CameraInfo>(aligned_camera_info.str(),

// create cameraInfo publishers only for non-SC streams
if(shouldPublishCameraInfo(sip))
{
_info_publisher[sip] = _node.create_publisher<sensor_msgs::msg::CameraInfo>(camera_info.str(),
rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(info_qos), info_qos));
}

if (_align_depth_filter->is_enabled() && (sip != DEPTH) && sip.second < 2)
{
std::stringstream aligned_image_raw, aligned_camera_info;
aligned_image_raw << "aligned_depth_to_" << stream_name << "/image_raw";
aligned_camera_info << "aligned_depth_to_" << stream_name << "/camera_info";

std::string aligned_stream_name = "aligned_depth_to_" + stream_name;

// We can use 2 types of publishers:
// Native RCL publisher that support intra-process zero-copy comunication
// image-transport package publisher that add's a commpressed image topic if the package is installed
if (_use_intra_process)
{
_depth_aligned_image_publishers[sip] = std::make_shared<image_rcl_publisher>(_node, aligned_image_raw.str(), qos);
}
else
{
_depth_aligned_image_publishers[sip] = std::make_shared<image_transport_publisher>(_node, aligned_image_raw.str(), qos);
ROS_DEBUG_STREAM("image transport publisher was created for topic" << image_raw.str());
}
_depth_aligned_info_publisher[sip] = _node.create_publisher<sensor_msgs::msg::CameraInfo>(aligned_camera_info.str(),
rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(info_qos), info_qos));
}
}
else {

// special handling for labeled point cloud stream, since it a topic of PointCloud messages
// and not a normal image publisher
_labeled_pointcloud_publisher = _node.create_publisher<sensor_msgs::msg::PointCloud2>("labeled_point_cloud/points",
rclcpp::QoS(rclcpp::QoSInitialization::from_rmw(qos),qos));
}
}
else if (profile.is<rs2::motion_stream_profile>())
Expand Down