40#include <rclcpp/qos.hpp>
41#include <rclcpp/version.h>
42#include <tf2_geometry_msgs/tf2_geometry_msgs.hpp>
44#if __has_include(<tf2/LinearMath/Vector3.hpp>)
45#include <tf2/LinearMath/Vector3.hpp>
47#include <tf2/LinearMath/Vector3.h>
49#if __has_include(<tf2/LinearMath/Transform.hpp>)
50#include <tf2/LinearMath/Transform.hpp>
52#include <tf2/LinearMath/Transform.h>
54#include <geometric_shapes/shape_operations.h>
55#include <sensor_msgs/image_encodings.hpp>
66 , image_topic_(
"depth")
68 , near_clipping_plane_distance_(0.3)
69 , far_clipping_plane_distance_(5.0)
70 , shadow_threshold_(0.04)
72 , padding_offset_(0.02)
74 , skip_vertical_pixels_(4)
75 , skip_horizontal_pixels_(6)
76 , image_callback_count_(0)
77 , average_callback_dt_(0.0)
85 , logger_(
moveit::getLogger(
"moveit.ros.depth_image_octomap_updater"))
91 sub_depth_image_.shutdown();
98 node_->get_parameter(name_space +
".image_topic", image_topic_) &&
99 node_->get_parameter(name_space +
".queue_size", queue_size_) &&
100 node_->get_parameter(name_space +
".near_clipping_plane_distance", near_clipping_plane_distance_) &&
101 node_->get_parameter(name_space +
".far_clipping_plane_distance", far_clipping_plane_distance_) &&
102 node_->get_parameter(name_space +
".shadow_threshold", shadow_threshold_) &&
103 node_->get_parameter(name_space +
".padding_scale", padding_scale_) &&
104 node_->get_parameter(name_space +
".padding_offset", padding_offset_) &&
105 node_->get_parameter(name_space +
".max_update_rate", max_update_rate_) &&
106 node_->get_parameter(name_space +
".skip_vertical_pixels", skip_vertical_pixels_) &&
107 node_->get_parameter(name_space +
".skip_horizontal_pixels", skip_horizontal_pixels_) &&
108 node_->get_parameter(name_space +
".filtered_cloud_topic", filtered_cloud_topic_) &&
109 node_->get_parameter(name_space +
".ns", ns_);
112 catch (
const rclcpp::exceptions::InvalidParameterTypeException& e)
114 RCLCPP_ERROR_STREAM(logger_, e.what() <<
'\n');
122 input_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
123 model_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
124 filtered_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
125 filtered_label_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
128 free_space_updater_ = std::make_unique<LazyFreeSpaceUpdater>(
tree_);
131 mesh_filter_ = std::make_unique<mesh_filter::MeshFilter<mesh_filter::StereoCameraModel>>(
133 mesh_filter_->parameters().setDepthRange(near_clipping_plane_distance_, far_clipping_plane_distance_);
134 mesh_filter_->setShadowThreshold(shadow_threshold_);
135 mesh_filter_->setPaddingOffset(padding_offset_);
136 mesh_filter_->setPaddingScale(padding_scale_);
137 mesh_filter_->setTransformCallback(
145 pub_model_depth_image_ = model_depth_transport_->advertiseCamera(
"model_depth", 1);
147 std::string prefix =
"";
151 pub_model_depth_image_ = model_depth_transport_->advertiseCamera(prefix +
"model_depth", 1);
152 if (!filtered_cloud_topic_.empty())
154 pub_filtered_depth_image_ = filtered_depth_transport_->advertiseCamera(prefix + filtered_cloud_topic_, 1);
158 pub_filtered_depth_image_ = filtered_depth_transport_->advertiseCamera(prefix +
"filtered_depth", 1);
161 pub_filtered_label_image_ = filtered_label_transport_->advertiseCamera(prefix +
"filtered_label", 1);
165#if RCLCPP_VERSION_GTE(30, 0, 0)
166 image_transport::create_camera_subscription(
167 *node_, image_topic_,
168 [
this](
const sensor_msgs::msg::Image::ConstSharedPtr& depth_msg,
169 const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg) {
170 return depthImageCallback(depth_msg, info_msg);
172 "raw", rclcpp::SensorDataQoS());
175 image_transport::create_camera_subscription(
176 node_.get(), image_topic_,
177 [
this](
const sensor_msgs::msg::Image::ConstSharedPtr& depth_msg,
178 const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg) {
179 return depthImageCallback(depth_msg, info_msg);
181 "raw", rmw_qos_profile_sensor_data);
187 sub_depth_image_.shutdown();
195 if (shape->type == shapes::MESH)
197 h = mesh_filter_->addMesh(
static_cast<const shapes::Mesh&
>(*shape));
201 std::unique_ptr<shapes::Mesh> m(shapes::createMeshFromShape(shape.get()));
203 h = mesh_filter_->addMesh(*m);
207 RCLCPP_ERROR(logger_,
"Mesh filter not yet initialized!");
214 mesh_filter_->removeMesh(handle);
222 RCLCPP_ERROR(logger_,
"Internal error. Mesh filter handle %u not found", h);
225 transform = it->second;
231const bool HOST_IS_BIG_ENDIAN = []() {
235 char c[
sizeof(uint32_t)];
236 } bint = { 0x01020304 };
237 return bint.c[0] == 1;
241void DepthImageOctomapUpdater::depthImageCallback(
const sensor_msgs::msg::Image::ConstSharedPtr& depth_msg,
242 const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg)
244 RCLCPP_DEBUG(logger_,
"Received a new depth image message (frame = '%s', encoding='%s')",
245 depth_msg->header.frame_id.c_str(), depth_msg->encoding.c_str());
246 rclcpp::Time
start = node_->now();
248 if (max_update_rate_ > 0)
251 if (node_->now() - last_update_time_ <= rclcpp::Duration::from_seconds(1.0 / max_update_rate_))
253 last_update_time_ = node_->now();
257 if (image_callback_count_ < 1000)
259 if (image_callback_count_ > 0)
261 const double dt_start = (
start - last_depth_callback_start_).seconds();
262 if (image_callback_count_ < 2)
264 average_callback_dt_ = dt_start;
268 average_callback_dt_ = ((image_callback_count_ - 1) * average_callback_dt_ + dt_start) /
269 static_cast<double>(image_callback_count_);
277 image_callback_count_ = 2;
279 last_depth_callback_start_ =
start;
280 ++image_callback_count_;
286 tf2::Stamped<tf2::Transform> map_h_sensor;
289 map_h_sensor.setIdentity();
296 static const double TEST_DT = 0.005;
298 static_cast<int>((0.5 + average_callback_dt_ / TEST_DT) * std::max(1, (
static_cast<int>(queue_size_) / 2)));
301 for (
int t = 0; t < nt; ++t)
305 tf2::fromMsg(tf_buffer_->lookupTransform(
monitor_->
getMapFrame(), depth_msg->header.frame_id,
306 depth_msg->header.stamp),
311 catch (tf2::TransformException& ex)
313 std::chrono::duration<double, std::nano> tmp_duration(TEST_DT);
314 static const rclcpp::Duration D(tmp_duration);
316 std::this_thread::sleep_for(D.to_chrono<std::chrono::seconds>());
319 static const unsigned int MAX_TF_COUNTER = 1000;
323 if (good_tf_ > MAX_TF_COUNTER)
325 const unsigned int div = MAX_TF_COUNTER / 10;
333 if (failed_tf_ > good_tf_)
335#pragma GCC diagnostic push
336#pragma GCC diagnostic ignored "-Wold-style-cast"
337 RCLCPP_WARN_THROTTLE(logger_, *node_->get_clock(), 1000,
338 "More than half of the image messages discarded due to TF being unavailable (%u%%). "
339 "Transform error of sensor data: %s; quitting callback.",
340 (100 * failed_tf_) / (good_tf_ + failed_tf_), err.c_str());
341#pragma GCC diagnostic pop
345#pragma GCC diagnostic push
346#pragma GCC diagnostic ignored "-Wold-style-cast"
347 RCLCPP_DEBUG_THROTTLE(logger_, *node_->get_clock(), 1000,
348 "Transform error of sensor data: %s; quitting callback", err.c_str());
349#pragma GCC diagnostic pop
351 if (failed_tf_ > MAX_TF_COUNTER)
353 const unsigned int div = MAX_TF_COUNTER / 10;
367 if (depth_msg->is_bigendian && !HOST_IS_BIG_ENDIAN)
369#pragma GCC diagnostic push
370#pragma GCC diagnostic ignored "-Wold-style-cast"
371 RCLCPP_ERROR_THROTTLE(logger_, *node_->get_clock(), 1000,
"endian problem: received image data does not match host");
372#pragma GCC diagnostic pop
375 const int w = depth_msg->width;
376 const int h = depth_msg->height;
380 params.
setCameraParameters(info_msg->k[0], info_msg->k[4], info_msg->k[2], info_msg->k[5]);
383 const bool is_u_short = depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1;
386 mesh_filter_->filter(&depth_msg->data[0], GL_UNSIGNED_SHORT);
390 if (depth_msg->encoding != sensor_msgs::image_encodings::TYPE_32FC1)
392#pragma GCC diagnostic push
393#pragma GCC diagnostic ignored "-Wold-style-cast"
394 RCLCPP_ERROR_THROTTLE(logger_, *node_->get_clock(), 1000,
"Unexpected encoding type: '%s'. Ignoring input.",
395 depth_msg->encoding.c_str());
396#pragma GCC diagnostic pop
399 mesh_filter_->filter(&depth_msg->data[0], GL_FLOAT);
405 const double px = info_msg->k[2];
406 const double py = info_msg->k[5];
409 if (w >=
static_cast<int>(x_cache_.size()) || h >=
static_cast<int>(y_cache_.size()) || K2_ != px || K5_ !=
py ||
410 K0_ != info_msg->k[0] || K4_ != info_msg->k[4])
414 K0_ = info_msg->k[0];
415 K4_ = info_msg->k[4];
421 if (isnan(px) || isnan(
py) || isnan(inv_fx_) || isnan(inv_fy_))
425 if (
static_cast<int>(x_cache_.size()) < w)
427 if (
static_cast<int>(y_cache_.size()) < h)
430 for (
int x = 0; x < w; ++x)
431 x_cache_[x] = (x - px) * inv_fx_;
433 for (
int y = 0; y < h; ++y)
434 y_cache_[y] = (y -
py) * inv_fy_;
437 const octomap::point3d sensor_origin(map_h_sensor.getOrigin().getX(), map_h_sensor.getOrigin().getY(),
438 map_h_sensor.getOrigin().getZ());
440 octomap::KeySet* occupied_cells_ptr =
new octomap::KeySet();
441 octomap::KeySet* model_cells_ptr =
new octomap::KeySet();
442 octomap::KeySet& occupied_cells = *occupied_cells_ptr;
443 octomap::KeySet& model_cells = *model_cells_ptr;
446 std::size_t img_size = h * w;
447 if (filtered_labels_.size() < img_size)
448 filtered_labels_.resize(img_size);
451 const unsigned int* labels_row = &filtered_labels_[0];
452 mesh_filter_->getFilteredLabels(&filtered_labels_[0]);
457 sensor_msgs::msg::Image debug_msg;
458 debug_msg.header = depth_msg->header;
459 debug_msg.height = h;
461 debug_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
462 debug_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
463 debug_msg.step = w *
sizeof(float);
464 debug_msg.data.resize(img_size *
sizeof(
float));
465 mesh_filter_->getModelDepth(
reinterpret_cast<float*
>(&debug_msg.data[0]));
466 pub_model_depth_image_.publish(debug_msg, *info_msg);
468 sensor_msgs::msg::Image filtered_depth_msg;
469 filtered_depth_msg.header = depth_msg->header;
470 filtered_depth_msg.height = h;
471 filtered_depth_msg.width = w;
472 filtered_depth_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
473 filtered_depth_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
474 filtered_depth_msg.step = w *
sizeof(float);
475 filtered_depth_msg.data.resize(img_size *
sizeof(
float));
476 mesh_filter_->getFilteredDepth(
reinterpret_cast<float*
>(&filtered_depth_msg.data[0]));
477 pub_filtered_depth_image_.publish(filtered_depth_msg, *info_msg);
479 sensor_msgs::msg::Image label_msg;
480 label_msg.header = depth_msg->header;
481 label_msg.height = h;
483 label_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
484 label_msg.encoding = sensor_msgs::image_encodings::RGBA8;
485 label_msg.step = w *
sizeof(
unsigned int);
486 label_msg.data.resize(img_size *
sizeof(
unsigned int));
487 mesh_filter_->getFilteredLabels(
reinterpret_cast<unsigned int*
>(&label_msg.data[0]));
489 pub_filtered_label_image_.publish(label_msg, *info_msg);
492 if (!filtered_cloud_topic_.empty())
494 sensor_msgs::msg::Image filtered_msg;
495 filtered_msg.header = depth_msg->header;
496 filtered_msg.height = h;
497 filtered_msg.width = w;
498 filtered_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
499 filtered_msg.encoding = sensor_msgs::image_encodings::TYPE_16UC1;
500 filtered_msg.step = w *
sizeof(
unsigned short);
501 filtered_msg.data.resize(img_size *
sizeof(
unsigned short));
504 static std::vector<float> filtered_data;
505 if (filtered_data.size() < img_size)
506 filtered_data.resize(img_size);
508 mesh_filter_->getFilteredDepth(
reinterpret_cast<float*
>(&filtered_data[0]));
509 unsigned short* msg_data =
reinterpret_cast<unsigned short*
>(&filtered_msg.data[0]);
510 for (std::size_t i = 0; i < img_size; ++i)
513 msg_data[i] =
static_cast<unsigned short>(filtered_data[i] * 1000 + 0.5);
515 pub_filtered_depth_image_.publish(filtered_msg, *info_msg);
523 const int h_bound = h - skip_vertical_pixels_;
524 const int w_bound = w - skip_horizontal_pixels_;
528 const uint16_t* input_row =
reinterpret_cast<const uint16_t*
>(&depth_msg->data[0]);
530 for (
int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
532 for (
int x = skip_horizontal_pixels_; x < w_bound; ++x)
537 float zz =
static_cast<float>(input_row[x]) * 1e-3;
538 float yy = y_cache_[y] * zz;
539 float xx = x_cache_[x] * zz;
541 tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
542 occupied_cells.insert(
tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
547 float zz = input_row[x] * 1e-3;
548 float yy = y_cache_[y] * zz;
549 float xx = x_cache_[x] * zz;
551 tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
553 model_cells.insert(
tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
560 const float* input_row =
reinterpret_cast<const float*
>(&depth_msg->data[0]);
562 for (
int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
564 for (
int x = skip_horizontal_pixels_; x < w_bound; ++x)
568 float zz = input_row[x];
569 float yy = y_cache_[y] * zz;
570 float xx = x_cache_[x] * zz;
572 tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
573 occupied_cells.insert(
tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
577 float zz = input_row[x];
578 float yy = y_cache_[y] * zz;
579 float xx = x_cache_[x] * zz;
581 tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
583 model_cells.insert(
tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
592 RCLCPP_ERROR(logger_,
"Internal error while parsing depth data");
598 for (
const octomap::OcTreeKey& model_cell : model_cells)
599 occupied_cells.erase(model_cell);
606 for (
const octomap::OcTreeKey& occupied_cell : occupied_cells)
607 tree_->updateNode(occupied_cell, true);
611 RCLCPP_ERROR(logger_,
"Internal error while updating octree");
613 tree_->unlockWrite();
614 tree_->triggerUpdateCallback();
617 free_space_updater_->pushLazyUpdate(occupied_cells_ptr, model_cells_ptr, sensor_origin);
619 RCLCPP_DEBUG(logger_,
"Processed depth image in %lf ms", (node_->now() -
start).seconds() * 1000.0);
std::function< bool(MeshHandle, Eigen::Isometry3d &)> TransformCallback
void setImageSize(unsigned width, unsigned height)
sets the image size
Parameters for Stereo-like devices.
void setCameraParameters(float fx, float fy, float cx, float cy)
sets the camera parameters of the pinhole camera where the disparities were obtained....
static const StereoCameraModel::Parameters & REGISTERED_PSDK_PARAMS
predefined sensor model for OpenNI compatible devices (e.g., PrimeSense, Kinect, Asus Xtion)
void forgetShape(ShapeHandle handle) override
DepthImageOctomapUpdater()
bool initialize(const rclcpp::Node::SharedPtr &node) override
Do any necessary setup (subscribe to ros topics, etc.). This call assumes setMonitor() and setParams(...
ShapeHandle excludeShape(const shapes::ShapeConstPtr &shape) override
~DepthImageOctomapUpdater() override
bool setParams(const std::string &name_space) override
Set updater params using struct that comes from parsing a yaml string. This must be called after setM...
const std::shared_ptr< tf2_ros::Buffer > & getTFClient() const
Gets the tf client.
const std::string & getMapFrame() const
Gets the map frame (this is set either by the constructor or a parameter).
void setMapFrame(const std::string &frame)
Sets the map frame.
Base class for classes which update the occupancy map.
OccupancyMapMonitor * monitor_
collision_detection::OccMapTreePtr tree_
ShapeTransformCache transform_cache_
bool updateTransformCache(const std::string &target_frame, const rclcpp::Time &target_time)
Main namespace for MoveIt.