#include <tf2_geometry_msgs/tf2_geometry_msgs.hpp>

#if __has_include(<tf2/LinearMath/Vector3.hpp>)
#include <tf2/LinearMath/Vector3.hpp>
#else
#include <tf2/LinearMath/Vector3.h>
#endif
#if __has_include(<tf2/LinearMath/Transform.hpp>)
#include <tf2/LinearMath/Transform.hpp>
#else
#include <tf2/LinearMath/Transform.h>
#endif
#include <geometric_shapes/shape_operations.h>
#include <sensor_msgs/image_encodings.hpp>
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <memory>
#include <thread>

static const rclcpp::Logger LOGGER = rclcpp::get_logger("moveit.ros.perception.depth_image_octomap_updater");
 
// Default configuration: depth image topic, clipping range [m], mesh-filter shadow threshold and padding,
// and the number of rows / columns skipped when sub-sampling the image.
DepthImageOctomapUpdater::DepthImageOctomapUpdater()
  : OccupancyMapUpdater("DepthImageUpdater")
  , image_topic_("depth")
  , near_clipping_plane_distance_(0.3)
  , far_clipping_plane_distance_(5.0)
  , shadow_threshold_(0.04)
  , padding_offset_(0.02)
  , skip_vertical_pixels_(4)
  , skip_horizontal_pixels_(6)
  , image_callback_count_(0)
  , average_callback_dt_(0.0)
{
}
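// Read the updater's parameters from the node under the given namespace. get_parameter() returns false and
// leaves the member untouched when a parameter is not available, so missing entries keep the defaults above.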
 
bool DepthImageOctomapUpdater::setParams(const std::string& name_space)
{
  try
  {
    node_->get_parameter(name_space + ".image_topic", image_topic_) &&
        node_->get_parameter(name_space + ".queue_size", queue_size_) &&
        node_->get_parameter(name_space + ".near_clipping_plane_distance", near_clipping_plane_distance_) &&
        node_->get_parameter(name_space + ".far_clipping_plane_distance", far_clipping_plane_distance_) &&
        node_->get_parameter(name_space + ".shadow_threshold", shadow_threshold_) &&
        node_->get_parameter(name_space + ".padding_scale", padding_scale_) &&
        node_->get_parameter(name_space + ".padding_offset", padding_offset_) &&
        node_->get_parameter(name_space + ".max_update_rate", max_update_rate_) &&
        node_->get_parameter(name_space + ".skip_vertical_pixels", skip_vertical_pixels_) &&
        node_->get_parameter(name_space + ".skip_horizontal_pixels", skip_horizontal_pixels_) &&
        node_->get_parameter(name_space + ".filtered_cloud_topic", filtered_cloud_topic_) &&
        node_->get_parameter(name_space + ".ns", ns_);
    return true;
  }
  catch (const rclcpp::exceptions::InvalidParameterTypeException& e)
  {
    RCLCPP_ERROR_STREAM(LOGGER, e.what() << '\n');
    return false;
  }
}
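// initialize() sets up the image transports, the lazy free-space updater that clears free cells in the
// background, and the mesh filter that removes the robot's own geometry (and its shadow) from the depth image.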
 
bool DepthImageOctomapUpdater::initialize(const rclcpp::Node::SharedPtr& node)
{
  node_ = node;

  input_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
  model_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
  filtered_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
  filtered_label_transport_ = std::make_unique<image_transport::ImageTransport>(node_);

  free_space_updater_ = std::make_unique<LazyFreeSpaceUpdater>(tree_);

  // create the mesh filter using the predefined stereo-like (OpenNI compatible) camera model
  mesh_filter_ = std::make_unique<mesh_filter::MeshFilter<mesh_filter::StereoCameraModel>>(
      mesh_filter::MeshFilterBase::TransformCallback(), mesh_filter::StereoCameraModel::REGISTERED_PSDK_PARAMS);
  mesh_filter_->parameters().setDepthRange(near_clipping_plane_distance_, far_clipping_plane_distance_);
  mesh_filter_->setShadowThreshold(shadow_threshold_);
  mesh_filter_->setPaddingOffset(padding_offset_);
  mesh_filter_->setPaddingScale(padding_scale_);
  mesh_filter_->setTransformCallback(
      [this](mesh_filter::MeshHandle mesh, Eigen::Isometry3d& tf) { return getShapeTransform(mesh, tf); });

  return true;
}
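// Advertise the debug / filtered image topics and subscribe to the configured depth camera topic.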
 
void DepthImageOctomapUpdater::start()
{
  pub_model_depth_image_ = model_depth_transport_->advertiseCamera("model_depth", 1);

  std::string prefix = "";
  if (!ns_.empty())
    prefix = ns_ + "/";

  pub_model_depth_image_ = model_depth_transport_->advertiseCamera(prefix + "model_depth", 1);
  if (!filtered_cloud_topic_.empty())
    pub_filtered_depth_image_ = filtered_depth_transport_->advertiseCamera(prefix + filtered_cloud_topic_, 1);
  else
    pub_filtered_depth_image_ = filtered_depth_transport_->advertiseCamera(prefix + "filtered_depth", 1);

  pub_filtered_label_image_ = filtered_label_transport_->advertiseCamera(prefix + "filtered_label", 1);

  sub_depth_image_ = image_transport::create_camera_subscription(
      node_.get(), image_topic_,
      [this](const sensor_msgs::msg::Image::ConstSharedPtr& depth_msg,
             const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg) {
        return depthImageCallback(depth_msg, info_msg);
      },
      "raw", rmw_qos_profile_sensor_data);
}
 
void DepthImageOctomapUpdater::stopHelper()
{
  sub_depth_image_.shutdown();
}
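// Shapes excluded from the octomap are registered with the mesh filter; non-mesh shapes are converted to a
// mesh first.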
 
ShapeHandle DepthImageOctomapUpdater::excludeShape(const shapes::ShapeConstPtr& shape)
{
  ShapeHandle h = 0;
  if (mesh_filter_)
  {
    if (shape->type == shapes::MESH)
    {
      h = mesh_filter_->addMesh(static_cast<const shapes::Mesh&>(*shape));
    }
    else
    {
      std::unique_ptr<shapes::Mesh> m(shapes::createMeshFromShape(shape.get()));
      if (m)
        h = mesh_filter_->addMesh(*m);
    }
  }
  else
    RCLCPP_ERROR(LOGGER, "Mesh filter not yet initialized!");
  return h;
}

void DepthImageOctomapUpdater::forgetShape(ShapeHandle handle)
{
  if (mesh_filter_)
    mesh_filter_->removeMesh(handle);
}
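// Transform callback used by the mesh filter: look up the cached pose for a given mesh handle.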
 
bool DepthImageOctomapUpdater::getShapeTransform(mesh_filter::MeshHandle h, Eigen::Isometry3d& transform) const
{
  ShapeTransformCache::const_iterator it = transform_cache_.find(h);
  if (it == transform_cache_.end())
  {
    RCLCPP_ERROR(LOGGER, "Internal error. Mesh filter handle %u not found", h);
    return false;
  }
  transform = it->second;
  return true;
}
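// Detect the host byte order at runtime so incoming image data can be checked against it.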
 
bool host_is_big_endian()
{
  union
  {
    uint32_t i;
    char c[sizeof(uint32_t)];
  } bint = { 0x01020304 };
  return bint.c[0] == 1;
}

static const bool HOST_IS_BIG_ENDIAN = host_is_big_endian();
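// Main callback: optionally rate-limit updates, transform the sensor pose into the map frame, run the mesh
// filter on the depth image, and convert the remaining pixels into occupied and model octomap cells.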
 
void DepthImageOctomapUpdater::depthImageCallback(const sensor_msgs::msg::Image::ConstSharedPtr& depth_msg,
                                                  const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg)
{
  RCLCPP_DEBUG(LOGGER, "Received a new depth image message (frame = '%s', encoding='%s')",
               depth_msg->header.frame_id.c_str(), depth_msg->encoding.c_str());
  rclcpp::Time start = node_->now();

  if (max_update_rate_ > 0)
  {
    // skip this image if the minimum time between octomap updates has not yet elapsed
    if (node_->now() - last_update_time_ <= rclcpp::Duration::from_seconds(1.0 / max_update_rate_))
      return;
    last_update_time_ = node_->now();
  }

  // maintain a running average of the time between depth image callbacks (used to size the TF retries below)
  if (image_callback_count_ < 1000)
  {
    if (image_callback_count_ > 0)
    {
      const double dt_start = (start - last_depth_callback_start_).seconds();
      if (image_callback_count_ < 2)
        average_callback_dt_ = dt_start;
      else
        average_callback_dt_ =
            ((image_callback_count_ - 1) * average_callback_dt_ + dt_start) / (double)image_callback_count_;
    }
  }
  else
  {
    // restart the averaging window so the estimate can still adapt
    image_callback_count_ = 2;
  }
  last_depth_callback_start_ = start;
  ++image_callback_count_;
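  // Look up the transform from the sensor frame to the octomap frame. The lookup is retried for roughly the
  // expected inter-message interval (based on the measured callback rate and the queue size); success and
  // failure counters drive the throttled warnings below.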
 
  tf2::Stamped<tf2::Transform> map_h_sensor;
  if (monitor_->getMapFrame() == depth_msg->header.frame_id)
    map_h_sensor.setIdentity();
  else
  {
    if (tf_buffer_)
    {
      static const double TEST_DT = 0.005;
      const int nt =
          static_cast<int>((0.5 + average_callback_dt_ / TEST_DT) * std::max(1, (static_cast<int>(queue_size_) / 2)));
      bool found = false;
      std::string err;
      for (int t = 0; t < nt; ++t)
      {
        try
        {
          tf2::fromMsg(tf_buffer_->lookupTransform(monitor_->getMapFrame(), depth_msg->header.frame_id,
                                                   depth_msg->header.stamp),
                       map_h_sensor);
          found = true;
          break;
        }
        catch (tf2::TransformException& ex)
        {
          // wait TEST_DT seconds before retrying the lookup
          static const rclcpp::Duration D = rclcpp::Duration::from_seconds(TEST_DT);
          err = ex.what();
          std::this_thread::sleep_for(D.to_chrono<std::chrono::nanoseconds>());
        }
      }
      static const unsigned int MAX_TF_COUNTER = 1000;
      if (found)
      {
        ++good_tf_;
        if (good_tf_ > MAX_TF_COUNTER)
        {
          const unsigned int div = MAX_TF_COUNTER / 10;
          good_tf_ /= div;
          failed_tf_ /= div;
        }
      }
      else
      {
        ++failed_tf_;
        if (failed_tf_ > good_tf_)
        {
          RCLCPP_WARN_THROTTLE(LOGGER, *node_->get_clock(), 1000,
                               "More than half of the image messages discarded due to TF being unavailable (%u%%). "
                               "Transform error of sensor data: %s; quitting callback.",
                               (100 * failed_tf_) / (good_tf_ + failed_tf_), err.c_str());
        }
        else
        {
          RCLCPP_DEBUG_THROTTLE(LOGGER, *node_->get_clock(), 1000,
                                "Transform error of sensor data: %s; quitting callback", err.c_str());
        }
        if (failed_tf_ > MAX_TF_COUNTER)
        {
          const unsigned int div = MAX_TF_COUNTER / 10;
          good_tf_ /= div;
          failed_tf_ /= div;
        }
        return;
      }
    }
    else
      return;
  }
 
  if (depth_msg->is_bigendian && !HOST_IS_BIG_ENDIAN)
    RCLCPP_ERROR_THROTTLE(LOGGER, *node_->get_clock(), 1000,
                          "endian problem: received image data does not match host");
 
  const int w = depth_msg->width;
  const int h = depth_msg->height;

  // Hand the raw depth buffer to the mesh filter, passing the camera intrinsics from the CameraInfo message
  // and the matching pixel type: 16-bit unsigned depth in millimeters or 32-bit float depth in meters.
  mesh_filter::StereoCameraModel::Parameters& params = mesh_filter_->parameters();
  params.setCameraParameters(info_msg->k[0], info_msg->k[4], info_msg->k[2], info_msg->k[5]);
  params.setImageSize(w, h);

  const bool is_u_short = depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1;
  if (is_u_short)
  {
    mesh_filter_->filter(&depth_msg->data[0], GL_UNSIGNED_SHORT);
  }
  else
  {
    if (depth_msg->encoding != sensor_msgs::image_encodings::TYPE_32FC1)
    {
      RCLCPP_ERROR_THROTTLE(LOGGER, *node_->get_clock(), 1000, "Unexpected encoding type: '%s'. Ignoring input.",
                            depth_msg->encoding.c_str());
      return;
    }
    mesh_filter_->filter(&depth_msg->data[0], GL_FLOAT);
  }
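  // When the intrinsics change, rebuild the back-projection cache: x_cache_[x] = (x - cx) / fx and
  // y_cache_[y] = (y - cy) / fy, so each pixel can later be turned into a 3D point with two multiplications.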
 
  // use the principal point from the camera calibration
  const double px = info_msg->k[2];
  const double py = info_msg->k[5];

  if (w >= static_cast<int>(x_cache_.size()) || h >= static_cast<int>(y_cache_.size()) || K2_ != px || K5_ != py ||
      K0_ != info_msg->k[0] || K4_ != info_msg->k[4])
  {
    K2_ = px;
    K5_ = py;
    K0_ = info_msg->k[0];
    K4_ = info_msg->k[4];

    inv_fx_ = 1.0 / K0_;
    inv_fy_ = 1.0 / K4_;

    // if any of the intrinsics is NaN, discard this image
    if (!(px == px && py == py && inv_fx_ == inv_fx_ && inv_fy_ == inv_fy_))
      return;

    if (static_cast<int>(x_cache_.size()) < w)
      x_cache_.resize(w);
    if (static_cast<int>(y_cache_.size()) < h)
      y_cache_.resize(h);

    for (int x = 0; x < w; ++x)
      x_cache_[x] = (x - px) * inv_fx_;

    for (int y = 0; y < h; ++y)
      y_cache_[y] = (y - py) * inv_fy_;
  }
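  // Key sets for cells observed as obstacles and for cells explained by the filtered model. They are allocated
  // on the heap because ownership is handed to the lazy free-space updater at the end of the callback.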
 
  const octomap::point3d sensor_origin(map_h_sensor.getOrigin().getX(), map_h_sensor.getOrigin().getY(),
                                       map_h_sensor.getOrigin().getZ());

  octomap::KeySet* occupied_cells_ptr = new octomap::KeySet();
  octomap::KeySet* model_cells_ptr = new octomap::KeySet();
  octomap::KeySet& occupied_cells = *occupied_cells_ptr;
  octomap::KeySet& model_cells = *model_cells_ptr;

  std::size_t img_size = h * w;
  if (filtered_labels_.size() < img_size)
    filtered_labels_.resize(img_size);

  const unsigned int* labels_row = &filtered_labels_[0];
  mesh_filter_->getFilteredLabels(&filtered_labels_[0]);
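  // Debug output: publish the model depth, filtered depth, and label images produced by the mesh filter.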
 
    sensor_msgs::msg::Image debug_msg;
    debug_msg.header = depth_msg->header;
    debug_msg.height = h;
    debug_msg.width = w;
    debug_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    debug_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    debug_msg.step = w * sizeof(float);
    debug_msg.data.resize(img_size * sizeof(float));
    mesh_filter_->getModelDepth(reinterpret_cast<float*>(&debug_msg.data[0]));
    pub_model_depth_image_.publish(debug_msg, *info_msg);

    sensor_msgs::msg::Image filtered_depth_msg;
    filtered_depth_msg.header = depth_msg->header;
    filtered_depth_msg.height = h;
    filtered_depth_msg.width = w;
    filtered_depth_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    filtered_depth_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    filtered_depth_msg.step = w * sizeof(float);
    filtered_depth_msg.data.resize(img_size * sizeof(float));
    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_depth_msg.data[0]));
    pub_filtered_depth_image_.publish(filtered_depth_msg, *info_msg);

    sensor_msgs::msg::Image label_msg;
    label_msg.header = depth_msg->header;
    label_msg.height = h;
    label_msg.width = w;
    label_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    label_msg.encoding = sensor_msgs::image_encodings::RGBA8;
    label_msg.step = w * sizeof(unsigned int);
    label_msg.data.resize(img_size * sizeof(unsigned int));
    mesh_filter_->getFilteredLabels(reinterpret_cast<unsigned int*>(&label_msg.data[0]));

    pub_filtered_label_image_.publish(label_msg, *info_msg);
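  // If a filtered-depth topic was configured, also publish the filtered depth as a 16UC1 image in millimeters.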
 
  if (!filtered_cloud_topic_.empty())
  {
    sensor_msgs::msg::Image filtered_msg;
    filtered_msg.header = depth_msg->header;
    filtered_msg.height = h;
    filtered_msg.width = w;
    filtered_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    filtered_msg.encoding = sensor_msgs::image_encodings::TYPE_16UC1;
    filtered_msg.step = w * sizeof(unsigned short);
    filtered_msg.data.resize(img_size * sizeof(unsigned short));

    // the float buffer is reused between callbacks
    static std::vector<float> filtered_data;
    if (filtered_data.size() < img_size)
      filtered_data.resize(img_size);

    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_data[0]));
    unsigned short* msg_data = reinterpret_cast<unsigned short*>(&filtered_msg.data[0]);
    for (std::size_t i = 0; i < img_size; ++i)
    {
      // rescale from meters to millimeters, rounding to the nearest value
      msg_data[i] = static_cast<unsigned short>(filtered_data[i] * 1000 + 0.5);
    }
    pub_filtered_depth_image_.publish(filtered_msg, *info_msg);
  }
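  // Back-project every remaining pixel to a 3D point in the sensor frame, transform it into the map frame,
  // and record the corresponding octomap key, skipping the configured number of border pixels.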
 
  try
  {
    const int h_bound = h - skip_vertical_pixels_;
    const int w_bound = w - skip_horizontal_pixels_;

    if (is_u_short)
    {
      const uint16_t* input_row = reinterpret_cast<const uint16_t*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
        {
          // pixel not filtered: a real obstacle
          if (labels_row[x] == mesh_filter::MeshFilterBase::BACKGROUND)
          {
            float zz = (float)input_row[x] * 1e-3;  // scale from mm to m
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
          // pixel explained by the model (or beyond the far clipping plane): candidate for clearing
          else if (labels_row[x] >= mesh_filter::MeshFilterBase::FAR_CLIP)
          {
            float zz = input_row[x] * 1e-3;  // scale from mm to m
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
        }
    }
    else
    {
      const float* input_row = reinterpret_cast<const float*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
        {
          if (labels_row[x] == mesh_filter::MeshFilterBase::BACKGROUND)
          {
            float zz = input_row[x];  // already in meters
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
          else if (labels_row[x] >= mesh_filter::MeshFilterBase::FAR_CLIP)
          {
            float zz = input_row[x];
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
        }
    }
  }
  catch (...)
  {
    RCLCPP_ERROR(LOGGER, "Internal error while parsing depth data");
    return;
  }
 
  // Cells that coincide with the model are not treated as obstacles; the remaining cells are marked occupied
  // in the octree, and clearing of free space is deferred to the lazy free-space updater.
  for (const octomap::OcTreeKey& model_cell : model_cells)
    occupied_cells.erase(model_cell);

  tree_->lockWrite();
  try
  {
    for (const octomap::OcTreeKey& occupied_cell : occupied_cells)
      tree_->updateNode(occupied_cell, true);
  }
  catch (...)
  {
    RCLCPP_ERROR(LOGGER, "Internal error while updating octree");
  }
  tree_->unlockWrite();
  tree_->triggerUpdateCallback();

  free_space_updater_->pushLazyUpdate(occupied_cells_ptr, model_cells_ptr, sensor_origin);

  RCLCPP_DEBUG(LOGGER, "Processed depth image in %lf ms", (node_->now() - start).seconds() * 1000.0);
}
 