In this tutorial we will learn how to extract borders (the transitions from foreground to background) from a range image. We are interested in three different kinds of points: obstacle border points, shadow border points, and veil points, which lie between the obstacle border and the shadow border and are a typical artifact in 3D range data acquired by lidar.

Here is the code:

/* \author Bastian Steder */
#include <iostream>
#include <boost/thread/thread.hpp>
#include <pcl/range_image/range_image.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/range_image_visualizer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/features/range_image_border_extractor.h>
#include <pcl/console/parse.h>
typedef pcl::PointXYZ PointType;
// --------------------
// -----Parameters-----
// --------------------
float angular_resolution = 0.5f;
pcl::RangeImage::CoordinateFrame coordinate_frame = pcl::RangeImage::CAMERA_FRAME;
bool setUnseenToMaxRange = false;
// --------------
// -----Help-----
// --------------
void
printUsage (const char* progName)
{
  std::cout << "\n\nUsage: "<<progName<<" [options] <scene.pcd>\n\n"
            << "Options:\n"
            << "-------------------------------------------\n"
            << "-r <float>   angular resolution in degrees (default "<<angular_resolution<<")\n"
            << "-c <int>     coordinate frame (default "<< (int)coordinate_frame<<")\n"
            << "-m           Treat all unseen points to max range\n"
            << "-h           this help\n"
            << "\n\n";
}
// --------------
// -----Main-----
// --------------
int
main (int argc, char** argv)
{
  // --------------------------------------
  // -----Parse Command Line Arguments-----
  // --------------------------------------
  if (pcl::console::find_argument (argc, argv, "-h") >= 0)
  {
    printUsage (argv[0]);
    return 0;
  }
  if (pcl::console::find_argument (argc, argv, "-m") >= 0)
  {
    setUnseenToMaxRange = true;
    std::cout << "Setting unseen values in range image to maximum range readings.\n";
  }
  int tmp_coordinate_frame;
  if (pcl::console::parse (argc, argv, "-c", tmp_coordinate_frame) >= 0)
  {
    coordinate_frame = pcl::RangeImage::CoordinateFrame (tmp_coordinate_frame);
    std::cout << "Using coordinate frame "<< (int)coordinate_frame<<".\n";
  }
  if (pcl::console::parse (argc, argv, "-r", angular_resolution) >= 0)
    std::cout << "Setting angular resolution to "<<angular_resolution<<"deg.\n";
  angular_resolution = pcl::deg2rad (angular_resolution);
  // ------------------------------------------------------------------
  // -----Read pcd file or create example point cloud if not given-----
  // ------------------------------------------------------------------
  pcl::PointCloud<PointType>::Ptr point_cloud_ptr (new pcl::PointCloud<PointType>);
  pcl::PointCloud<PointType>& point_cloud = *point_cloud_ptr;
  pcl::PointCloud<pcl::PointWithViewpoint> far_ranges;
  Eigen::Affine3f scene_sensor_pose (Eigen::Affine3f::Identity ());
  std::vector<int> pcd_filename_indices = pcl::console::parse_file_extension_argument (argc, argv, "pcd");
  if (!pcd_filename_indices.empty ())
  {
    std::string filename = argv[pcd_filename_indices[0]];
    if (pcl::io::loadPCDFile (filename, point_cloud) == -1)
    {
      std::cout << "Was not able to open file \""<<filename<<"\".\n";
      printUsage (argv[0]);
      return 0;
    }
    scene_sensor_pose = Eigen::Affine3f (Eigen::Translation3f (point_cloud.sensor_origin_[0],
                                                               point_cloud.sensor_origin_[1],
                                                               point_cloud.sensor_origin_[2])) *
                        Eigen::Affine3f (point_cloud.sensor_orientation_);
    std::string far_ranges_filename = pcl::getFilenameWithoutExtension (filename)+"_far_ranges.pcd";
    if (pcl::io::loadPCDFile (far_ranges_filename.c_str (), far_ranges) == -1)
      std::cout << "Far ranges file \""<<far_ranges_filename<<"\" does not exist.\n";
  }
  else
  {
    std::cout << "\nNo *.pcd file given => Generating example point cloud.\n\n";
    for (float x=-0.5f; x<=0.5f; x+=0.01f)
    {
      for (float y=-0.5f; y<=0.5f; y+=0.01f)
      {
        PointType point;  point.x = x;  point.y = y;  point.z = 2.0f - y;
        point_cloud.points.push_back (point);
      }
    }
    point_cloud.width = (int) point_cloud.points.size ();  point_cloud.height = 1;
  }
  // -----------------------------------------------
  // -----Create RangeImage from the PointCloud-----
  // -----------------------------------------------
  float noise_level = 0.0;
  float min_range = 0.0f;
  int border_size = 1;
  boost::shared_ptr<pcl::RangeImage> range_image_ptr (new pcl::RangeImage);
  pcl::RangeImage& range_image = *range_image_ptr;
  range_image.createFromPointCloud (point_cloud, angular_resolution, pcl::deg2rad (360.0f), pcl::deg2rad (180.0f),
                                    scene_sensor_pose, coordinate_frame, noise_level, min_range, border_size);
  range_image.integrateFarRanges (far_ranges);
  if (setUnseenToMaxRange)
    range_image.setUnseenToMaxRange ();
  // --------------------------------------------
  // -----Open 3D viewer and add point cloud-----
  // --------------------------------------------
  pcl::visualization::PCLVisualizer viewer ("3D Viewer");
  viewer.setBackgroundColor (1, 1, 1);
  viewer.addCoordinateSystem (1.0f, "global");
  pcl::visualization::PointCloudColorHandlerCustom<PointType> point_cloud_color_handler (point_cloud_ptr, 0, 0, 0);
  viewer.addPointCloud (point_cloud_ptr, point_cloud_color_handler, "original point cloud");
  //PointCloudColorHandlerCustom<pcl::PointWithRange> range_image_color_handler (range_image_ptr, 150, 150, 150);
  //viewer.addPointCloud (range_image_ptr, range_image_color_handler, "range image");
  //viewer.setPointCloudRenderingProperties (PCL_VISUALIZER_POINT_SIZE, 2, "range image");
  // -------------------------
  // -----Extract borders-----
  // -------------------------
  pcl::RangeImageBorderExtractor border_extractor (&range_image);
  pcl::PointCloud<pcl::BorderDescription> border_descriptions;
  border_extractor.compute (border_descriptions);
  // ----------------------------------
  // -----Show points in 3D viewer-----
  // ----------------------------------
  pcl::PointCloud<pcl::PointWithRange>::Ptr border_points_ptr (new pcl::PointCloud<pcl::PointWithRange>),
                                            veil_points_ptr (new pcl::PointCloud<pcl::PointWithRange>),
                                            shadow_points_ptr (new pcl::PointCloud<pcl::PointWithRange>);
  pcl::PointCloud<pcl::PointWithRange>& border_points = *border_points_ptr,
                                      & veil_points = *veil_points_ptr,
                                      & shadow_points = *shadow_points_ptr;
  for (int y=0; y< (int)range_image.height; ++y)
  {
    for (int x=0; x< (int)range_image.width; ++x)
    {
      if (border_descriptions.points[y*range_image.width + x].traits[pcl::BORDER_TRAIT__OBSTACLE_BORDER])
        border_points.points.push_back (range_image.points[y*range_image.width + x]);
      if (border_descriptions.points[y*range_image.width + x].traits[pcl::BORDER_TRAIT__VEIL_POINT])
        veil_points.points.push_back (range_image.points[y*range_image.width + x]);
      if (border_descriptions.points[y*range_image.width + x].traits[pcl::BORDER_TRAIT__SHADOW_BORDER])
        shadow_points.points.push_back (range_image.points[y*range_image.width + x]);
    }
  }
  pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> border_points_color_handler (border_points_ptr, 0, 255, 0);
  viewer.addPointCloud<pcl::PointWithRange> (border_points_ptr, border_points_color_handler, "border points");
  viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 7, "border points");
  pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> veil_points_color_handler (veil_points_ptr, 255, 0, 0);
  viewer.addPointCloud<pcl::PointWithRange> (veil_points_ptr, veil_points_color_handler, "veil points");
  viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 7, "veil points");
  pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> shadow_points_color_handler (shadow_points_ptr, 0, 255, 255);
  viewer.addPointCloud<pcl::PointWithRange> (shadow_points_ptr, shadow_points_color_handler, "shadow points");
  viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 7, "shadow points");
  //-------------------------------------
  // -----Show points on range image-----
  // ------------------------------------
  pcl::visualization::RangeImageVisualizer* range_image_borders_widget = NULL;
  range_image_borders_widget =
    pcl::visualization::RangeImageVisualizer::getRangeImageBordersWidget (range_image, -std::numeric_limits<float>::infinity (), std::numeric_limits<float>::infinity (), false,
                                                                          border_descriptions, "Range image with borders");
  //--------------------
  // -----Main loop-----
  //--------------------
  while (!viewer.wasStopped ())
  {
    range_image_borders_widget->spinOnce ();
    viewer.spinOnce ();
    pcl_sleep (0.01);
  }
}

Explanation of the code

At the beginning we parse the command line, read a point cloud from disk (or create an example point cloud if none is given), create a range image from it, and visualize it. All of these steps are already covered in the "Range Image Visualization" tutorial.

There is only one small deviation here. To extract border information, it is important to distinguish between range-image points that were simply not observed and points that were measured but lie beyond the sensor's range (far ranges). The latter typically mark a border, whereas unobserved points usually do not, so it is useful to provide these far-range measurements when they are available. We therefore try to find an additional pcd file containing such values:

std::string far_ranges_filename = pcl::getFilenameWithoutExtension (filename)+"_far_ranges.pcd";
if (pcl::io::loadPCDFile (far_ranges_filename.c_str (), far_ranges) == -1)
  std::cout << "Far ranges file \""<<far_ranges_filename<<"\" does not exist.\n";

These far ranges are later integrated into the range image:

range_image.integrateFarRanges (far_ranges);

If no such values are available, the command-line parameter -m can be used instead; it marks all unobserved points as maximum-range readings:

if (setUnseenToMaxRange)
  range_image.setUnseenToMaxRange ();
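setUnseenToMaxRange () simply replaces every unobserved pixel in the range image with a maximum-range value, so that transitions towards those areas look like transitions to far ranges and the corresponding obstacle borders can still be detected.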

Now we come to the part that actually deals with the border extraction:

pcl::RangeImageBorderExtractor border_extractor (&range_image);
pcl::PointCloud<pcl::BorderDescription> border_descriptions;
border_extractor.compute (border_descriptions);

This creates a RangeImageBorderExtractor object, gives it the range image, computes the border information, and stores the result in border_descriptions.
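border_descriptions is itself a point cloud containing one pcl::BorderDescription per range-image pixel, and each description carries a set of boolean traits. As a minimal sketch (not part of the original code), the different point types could be counted like this:

int num_obstacle_border = 0, num_veil = 0, num_shadow_border = 0;
for (size_t i = 0; i < border_descriptions.points.size (); ++i)
{
  const pcl::BorderDescription& description = border_descriptions.points[i];
  if (description.traits[pcl::BORDER_TRAIT__OBSTACLE_BORDER]) ++num_obstacle_border;
  if (description.traits[pcl::BORDER_TRAIT__VEIL_POINT])      ++num_veil;
  if (description.traits[pcl::BORDER_TRAIT__SHADOW_BORDER])   ++num_shadow_border;
}
std::cout << num_obstacle_border << " obstacle border points, " << num_veil
          << " veil points, " << num_shadow_border << " shadow border points.\n";

The loop in the full listing above performs the same per-pixel check, but additionally copies the corresponding range-image points into separate clouds so that they can be visualized.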

Finally, note that viewer.addCoordinateSystem (1.0f, "global"); may produce an error with some PCL versions; if so, change it to viewer.addCoordinateSystem (1.0f);.

You can run it directly, without a point cloud file:

./range_image_border_extraction -m

or with a point cloud file:

./range_image_border_extraction <point_cloud.pcd>
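In the first case, an example point cloud (a rectangle floating in front of the viewer) is generated, and -m is needed because the area around the rectangle is unobserved and its borders could otherwise not be detected. In the second case, the program also looks for a companion file named <point_cloud>_far_ranges.pcd next to the input and integrates it if present; if no such file exists, add -m as well so that unobserved points are treated as maximum-range readings.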
