Keypoint关键点-1 PCL学习记录-11 Keypoint关键点(通过深度图识别关键点)

   关键点也称为兴趣点,它是 2D 图像或 3D 点云或曲面模型上,可以通过检测标准来获取的具有稳定性、区别性的点集。从技术上来说,关键点的数量比原始点云或图像的数据量少很多,其与局部特征描述子结合组成关键点描述子。常用来构成原始数据的紧凑表示 ,具有代表性与描述性,从而加快后续识别、追踪等对数据的处理速度 。

故而,关键点提取就成为 2D 与 3D 信息处理中不可或缺的关键技术。

关键点概念及算法

NARF(Normal Aligned Radial Feature)关键点是为了从深度图像中识别物体而提出的,关键点探测的重要一步是减少特征提取时的搜索空间,把重点放在重要的结构上,对 NARF 关键点提取过程有以下要求:

  • 提取的过程必须考虑边缘以及物体表面变化信息
  • 即使换了不同的视角,关键点的位置必须足够稳定,可以被重复探测
  • 关键点所在的位置必须有稳定的支持区域,可以计算描述子和估计唯一的法向量。

为了满足上述要求,可以通过以下探测步骤来进行关键点提取:

  1. 遍历每个深度图像点,通过寻找在近邻区域有深度突变的位置进行边缘检测;
  2. 遍历每个深度图像点,根据近邻区域的表面变化决定一个衡量表面变化的系数,以及变化的主方向;
  3. 根据第2步找到的主方向计算兴趣值,表征该方向与其他方向的不同,以及该处表面的变化情况,即该点有多稳定;
  4. 对兴趣值进行平滑过滤;
  5. 进行非极大值抑制(non-maximum suppression)找到最终的关键点,即为 NARF 关键点。

代码实现为(包含主要注释):

/**这个例子主要作用是采用NARF特征点提取算法,从深度图(range image)中,
 * 提取矩形边缘的特征点
 * 大致过程如下:
 * 1. 导入点云文件(PCD)或生成一个点云文件
 * 2. 将点云文件转化为深度图
 * 3. 使用NARF特征点提取法,提取出特征
 * 关于NARF特征点方法,可见链接**/

#include <iostream>

#include <pcl/range_image/range_image.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/range_image_visualizer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/features/range_image_border_extractor.h>
#include <pcl/keypoints/narf_keypoint.h>
#include <pcl/console/parse.h>
#include <pcl/common/file_io.h> // for getFilenameWithoutExtension


typedef pcl::PointXYZ PointType;
//Step1: default parameters (each can be overridden from the command line, see printUsage)
float angular_resolution =0.5f; //angular resolution of the simulated depth sensor, i.e. the angle covered by one pixel of the range image; entered in degrees, converted to radians in main
float support_size = 0.2f;      //diameter of the sphere used as support region around each interest point
pcl::RangeImage::CoordinateFrame coordinate_frame = pcl::RangeImage::CAMERA_FRAME;//coordinate frame convention used when building the range image
bool setUnseenToMaxRange = false; //important: whether unseen pixels of the range image are treated as max-range readings, so image borders are detected as object borders too


//Step2 根据命令行输入的参数,运行相应的功能; 并显示提示信息
//Step2: print command-line help for this tool to stdout.
//progName: argv[0], echoed back in the usage line. Defaults shown come from the globals above.
void printUsage (const char* progName)
{
  std::cout << "\n\nUsage: " << progName << " [options] <scene.pcd>\n\n";
  std::cout << "Options:\n";
  std::cout << "-------------------------------------------\n";
  std::cout << "-r <float>   angular resolution in degrees (default " << angular_resolution << ")\n";
  std::cout << "-c <int>     coordinate frame (default " << (int)coordinate_frame << ")\n";
  std::cout << "-m           Treat all unseen points as maximum range readings\n";
  std::cout << "-s <float>   support size for the interest points (diameter of the used sphere - default " << support_size << ")\n";
  std::cout << "-h           this help\n";
  std::cout << "\n\n";
}

//Place the visualizer camera at the pose described by viewer_pose.
//The camera sits at the pose origin, looks along the pose's +Z axis, with -Y as "up".
void setViewerPose(pcl::visualization::PCLVisualizer &viewer, const Eigen::Affine3f &viewer_pose)
{
    const Eigen::Vector3f eye   = viewer_pose * Eigen::Vector3f(0, 0, 0);                    // camera position (pose origin)
    const Eigen::Vector3f focal = viewer_pose.rotation() * Eigen::Vector3f(0, 0, 1) + eye;   // point the camera looks at
    const Eigen::Vector3f up    = viewer_pose.rotation() * Eigen::Vector3f(0, -1, 0);        // camera up direction
    viewer.setCameraPosition(eye[0],   eye[1],   eye[2],
                             focal[0], focal[1], focal[2],
                             up[0],    up[1],    up[2]);
}





//Step3. 主函数
//Step3: main — load/generate a point cloud, build a range image from it,
//extract NARF keypoints and display both the range image and the keypoints.
//Returns 0 on success and on "file not found" (after printing usage).
int main(int argc, char** argv){
    //Step3.1 Parse the command-line arguments.
    if (pcl::console::find_argument (argc, argv, "-h") >= 0)
    {
        printUsage (argv[0]);
        return 0;
    }
    if (pcl::console::find_argument (argc, argv, "-m") >= 0)
    {   //Treat unseen pixels as max-range readings, so the borders of the
        //range image are detected as object borders too.
        setUnseenToMaxRange = true;
        std::cout << "Setting unseen values in range image to maximum range readings.\n";
    }

    int tmp_coordinate_frame;
    if (pcl::console::parse (argc, argv, "-c", tmp_coordinate_frame) >= 0)
    {
        coordinate_frame = pcl::RangeImage::CoordinateFrame (tmp_coordinate_frame);
        std::cout << "Using coordinate frame "<< (int)coordinate_frame<<".\n";
    }
    if (pcl::console::parse (argc, argv, "-s", support_size) >= 0)
        std::cout << "Setting support size to "<<support_size<<".\n";
    if (pcl::console::parse (argc, argv, "-r", angular_resolution) >= 0)
        std::cout << "Setting angular resolution to "<<angular_resolution<<"deg.\n";
    angular_resolution = pcl::deg2rad (angular_resolution);  // range-image API expects radians

    //Step4 Load a PCD file if one was given, otherwise generate an example point cloud.
    pcl::PointCloud<PointType>::Ptr point_cloud_ptr (new pcl::PointCloud<PointType>);
    pcl::PointCloud<PointType>& point_cloud = *point_cloud_ptr;

    pcl::PointCloud<pcl::PointWithViewpoint> far_ranges;             //measurements beyond the sensor range, merged into the range image below
    Eigen::Affine3f scene_sensor_pose (Eigen::Affine3f::Identity ()); //sensor pose used when building the range image

    //parse_file_extension_argument returns the argv indices of arguments ending
    //in ".pcd"; the first one ([0]) is taken as the scene file.
    std::vector<int> pcd_filename_indices = pcl::console::parse_file_extension_argument (argc, argv, "pcd");
    if (!pcd_filename_indices.empty ())
    {
        std::string filename = argv[pcd_filename_indices[0]];
        if (pcl::io::loadPCDFile (filename, point_cloud) == -1)
        {
            std::cerr << "Was not able to open file \""<<filename<<"\".\n";
            printUsage (argv[0]);
            return 0;
        }

        //Recover the sensor pose (origin + orientation) stored in the PCD header.
        scene_sensor_pose = Eigen::Affine3f (Eigen::Translation3f (point_cloud.sensor_origin_[0],
                                                                   point_cloud.sensor_origin_[1],
                                                                   point_cloud.sensor_origin_[2])) *
                            Eigen::Affine3f (point_cloud.sensor_orientation_);

        //Optionally load the accompanying far-ranges file; a missing file is not an error.
        std::string far_ranges_filename = pcl::getFilenameWithoutExtension (filename)+"_far_ranges.pcd";
        if (pcl::io::loadPCDFile (far_ranges_filename.c_str (), far_ranges) == -1)
            std::cout << "Far ranges file \""<<far_ranges_filename<<"\" does not exist.\n";
    }
    else
    {
        setUnseenToMaxRange = true;
        std::cout << "\nNo *.pcd file given => Generating example point cloud.\n\n";
        //Generate a simple slanted 1m x 1m rectangle as an example scene.
        for (float x=-0.5f; x<=0.5f; x+=0.01f)
        {
            for (float y=-0.5f; y<=0.5f; y+=0.01f)
            {
                PointType point;  point.x = x;  point.y = y;  point.z = 2.0f - y;
                point_cloud.points.push_back (point);
            }
        }
        point_cloud.width = point_cloud.size ();
        point_cloud.height = 1;  //unorganized point cloud
    }

    //Step5 Create the range image from the point cloud.
    float noise_level = 0.0;  //no averaging over neighboring measurements
    float min_range = 0.0f;   //no minimum visible range
    int border_size = 1;      //leave a 1-pixel border around the image

    pcl::RangeImage::Ptr range_image_ptr (new pcl::RangeImage);
    pcl::RangeImage& range_image = *range_image_ptr;
    //Full 360x180 degree view; pcl::deg2rad converts the field-of-view angles.
    range_image.createFromPointCloud (point_cloud, angular_resolution, pcl::deg2rad (360.0f), pcl::deg2rad (180.0f),
                                      scene_sensor_pose, coordinate_frame, noise_level, min_range, border_size);
    range_image.integrateFarRanges (far_ranges);
    if (setUnseenToMaxRange)
        range_image.setUnseenToMaxRange ();

    //Step6 Create the 3D viewer and show the range image as a black point cloud.
    pcl::visualization::PCLVisualizer viewer ("3D Viewer");
    viewer.setBackgroundColor (1, 1, 1);
    pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> range_image_color_handler (range_image_ptr, 0, 0, 0);
    viewer.addPointCloud (range_image_ptr, range_image_color_handler, "range image");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "range image");
    //viewer.addCoordinateSystem (1.0f, "global");
    //PointCloudColorHandlerCustom<PointType> point_cloud_color_handler (point_cloud_ptr, 150, 150, 150);
    //viewer.addPointCloud (point_cloud_ptr, point_cloud_color_handler, "original point cloud");
    viewer.initCameraParameters ();
    //setViewerPose (viewer, range_image.getTransformationToWorldSystem ());

    //Separate 2D widget showing the range image itself (unlike the 3D PCD view above).
    pcl::visualization::RangeImageVisualizer range_image_widget ("Range image");
    range_image_widget.showRangeImage (range_image);

    //Step7 Extract the NARF keypoints.
    pcl::RangeImageBorderExtractor range_image_border_extractor;               //border extraction (NARF step 1)
    pcl::NarfKeypoint narf_keypoint_detector (&range_image_border_extractor);  //keypoint detector built on the border extractor
    narf_keypoint_detector.setRangeImage (&range_image);
    narf_keypoint_detector.getParameters ().support_size = support_size;
    //narf_keypoint_detector.getParameters ().add_points_on_straight_edges = true;
    //narf_keypoint_detector.getParameters ().distance_for_additional_points = 0.5;

    pcl::PointCloud<int> keypoint_indices;  //indices into the range image, one per detected keypoint
    narf_keypoint_detector.compute (keypoint_indices);
    std::cout << "Found "<<keypoint_indices.size ()<<" key points.\n";

    //Step8 Copy the keypoints into an XYZ cloud and show them as large green points.
    pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints_ptr (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::PointCloud<pcl::PointXYZ>& keypoints = *keypoints_ptr;
    keypoints.resize (keypoint_indices.size ());
    for (std::size_t i=0; i<keypoint_indices.size (); ++i)
        keypoints[i].getVector3fMap () = range_image[keypoint_indices[i]].getVector3fMap ();

    pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> keypoints_color_handler (keypoints_ptr, 0, 255, 0);
    viewer.addPointCloud<pcl::PointXYZ> (keypoints_ptr, keypoints_color_handler, "keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 7, "keypoints");

    //Main loop: keep both windows responsive until the 3D viewer is closed.
    while (!viewer.wasStopped ())
    {
        range_image_widget.spinOnce ();  //process GUI events of the 2D widget
        viewer.spinOnce ();
        pcl_sleep(0.01);
    }
}

猜你喜欢

转载自blog.csdn.net/weixin_42503785/article/details/112505187