PL-SVO

PL-SVO extracts point and line-segment features from the first frame. Point features are stored directly as cv::Point2f; for line-segment features, the two endpoints of the segment are stored.

void detectFeatures(
    FramePtr frame,
    vector<cv::Point2f>& px_vec,
    vector<Vector3d>& f_vec)

Extract point and line-segment features:

  list<PointFeat*> new_features;
  list<LineFeat*>  new_features_ls;

  if(Config::initPoints())
  {
      feature_detection::FastDetector detector(
          frame->img().cols, frame->img().rows, Config::gridSize(), Config::nPyrLevels());
      detector.detect(frame.get(), frame->img_pyr_, Config::triangMinCornerScore(), new_features);
  }

  if(Config::initLines())
  {
      feature_detection::LsdDetector detector_ls(
          frame->img().cols, frame->img().rows, Config::gridSizeSegs(), Config::nPyrLevelsSegs());
      detector_ls.detect(frame.get(), frame->img_pyr_, Config::lsdMinLength(), new_features_ls);
  }

The features are then saved into vector<cv::Point2f>& px_vec; for each line-segment feature, three points are stored: the two endpoints and the midpoint.

  // First try: introduce endpoints (line segments usually belong to planes)
  std::for_each(new_features_ls.begin(), new_features_ls.end(), [&](LineFeat* ftr){
    px_vec.push_back(cv::Point2f(ftr->spx[0], ftr->spx[1]));
    f_vec.push_back(ftr->sf);
    px_vec.push_back(cv::Point2f((ftr->spx[0]+ftr->epx[0])/2.0, (ftr->spx[1]+ftr->epx[1])/2.0));
    f_vec.push_back((ftr->sf+ftr->ef)/2.0);
    px_vec.push_back(cv::Point2f(ftr->epx[0], ftr->epx[1]));
    f_vec.push_back(ftr->ef);
    delete ftr;
  });
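
For comparison, the corresponding loop for point features is simpler; essentially the one from SVO's detectFeatures, using pl-svo's PointFeat type, it pushes a single pixel and bearing vector per corner:

  std::for_each(new_features.begin(), new_features.end(), [&](PointFeat* ftr){
    px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1]));
    f_vec.push_back(ftr->f);
    delete ftr;
  });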

When the second image arrives, no new feature extraction is performed; instead, the features are tracked with pyramidal KLT optical flow:

void trackKlt(
    FramePtr frame_ref,
    FramePtr frame_cur,
    vector<cv::Point2f>& px_ref,
    vector<cv::Point2f>& px_cur,
    vector<Vector3d>& f_ref,
    vector<Vector3d>& f_cur,
    vector<double>& disparities)
{
  const double klt_win_size = 30.0;
  const int klt_max_iter = 30;
  const double klt_eps = 0.001;
  vector<uchar> status;
  vector<float> error;
  vector<float> min_eig_vec;
  cv::TermCriteria termcrit(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, klt_max_iter, klt_eps);
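  // Note: cv::OPTFLOW_USE_INITIAL_FLOW means px_cur must already hold an
  // initial guess; in SVO the initializer seeds px_cur with a copy of px_ref.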
  cv::calcOpticalFlowPyrLK(frame_ref->img_pyr_[0], frame_cur->img_pyr_[0],
                           px_ref, px_cur,
                           status, error,
                           cv::Size2i(klt_win_size, klt_win_size),
                           4, termcrit, cv::OPTFLOW_USE_INITIAL_FLOW);

Compute the disparity and the 3D bearing vector of every correctly tracked point:

  vector<cv::Point2f>::iterator px_ref_it = px_ref.begin();
  vector<cv::Point2f>::iterator px_cur_it = px_cur.begin();
  vector<Vector3d>::iterator f_ref_it = f_ref.begin();
  f_cur.clear(); f_cur.reserve(px_cur.size());
  disparities.clear(); disparities.reserve(px_cur.size());
  for(size_t i=0; px_ref_it != px_ref.end(); ++i)
  {
    // if the point has not been correctly tracked,
    // remove all occurrences: ref px, ref f, and cur px
    if(!status[i])
    {
      px_ref_it = px_ref.erase(px_ref_it);
      px_cur_it = px_cur.erase(px_cur_it);
      f_ref_it = f_ref.erase(f_ref_it);
      continue;
    }
    f_cur.push_back(frame_cur->c2f(px_cur_it->x, px_cur_it->y));
    disparities.push_back(Vector2d(px_ref_it->x - px_cur_it->x, px_ref_it->y - px_cur_it->y).norm());
    ++px_ref_it;
    ++px_cur_it;
    ++f_ref_it;
  }
}
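
These disparities drive the keyframe decision during initialization. In SVO, whose initializer pl-svo inherits, the second-frame handler checks the median disparity roughly as follows; vk::getMedian is an rpg_vikit helper and the thresholds come from Config:

  trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
  if(disparities_.size() < Config::initMinTracked())
    return FAILURE;                  // too few features survived KLT tracking
  double disparity = vk::getMedian(disparities_);
  if(disparity < Config::initMinDisparity())
    return NO_KEYFRAME;              // not enough parallax to triangulate yet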

In the loop above, frame_cur->c2f(px_cur_it->x, px_cur_it->y) converts the feature's pixel coordinates into a depth-normalized point in the camera coordinate frame, applies distortion correction, and then rescales the vector to unit norm so that it lies on the unit sphere.

  inline Vector3d c2f(const Vector2d& px) const { return cam_->cam2world(px[0], px[1]); }
  inline Vector3d c2f(const double x, const double y) const { return cam_->cam2world(x, y); }
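
The details of cam2world depend on the camera model. For the common pinhole model with lens distortion, a minimal sketch of the idea (a hypothetical helper built on cv::undistortPoints, not pl-svo's actual vikit implementation) would be:

  #include <opencv2/opencv.hpp>  // for cv::undistortPoints
  #include <Eigen/Core>

  // Undistort the pixel, back-project onto the z = 1 plane, then normalize
  // to a unit-length bearing vector on the sphere. K is the intrinsic
  // matrix, dist the distortion coefficients.
  Eigen::Vector3d cam2world_sketch(double u, double v,
                                   const cv::Mat& K, const cv::Mat& dist)
  {
    std::vector<cv::Point2f> in{cv::Point2f(float(u), float(v))}, out;
    // Without the R/P arguments, undistortPoints returns normalized image
    // coordinates (x, y) on the z = 1 plane, distortion already removed.
    cv::undistortPoints(in, out, K, dist);
    return Eigen::Vector3d(out[0].x, out[0].y, 1.0).normalized();
  }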

Subsequent frames are then handled by FrameHandlerMono::processFrame.

The pose of the previous frame is used as the initial pose of the current frame, and sparse image alignment is then performed.
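
In the original SVO this step of processFrame looks roughly like the following (pl-svo extends the alignment to line segments as well, and its exact SparseImgAlign parameters may differ):

  // Initial pose estimate: assume the camera has not moved since the last frame.
  new_frame_->T_f_w_ = last_frame_->T_f_w_;

  // Sparse image alignment: minimize the photometric error of small feature
  // patches between the last frame and the new frame, coarse-to-fine.
  SparseImgAlign img_align(Config::kltMaxLevel(), Config::kltMinLevel(),
                           30, SparseImgAlign::GaussNewton, false, false);
  size_t img_align_n_tracked = img_align.run(last_frame_, new_frame_);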

Reproduced from www.cnblogs.com/feifanrensheng/p/10505243.html