MSCKF core source code analysis

Source code path:  https://github.com/daniilidis-group/msckf_mono

 

Source code framework

The figure above shows the overall framework of the msckf source code. Whenever a new image arrives, the system first checks whether the IMU has been initialized; if not, it initializes the IMU. Otherwise, it records the IMU measurements between the previous and the current image frame and uses them to run the EKF's IMU prediction (propagation) step. The image tracker is then updated with the new image and IMU information, existing features are tracked, and new features are generated. After tracking, the error-state vector and covariance matrix are augmented: the new camera state and its covariance are appended to the error-state vector. Next, feature tracks are updated and the feature points to be removed are determined, after which feature removal and the EKF measurement update are carried out. Finally, old camera states that no longer track any feature points are deleted.
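A compilable skeleton of this per-image flow is sketched below. Every type and helper here is a stand-in; only the function names that reappear in the sections that follow (initialize_imu, propagate, augmentState, update, marginalize, pruneEmptyStates) come from msckf_mono itself.

#include <vector>

struct ImuReading { /* a, omega, dT, ... */ };

struct MsckfStub {
  void propagate(const ImuReading &) {}  // Section 2: EKF IMU prediction
  void augmentState(int, double) {}      // Section 4: state/covariance augmentation
  void update() {}                       // Section 5: decide which features to drop
  void marginalize() {}                  // Section 6: EKF measurement update
  void pruneEmptyStates() {}             // Section 7: remove empty camera states
};

bool imu_initialized = false;
MsckfStub msckf;
int state_id = 0;

void initialize_imu() {}                                      // Section 1
std::vector<ImuReading> imu_since_last_image() { return {}; } // recorded IMU data
void track_and_detect_features() {}                           // Section 3

void on_new_image(double image_time) {
  if (!imu_initialized) {
    initialize_imu();
    imu_initialized = true;
    return;
  }
  // Replay the IMU readings recorded between the two image frames
  for (const auto &m : imu_since_last_image())
    msckf.propagate(m);

  track_and_detect_features();
  msckf.augmentState(state_id++, image_time);
  msckf.update();
  msckf.marginalize();
  msckf.pruneEmptyStates();
}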
Following this flow, the source code is analyzed in the following modules:

  • IMU initialization

  • EKF's IMU prediction process

  • Tracker feature tracking and generation

  • Augmentation of error state vector and covariance matrix

  • Determining which feature points to remove

  • EKF measurement update process    

  • Removing empty camera states

 

1. IMU initialization

Initialization of the system in a static (stationary) state

void RosInterface::initialize_imu()
{
  Eigen::Vector3f accel_accum;
  Eigen::Vector3f gyro_accum;
  int num_readings = 0;
 
  accel_accum.setZero();
  gyro_accum.setZero();
 
  // Accumulate the IMU measurements over the initial stationary period
  for (const auto &entry : imu_queue_)
  {
    auto imu_time = std::get<0>(entry);
    auto imu_reading = std::get<1>(entry);
 
    accel_accum += imu_reading.a;
    gyro_accum += imu_reading.omega;
    num_readings++;
  }
 
  // Average acceleration and angular velocity over the stationary period
  Eigen::Vector3f accel_mean = accel_accum / num_readings;
  Eigen::Vector3f gyro_mean = gyro_accum / num_readings;
 
  // Use the mean gyro angular rate at rest as the gyroscope bias
  init_imu_state_.b_g = gyro_mean;
  init_imu_state_.g << 0.0, 0.0, -9.81; // default gravity vector
  // The initial IMU attitude is the axis-angle rotation between the mean
  // measured acceleration and the (negated) default gravity vector
  init_imu_state_.q_IG = Quaternion<float>::FromTwoVectors(
      -init_imu_state_.g, accel_mean);
 
  init_imu_state_.b_a = init_imu_state_.q_IG * init_imu_state_.g + accel_mean; // ~0
 
  init_imu_state_.p_I_G.setZero(); // initial IMU position is zero
  init_imu_state_.v_I_G.setZero(); // initial IMU velocity is zero
  const auto q = init_imu_state_.q_IG;
}

Understanding: at system initialization the position and velocity are both zero; the attitude is obtained from the rotation between the mean measured acceleration and the gravity vector; the gyroscope bias is set to the mean angular rate at rest, and the accelerometer bias comes out approximately zero.
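As a minimal standalone sketch (the numbers here are hypothetical), the same gravity-alignment trick can be reproduced with Eigen:

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Hypothetical mean accelerometer reading while stationary (m/s^2)
  Eigen::Vector3f accel_mean(0.3f, -0.2f, 9.79f);
  Eigen::Vector3f gravity(0.0f, 0.0f, -9.81f);

  // Rotation taking the "up" direction (-gravity) onto the measured
  // specific force -- the same FromTwoVectors call used in initialize_imu()
  Eigen::Quaternionf q_IG =
      Eigen::Quaternionf::FromTwoVectors(-gravity, accel_mean);

  // The residual after removing gravity approximates the accelerometer bias
  Eigen::Vector3f b_a = q_IG * gravity + accel_mean;
  std::cout << "b_a = " << b_a.transpose() << std::endl; // close to zero
}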

 

2. EKF’s IMU prediction process

2.1 Theoretical part ( https://blog.csdn.net/hltt3838/article/details/113647649 )

Understanding: because the IMU update rate is higher than the camera frame rate, between image updates the system state is propagated forward using the IMU's kinematic model.
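The discrete state-transition matrix is obtained as the matrix exponential of F·dt (see Phi_ = F_.exp() in the code below). A minimal sketch of that step using Eigen's unsupported MatrixFunctions module:

#include <Eigen/Dense>
#include <iostream>
#include <unsupported/Eigen/MatrixFunctions> // provides MatrixBase::exp()

int main() {
  const double dt = 0.005; // hypothetical IMU period (200 Hz)
  Eigen::Matrix<double, 15, 15> F = Eigen::Matrix<double, 15, 15>::Zero();
  // ... F would be filled by calcF() before discretization ...
  Eigen::Matrix<double, 15, 15> Phi = (F * dt).exp(); // state-transition matrix
  // A cheaper first-order approximation would be Phi = I + F * dt
  std::cout << Phi(0, 0) << std::endl; // 1 for F = 0
}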

 

2.2 Code part

  void propagate(imuReading<_S> &measurement_)
  {
    // Compute the F and G matrices per Eq. (10) of the paper
    calcF(imu_state_, measurement_);
    calcG(imu_state_);
 
    // Predict the IMU state at time k+1 (5th-order Runge-Kutta integration)
    imuState<_S> imu_state_prop = propogateImuStateRK(imu_state_, measurement_);
 
    // Covariance prediction
    // F * dt
    F_ *= measurement_.dT;
 
    // Matrix exponential
    Phi_ = F_.exp();
 
    // Reading the paper cited below, this appears to compensate for the
    // EKF's inconsistency by enforcing the proper nullspace of Phi
    // Apply observability constraints - enforce nullspace of Phi
    // Ref: Observability-constrained Vision-aided Inertial Navigation, Hesch J.
    // et al. Feb, 2012
    Matrix3<_S> R_kk_1(imu_state_.q_IG_null);
    Phi_.template block<3, 3>(0, 0) =
        imu_state_prop.q_IG.toRotationMatrix() * R_kk_1.transpose();
 
    Vector3<_S> u = R_kk_1 * imu_state_.g;
    RowVector3<_S> s = (u.transpose() * u).inverse() * u.transpose();
 
    Matrix3<_S> A1 = Phi_.template block<3, 3>(6, 0);
    Vector3<_S> tmp = imu_state_.v_I_G_null - imu_state_prop.v_I_G;
    Vector3<_S> w1 = vectorToSkewSymmetric(tmp) * imu_state_.g;
    Phi_.template block<3, 3>(6, 0) = A1 - (A1 * u - w1) * s;
 
    Matrix3<_S> A2 = Phi_.template block<3, 3>(12, 0);
    tmp = measurement_.dT * imu_state_.v_I_G_null +
          imu_state_.p_I_G_null - imu_state_prop.p_I_G;
    Vector3<_S> w2 = vectorToSkewSymmetric(tmp) * imu_state_.g;
    Phi_.template block<3, 3>(12, 0) = A2 - (A2 * u - w2) * s;
 
    Matrix<_S, 15, 15> imu_covar_prop = Phi_ * (imu_covar_ + G_ * noise_params_.Q_imu * G_.transpose() * measurement_.dT) * Phi_.transpose();
 
    // Apply updates directly
    imu_state_ = imu_state_prop;
    imu_state_.q_IG_null = imu_state_.q_IG;
    imu_state_.v_I_G_null = imu_state_.v_I_G;
    imu_state_.p_I_G_null = imu_state_.p_I_G;
 
    imu_covar_ = (imu_covar_prop + imu_covar_prop.transpose()) / 2.0;
    imu_cam_covar_ = Phi_ * imu_cam_covar_;
  }
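The last few lines of propagate() implement the standard EKF covariance prediction with the discretized process noise, roughly:

$$P_{II} \leftarrow \Phi \left( P_{II} + G\, Q_{imu}\, G^T\, \Delta t \right) \Phi^T, \qquad P_{IC} \leftarrow \Phi\, P_{IC},$$

followed by the symmetrization $P_{II} \leftarrow (P_{II} + P_{II}^T)/2$ to keep the covariance numerically symmetric.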

  calcF() and calcG()

  inline void calcF(const imuState<_S> &imu_state_k,
                    const imuReading<_S> &measurement_k)
  {
    /* Multiplies the error state in the linearized continuous-time
           error state model */
    F_.setZero();
 
    // Compute the nominal angular velocity and acceleration
    Vector3<_S> omegaHat, aHat;
    omegaHat = measurement_k.omega - imu_state_k.b_g;
    aHat = measurement_k.a - imu_state_k.b_a; // the paper also accounts for the Earth's rotation; that is omitted here
    Matrix3<_S> C_IG = imu_state_k.q_IG.toRotationMatrix();
 
    F_.template block<3, 3>(0, 0) = -vectorToSkewSymmetric(omegaHat);
    F_.template block<3, 3>(0, 3) = -Matrix3<_S>::Identity();
    F_.template block<3, 3>(6, 0) = -C_IG.transpose() * vectorToSkewSymmetric(aHat);
    F_.template block<3, 3>(6, 9) = -C_IG.transpose();
    F_.template block<3, 3>(12, 6) = Matrix3<_S>::Identity();
  }
 
  inline void calcG(const imuState<_S> &imu_state_k)
  {
    /* Multiplies the noise std::vector in the linearized continuous-time
           error state model */
    G_.setZero();
 
    Matrix3<_S> C_IG = imu_state_k.q_IG.toRotationMatrix();
 
    G_.template block<3, 3>(0, 0) = -Matrix3<_S>::Identity();
    G_.template block<3, 3>(3, 3) = Matrix3<_S>::Identity();
    G_.template block<3, 3>(6, 6) = -C_IG.transpose();
    G_.template block<3, 3>(9, 9) = Matrix3<_S>::Identity();
  }
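Reading the nonzero blocks off calcF() and calcG(), with the 15-dimensional error state ordered as $[\delta\theta,\; \tilde b_g,\; \tilde v,\; \tilde b_a,\; \tilde p]$ and $C = C({}^{I}_{G}q)$, the continuous-time model matrices are:

$$F = \begin{bmatrix} -\lfloor \hat\omega \rfloor_\times & -I_3 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ -C^T \lfloor \hat a \rfloor_\times & 0 & 0 & -C^T & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & I_3 & 0 & 0 \end{bmatrix}, \qquad G = \begin{bmatrix} -I_3 & 0 & 0 & 0 \\ 0 & I_3 & 0 & 0 \\ 0 & 0 & -C^T & 0 \\ 0 & 0 & 0 & I_3 \\ 0 & 0 & 0 & 0 \end{bmatrix}$$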

 

3. Tracker feature tracking and generation

3.1 Theoretical part

https://blog.csdn.net/hltt3838/article/details/112525638

 

3.2 Code part

Feature tracking (optical flow method)

void CornerTracker::track_features(cv::Mat img_1, cv::Mat img_2, Point2fVector& points1, Point2fVector& points2, IdVector& id1, IdVector& id2)
{
  std::vector<uchar> status;
  std::vector<float> err;
  // Compute sparse optical flow for the feature set using the iterative
  // Lucas-Kanade method with pyramids
  cv::calcOpticalFlowPyrLK(img_1, img_2,
                           points1, points2,
                           status, err,
                           window_size_, max_level_,
                           termination_criteria_,
                           cv::OPTFLOW_USE_INITIAL_FLOW, min_eigen_threshold_);
 
  int h = img_1.rows;
  int w = img_1.cols;
 
  // Remove features that failed to track or fell outside the image bounds
  // remove failed or out of image points
  int indexCorrection = 0;
  for(int i=0; i<status.size(); i++)
  {
    cv::Point2f pt = points2.at(i-indexCorrection);
    cv::Point2f dist_vector = points2.at(i-indexCorrection)-points1.at(i-indexCorrection);
    double dist = std::sqrt(dist_vector.x*dist_vector.x+dist_vector.y*dist_vector.y);
    if(dist>25.0 || (status.at(i) == 0)||(pt.x<0)||(pt.y<0)||(pt.x>w)||(pt.y>h))
    {
      if((pt.x<0)||(pt.y<0)||(pt.x>w)||(pt.y>h))
        status.at(i) = 0;
 
      points1.erase (points1.begin() + (i - indexCorrection));
      points2.erase (points2.begin() + (i - indexCorrection));
 
      id1.erase (id1.begin() + (i - indexCorrection));
      id2.erase (id2.begin() + (i - indexCorrection));
 
      indexCorrection++;
    }
  }
}

Generating new features (FAST corners)

track_handler_->new_features()

The source code uses the https://github.com/uzh-rpg/fast library for corner detection, so it is not covered in detail here. The principle is the same as OpenCV's FastFeatureDetector(), which can be consulted to understand this part; the OpenCV interface can also be used as a drop-in replacement for the fast corner detection library.
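If one does want to swap in OpenCV, a minimal sketch (not part of msckf_mono; the threshold value is a hypothetical default) using cv::FastFeatureDetector would look like this:

#include <opencv2/features2d.hpp>
#include <vector>

// Detect FAST corners and return them as pixel coordinates
std::vector<cv::Point2f> detect_fast_corners(const cv::Mat &img,
                                             int threshold = 20) {
  cv::Ptr<cv::FastFeatureDetector> detector =
      cv::FastFeatureDetector::create(threshold, /*nonmaxSuppression=*/true);
  std::vector<cv::KeyPoint> keypoints;
  detector->detect(img, keypoints);

  std::vector<cv::Point2f> corners;
  cv::KeyPoint::convert(keypoints, corners);
  return corners;
}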

Note: compilation errors when building MSCKF are usually caused by this library not being installed.

 

4. Augmentation of error state vector and covariance matrix

4.1 Theoretical part

State vector augmentation

When a new camera image is obtained, the camera pose needs to be added to the current state vector. The camera pose can be estimated from the current IMU pose:
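(Reconstructed from Eq. (14) of the paper and the augmentState() code in 4.2 below:)

$$ {}^{C}_{G}\hat q = {}^{C}_{I}q \otimes {}^{I}_{G}\hat q, \qquad {}^{G}\hat p_C = {}^{G}\hat p_I + C\big({}^{I}_{G}\hat q\big)^T\, {}^{I}p_C $$

where ${}^{C}_{I}q$ and ${}^{I}p_C$ are the known camera-IMU extrinsics.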

Covariance matrix augmentation

When a new image frame arrives, the augmented covariance matrix can be written as:
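(Reconstructed from Eqs. (15)-(16) of the paper and the tempMat construction in the code below, with $N$ camera states currently in the window:)

$$ P_{k|k} \leftarrow \begin{bmatrix} I_{15+6N} \\ J \end{bmatrix} P_{k|k} \begin{bmatrix} I_{15+6N} \\ J \end{bmatrix}^T $$

where $J$ is the Jacobian of the new camera state with respect to the current full state.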

4.2 Code part

  // Consistent with Section III-C of the paper
  // Generates a new camera state and adds it to the full state and covariance.
  void augmentState(const int &state_id, const _S &time)
  {
    map_.clear();
 
    // Compute the current camera pose from the current IMU pose (Eq. (14))
    // Compute camera_ pose from current IMU pose
    Quaternion<_S> q_CG = camera_.q_CI * imu_state_.q_IG;
 
    q_CG.normalize();
    camState<_S> cam_state;
    cam_state.last_correlated_id = -1;
    cam_state.q_CG = q_CG;
 
    cam_state.p_C_G =
        imu_state_.p_I_G + imu_state_.q_IG.inverse() * camera_.p_C_I;
 
    cam_state.time = time;
    cam_state.state_id = state_id;
 
    // Augment the covariance dimensions
    // Build MSCKF covariance matrix
    if (cam_states_.size())
    {
      P_.resize(15 + cam_covar_.cols(), 15 + cam_covar_.cols());
      P_.template block<15, 15>(0, 0) = imu_covar_;
      P_.block(0, 15, 15, cam_covar_.cols()) = imu_cam_covar_;
      P_.block(15, 0, cam_covar_.cols(), 15) = imu_cam_covar_.transpose();
      P_.block(15, 15, cam_covar_.rows(), cam_covar_.cols()) = cam_covar_;
    }
    else
    {
      P_ = imu_covar_;
    }
 
    if (P_.determinant() < -0.000001)
    {
      //ROS_ERROR("Covariance determinant is negative! %f", P.determinant());
    }
 
    // Jacobian of the new camera state w.r.t. the state vector (Eq. (16))
    MatrixX<_S> J = MatrixX<_S>::Zero(6, 15 + 6 * cam_states_.size());
    J.template block<3, 3>(0, 0) = camera_.q_CI.toRotationMatrix();
    J.template block<3, 3>(3, 0) =
        vectorToSkewSymmetric(imu_state_.q_IG.inverse() * camera_.p_C_I);
    J.template block<3, 3>(3, 12) = Matrix3<_S>::Identity();
 
    // Camera<_S> State Jacobian
    // MatrixX<_S> J = calcJ(imu_state_, cam_states_);
 
    MatrixX<_S> tempMat = MatrixX<_S>::Identity(15 + 6 * cam_states_.size() + 6,
                                                15 + 6 * cam_states_.size());
    tempMat.block(15 + 6 * cam_states_.size(), 0, 6,
                  15 + 6 * cam_states_.size()) = J;
 
    // Augment the MSCKF covariance matrix
    MatrixX<_S> P_aug = tempMat * P_ * tempMat.transpose();
 
    MatrixX<_S> P_aug_sym = (P_aug + P_aug.transpose()) / 2.0;
 
    P_aug = P_aug_sym;
 
    // Augment the state vector
    // Break everything into appropriate structs
    cam_states_.push_back(cam_state);
    imu_covar_ = P_aug.template block<15, 15>(0, 0);
 
    // Update the camera covariance (P_CC) and the camera-IMU cross covariance (P_IC)
    cam_covar_.resize(P_aug.rows() - 15, P_aug.cols() - 15);
    cam_covar_ = P_aug.block(15, 15, P_aug.rows() - 15, P_aug.cols() - 15);
 
    imu_cam_covar_.resize(15, P_aug.cols() - 15);
    imu_cam_covar_ = P_aug.block(0, 15, 15, P_aug.cols() - 15);
 
    VectorX<_S> cov_diag = imu_covar_.diagonal();
  }

5. Determining which feature points to remove

  void update(const std::vector<Vector2<_S>, Eigen::aligned_allocator<Vector2<_S>>> &measurements,
              const std::vector<size_t> &feature_ids)
  {
    feature_tracks_to_residualize_.clear();
    tracks_to_remove_.clear();
 
    int id_iter = 0;
    // feature_ids holds the feature IDs observed in the current frame;
    // tracked_feature_ids_ holds the IDs of features already being tracked
    // Loop through all features being tracked
    for (auto feature_id : tracked_feature_ids_)
    {
      // Check if old feature is seen in current measurements
      auto input_feature_ids_iter =
          find(feature_ids.begin(), feature_ids.end(), feature_id);
      bool is_valid = (input_feature_ids_iter != feature_ids.end());
 
      // If so, get the relevant track
      auto track = feature_tracks_.begin() + id_iter;
 
      // If the tracked feature appears in the current image, append the
      // current observation to that feature's track
      // If we're still tracking this point, add the observation
      if (is_valid)
      {
        // Group the observation data by feature
        size_t feature_ids_dist =
            distance(feature_ids.begin(), input_feature_ids_iter);
        track->observations.push_back(measurements[feature_ids_dist]);
 
        // Update the tracked-feature set of the latest camera state
        auto cam_state_iter = cam_states_.end() - 1;
        cam_state_iter->tracked_feature_ids.push_back(feature_id);
 
        // Record the camera-state index in the feature's track
        track->cam_state_indices.push_back(cam_state_iter->state_id);
      }
 
      // If the feature does not appear in the current frame, or its track
      // exceeds the maximum track length, remove the feature
      // If corner is not valid or track is too long, remove track to be residualized
      if (!is_valid || (track->observations.size() >=
                        msckf_params_.max_track_length))
      {
        // Remove the feature from the camera states that observed it,
        // and record those camera states
        featureTrackToResidualize<_S> track_to_residualize;
        removeTrackedFeature(feature_id, track_to_residualize.cam_states,
                             track_to_residualize.cam_state_indices);
 
        // If the removed feature was tracked for at least the minimum track
        // length, copy its track information into track_to_residualize;
        // feature_tracks_to_residualize_ collects the discarded feature tracks
        // If track is long enough, add to the residualized list
        if (track_to_residualize.cam_states.size() >=
            msckf_params_.min_track_length)
        {
          track_to_residualize.feature_id = track->feature_id;
          track_to_residualize.observations = track->observations;
          track_to_residualize.initialized = track->initialized;
          if (track->initialized)
            track_to_residualize.p_f_G = track->p_f_G;
 
          feature_tracks_to_residualize_.push_back(track_to_residualize);
        }
 
        // Record the ID of the feature to remove
        tracks_to_remove_.push_back(feature_id);
      }
 
      id_iter++;
    }
 
    // TODO: Double check this stuff and maybe use non-pointers for accessing
    // elements so that it only requires one pass
    // Remove the features collected in tracks_to_remove_
    for (auto feature_id : tracks_to_remove_)
    {
      auto track_iter = feature_tracks_.begin();
      while (track_iter != feature_tracks_.end())
      {
        if (track_iter->feature_id == feature_id)
        {
          size_t last_id = track_iter->cam_state_indices.back();
          for (size_t index : track_iter->cam_state_indices)
          {
            for (auto &camstate : cam_states_)
            {
              if (!camstate.tracked_feature_ids.size() &&
                  camstate.state_id == index)
              {
                camstate.last_correlated_id = last_id;
              }
            }
          }
          track_iter = feature_tracks_.erase(track_iter);
          break;
        }
        else
          track_iter++;
      }
 
      auto corresponding_id = std::find(tracked_feature_ids_.begin(),
                                        tracked_feature_ids_.end(), feature_id);
 
      if (corresponding_id != tracked_feature_ids_.end())
      {
        tracked_feature_ids_.erase(corresponding_id);
      }
    }
  }

6. EKF measurement update process

  void marginalize()
  {
    // If there are discarded feature tracks to residualize
    if (!feature_tracks_to_residualize_.empty())
    {
      int num_passed, num_rejected, num_ransac, max_length, min_length;
      _S max_norm, min_norm;
      num_passed = 0;
      num_rejected = 0;
      num_ransac = 0;
      max_length = -1;
      min_length = std::numeric_limits<int>::max();
      max_norm = -1;
      min_norm = std::numeric_limits<_S>::infinity();
 
      std::vector<bool> valid_tracks;
      std::vector<Vector3<_S>, Eigen::aligned_allocator<Vector3<_S>>> p_f_G_vec;
      int total_nObs = 0;
 
      for (auto track = feature_tracks_to_residualize_.begin();
           track != feature_tracks_to_residualize_.end(); track++)
      {
        // Once more than 3 tracks have been residualized, check that the
        // camera states observing this track moved a sufficient distance
        if (num_feature_tracks_residualized_ > 3 &&
            !checkMotion(track->observations.front(), track->cam_states))
        {
          num_rejected += 1;
          valid_tracks.push_back(false);
          continue;
        }
 
        Vector3<_S> p_f_G;
        _S Jcost, RCOND;
 
        // Estimate feature 3D location with intersection, LM
        bool isvalid =
            initializePosition(track->cam_states, track->observations, p_f_G);
 
        if (isvalid)
        {
          track->initialized = true;
          track->p_f_G = p_f_G;
          map_.push_back(p_f_G); // map_ stores the feature positions
        }
 
        p_f_G_vec.push_back(p_f_G); // record the newly recovered feature position
        int nObs = track->observations.size();
 
        // Transform the feature position into the first camera (c1) frame
        Vector3<_S> p_f_C1 = (track->cam_states[0].q_CG.toRotationMatrix()) *
                             (p_f_G - track->cam_states[0].p_C_G);
 
        Eigen::Array<_S, 3, 1> p_f_G_array = p_f_G.array();
 
        if (!isvalid)
        {
          num_rejected += 1;
          valid_tracks.push_back(false);
        }
        else
        {
          num_passed += 1;
          valid_tracks.push_back(true);
          total_nObs += nObs;
          if (nObs > max_length)
          {
            max_length = nObs;
          }
          if (nObs < min_length)
          {
            min_length = nObs;
          }
 
          num_feature_tracks_residualized_ += 1;
        }
      }
 
      if (!num_passed)
      {
        return;
      }
      // Allocate the variables of the measurement model after projection
      // onto the nullspace
      MatrixX<_S> H_o = MatrixX<_S>::Zero(2 * total_nObs - 3 * num_passed,
                                          15 + 6 * cam_states_.size());
      MatrixX<_S> R_o = MatrixX<_S>::Zero(2 * total_nObs - 3 * num_passed,
                                          2 * total_nObs - 3 * num_passed);
      VectorX<_S> r_o(2 * total_nObs - 3 * num_passed);
 
      Vector2<_S> rep;
      rep << noise_params_.u_var_prime, noise_params_.v_var_prime;
 
      int stack_counter = 0;
      for (int iter = 0; iter < feature_tracks_to_residualize_.size(); iter++)
      {
        if (!valid_tracks[iter])
          continue;
 
        featureTrackToResidualize<_S> track = feature_tracks_to_residualize_[iter];
 
        Vector3<_S> p_f_G = p_f_G_vec[iter];
        VectorX<_S> r_j = calcResidual(p_f_G, track.cam_states, track.observations); // visual measurement residual, per Eqs. (18)-(20) of the paper
 
        int nObs = track.observations.size();
        MatrixX<_S> R_j = (rep.replicate(nObs, 1)).asDiagonal();
 
        // Calculate H_o_j and residual
        MatrixX<_S> H_o_j, A_j;
        calcMeasJacobian(p_f_G, track.cam_state_indices, H_o_j, A_j); // computes A^T and H_o from Eq. (23) of the paper
 
        // Stacked residuals and friends
        VectorX<_S> r_o_j = A_j.transpose() * r_j;       // project r_j of Eq. (22) onto the left nullspace
        MatrixX<_S> R_o_j = A_j.transpose() * R_j * A_j; // project the measurement noise onto the left nullspace
 
        if (gatingTest(H_o_j, r_o_j, track.cam_states.size() - 1))
        {
          r_o.segment(stack_counter, r_o_j.size()) = r_o_j;
          H_o.block(stack_counter, 0, H_o_j.rows(), H_o_j.cols()) = H_o_j;
          R_o.block(stack_counter, stack_counter, R_o_j.rows(), R_o_j.cols()) =
              R_o_j;
 
          stack_counter += H_o_j.rows();
        }
      }
 
      H_o.conservativeResize(stack_counter, H_o.cols());
      r_o.conservativeResize(stack_counter);
      R_o.conservativeResize(stack_counter, stack_counter);
 
      // At this point H_o, r_o and the covariance of the noise n_o in the
      // paper's measurement model are available; the state and covariance
      // are then updated through the EKF measurement update
      measurementUpdate(H_o, r_o, R_o);
    }
  }
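measurementUpdate(H_o, r_o, R_o) then carries out the standard MSCKF update. Per Eqs. (25)-(27) of Mourikis' paper (a sketch; the implementation details in msckf_mono may differ slightly), $H_o$ is first compressed via QR decomposition, $H_o = \begin{bmatrix} Q_1 & Q_2 \end{bmatrix} \begin{bmatrix} T_H \\ 0 \end{bmatrix}$, giving $r_n = Q_1^T r_o$ and $R_n = Q_1^T R_o Q_1$, after which the usual EKF equations are applied:

$$ K = P T_H^T \left( T_H P T_H^T + R_n \right)^{-1}, \qquad \Delta X = K\, r_n, $$
$$ P \leftarrow \left( I - K T_H \right) P \left( I - K T_H \right)^T + K R_n K^T. $$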

7. Removing empty camera states

  // Remove camera states that no longer track any active feature points,
  // and update the corresponding covariance matrix
  void pruneEmptyStates()
  {
    int max_states = msckf_params_.max_cam_states;
    if (cam_states_.size() < max_states)
      return;
    std::vector<size_t> deleteIdx;
    deleteIdx.clear();
 
    size_t num_states = cam_states_.size();
 
    // Find all cam_states_ with no tracked landmarks and prune them
    auto camState_it = cam_states_.begin();
    size_t num_deleted = 0;
    int camstate_pos = 0;
    int num_cam_states = cam_states_.size();
 
    int last_to_remove = num_cam_states - max_states - 1;
 
    if (cam_states_.front().tracked_feature_ids.size())
    {
      return;
    }
 
    for (int i = 1; i < num_cam_states - max_states; i++)
    {
      if (cam_states_[i].tracked_feature_ids.size())
      {
        last_to_remove = i - 1;
        break;
      }
    }
 
    // Erase the empty camera states
    for (int i = 0; i <= last_to_remove; ++i)
    {
      deleteIdx.push_back(camstate_pos + num_deleted);
      pruned_states_.push_back(*camState_it);
      camState_it = cam_states_.erase(camState_it);
      num_deleted++;
    }
 
    if (deleteIdx.size() != 0)
    {
      int n_remove = 0;
      int n_keep = 0;
      std::vector<bool> to_keep(num_states, false);
      for (size_t IDx = 0; IDx < num_states; IDx++)
      {
        if (find(deleteIdx.begin(), deleteIdx.end(), IDx) != deleteIdx.end())
          n_remove++;
        else
        {
          to_keep[IDx] = true;
          n_keep++;
        }
      }
 
      // Update the covariance by directly removing the rows/columns
      // corresponding to the deleted states
      int remove_counter = 0;
      int keep_counter = 0;
      VectorXi keepCovarIdx(6 * n_keep);
      VectorXi removeCovarIdx(6 * n_remove);
      for (size_t IDx = 0; IDx < num_states; IDx++)
      {
        if (!to_keep[IDx])
        {
          removeCovarIdx.segment<6>(6 * remove_counter) =
              VectorXi::LinSpaced(6, 6 * IDx, 6 * (IDx + 1) - 1);
          remove_counter++;
        }
        else
        {
          keepCovarIdx.segment<6>(6 * keep_counter) =
              VectorXi::LinSpaced(6, 6 * IDx, 6 * (IDx + 1) - 1);
          keep_counter++;
        }
      }
 
      MatrixX<_S> prunedCamCovar;
      square_slice(cam_covar_, keepCovarIdx, prunedCamCovar);
      cam_covar_.resize(prunedCamCovar.rows(), prunedCamCovar.cols());
      cam_covar_ = prunedCamCovar;
 
      Matrix<_S, 15, Dynamic> prunedImuCamCovar;
      column_slice(imu_cam_covar_, keepCovarIdx, prunedImuCamCovar);
      imu_cam_covar_.resize(prunedImuCamCovar.rows(), prunedImuCamCovar.cols());
      imu_cam_covar_ = prunedImuCamCovar;
    }
 
    // TODO: Additional outputs = deletedCamCovar (used to compute sigma),
    // deletedCamStates
  }
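square_slice and column_slice are small matrix utilities from the repository; their behavior, inferred from the usage above (this is a sketch of the assumed semantics, not the repository's implementation), is to copy out only the rows/columns listed in an index vector:

#include <Eigen/Dense>

// Keep only the rows and columns of a square covariance listed in idx
template <typename S>
void square_slice(const Eigen::Matrix<S, Eigen::Dynamic, Eigen::Dynamic> &in,
                  const Eigen::VectorXi &idx,
                  Eigen::Matrix<S, Eigen::Dynamic, Eigen::Dynamic> &out) {
  out.resize(idx.size(), idx.size());
  for (int i = 0; i < idx.size(); ++i)
    for (int j = 0; j < idx.size(); ++j)
      out(i, j) = in(idx(i), idx(j));
}

// Keep only the columns of the 15 x 6N IMU-camera cross covariance listed in idx
template <typename S>
void column_slice(const Eigen::Matrix<S, 15, Eigen::Dynamic> &in,
                  const Eigen::VectorXi &idx,
                  Eigen::Matrix<S, 15, Eigen::Dynamic> &out) {
  out.resize(15, idx.size());
  for (int j = 0; j < idx.size(); ++j)
    out.col(j) = in.col(idx(j));
}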

Original post: https://blog.csdn.net/xiaojinger_123/article/details/127886083