OpenCV 应用RANSAC于特征匹配

代码比较详细,发在博客做一个学习记录

class RobustMatcher {
private:
    // Pointer to the feature point detector object
    cv::Ptr<cv::FeatureDetector> detector;
    // Pointer to the feature descriptor extractor object
    cv::Ptr<cv::DescriptorExtractor> descriptor;
    int normType;      // norm used by the brute-force matcher (NORM_L2 by default)
    float ratio;       // max ratio between 1st and 2nd nearest neighbour (unused in this excerpt)
    bool refineF;      // if true, the fundamental matrix would be refined (unused in this excerpt)
    bool refineM;      // if true, the matches would be refined (unused in this excerpt)
    double distance;   // max distance to the epipolar line for a RANSAC inlier
    double confidence; // confidence level (probability) for RANSAC
public:
    // Builds a matcher from a detector and an optional descriptor extractor.
    // If no extractor is given, the detector itself is used to compute descriptors.
    RobustMatcher(const cv::Ptr<cv::FeatureDetector>& detector,
        const cv::Ptr<cv::DescriptorExtractor>& descriptor =
        cv::Ptr<cv::DescriptorExtractor>()) :
        detector(detector), descriptor(descriptor),
        normType(cv::NORM_L2), ratio(0.8f),
        refineF(true), refineM(true),
        // NOTE: initializers now follow the declaration order (distance before
        // confidence); the original order triggered -Wreorder and would be a
        // real bug if one member ever depended on the other.
        distance(1.0), confidence(0.98) {
        // Use the detector as the descriptor extractor when none was supplied
        if (!this->descriptor) {
            this->descriptor = this->detector;
        }
    }


    // Validates the given matches with a RANSAC estimation of the fundamental
    // matrix. Surviving (inlier) matches are appended to outMatches.
    // Returns the fundamental matrix, or an empty Mat when there are too few
    // matches for FM_RANSAC (which requires at least 8 correspondences).
    cv::Mat ransacTest(const std::vector<cv::DMatch>& matches,
        std::vector<cv::KeyPoint>& keypoints1,
        std::vector<cv::KeyPoint>& keypoints2,
        std::vector<cv::DMatch>& outMatches) {
        // Start from a clean output so repeated calls do not accumulate matches
        outMatches.clear();
        // FM_RANSAC needs at least 8 point correspondences; calling
        // findFundamentalMat with fewer (or zero) points would fail.
        if (matches.size() < 8) {
            return cv::Mat();
        }
        // Convert the matched keypoints to Point2f
        std::vector<cv::Point2f> points1, points2;
        for (std::vector<cv::DMatch>::const_iterator it = matches.begin();
            it != matches.end(); ++it) {
            // Position of the keypoint in the left image
            points1.push_back(keypoints1[it->queryIdx].pt);
            // Position of the keypoint in the right image
            points2.push_back(keypoints2[it->trainIdx].pt);
        }
        // Compute the fundamental matrix with RANSAC
        std::vector<uchar> inliers(points1.size(), 0);
        cv::Mat fundamental =
            cv::findFundamentalMat(points1,
                points2, // matched pixel points
                inliers, // match status (inlier or outlier)
                cv::FM_RANSAC, // RANSAC method
                distance, // distance to the epipolar line
                confidence); // confidence probability
        // Keep only the inlier matches
        std::vector<uchar>::const_iterator itIn = inliers.begin();
        std::vector<cv::DMatch>::const_iterator itM = matches.begin();
        // Walk both sequences in lockstep
        for (; itIn != inliers.end(); ++itIn, ++itM) {
            if (*itIn) { // it is a valid match
                outMatches.push_back(*itM);
            }
        }
        return fundamental;
    }


    // Matches feature points between two images and validates the matches
    // with RANSAC. Outputs the surviving matches and the detected keypoints,
    // and returns the estimated fundamental matrix.
    cv::Mat match(cv::Mat& image1, cv::Mat& image2, // input images
        std::vector<cv::DMatch>& matches, // output matches
        std::vector<cv::KeyPoint>& keypoints1, // output keypoints
        std::vector<cv::KeyPoint>& keypoints2) {
        // 1. Detect the feature points
        detector->detect(image1, keypoints1);
        detector->detect(image2, keypoints2);
        // 2. Extract the feature descriptors
        cv::Mat descriptors1, descriptors2;
        descriptor->compute(image1, keypoints1, descriptors1);
        descriptor->compute(image2, keypoints2, descriptors2);
        // 3. Match the descriptors of the two images.
        //    Cross-checking keeps only matches that agree in both directions.
        cv::BFMatcher matcher(normType, // distance measure
            true); // cross-check flag
        std::vector<cv::DMatch> outputMatches;
        matcher.match(descriptors1, descriptors2, outputMatches);
        // 4. Validate the matches with RANSAC
        cv::Mat fundamental = ransacTest(outputMatches,
            keypoints1, keypoints2,
            matches);
        // Return the fundamental matrix
        return fundamental;
    }
};
// 准备匹配器(用默认参数)
// SIFT 检测器和描述子;
int main() {
    // Load the test image; bail out early if the file is missing, otherwise
    // pyrDown on an empty Mat would abort the program.
    cv::Mat image = cv::imread("hy.jpg");
    if (image.empty()) {
        return 1; // image not found / not readable
    }
    cv::pyrDown(image, image);
    // The demo matches the image against a copy of itself
    cv::Mat image1 = image.clone(), image2 = image.clone();
    // Matcher with default parameters: SIFT detector and descriptor
    RobustMatcher rmatcher(cv::xfeatures2d::SIFT::create(250));
    // Match the two images
    std::vector<cv::DMatch> matches;
    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    cv::Mat fundamental = rmatcher.match(image1, image2,
        matches, keypoints1, keypoints2);

    cv::Mat imageMatches;
    cv::drawMatches(image1, keypoints1,  // 1st image and its keypoints
        image2, keypoints2,  // 2nd image and its keypoints
        matches,            // the matches
        imageMatches,       // the image produced
        cv::Scalar(255, 255, 255),  // color of the lines
        cv::Scalar(255, 255, 255),  // color of the keypoints
        std::vector<char>(),
        2);
    cv::imshow("Matches", imageMatches);


    // Convert the matched keypoints into Point2f and mark them with circles
    std::vector<cv::Point2f> points1, points2;

    for (std::vector<cv::DMatch>::const_iterator it = matches.begin();
        it != matches.end(); ++it) {

        // Position of the keypoint in the left image
        float x = keypoints1[it->queryIdx].pt.x;
        float y = keypoints1[it->queryIdx].pt.y;
        points1.push_back(keypoints1[it->queryIdx].pt);
        cv::circle(image1, cv::Point(x, y), 3, cv::Scalar(255, 255, 255), 3);
        // Position of the keypoint in the right image
        x = keypoints2[it->trainIdx].pt.x;
        y = keypoints2[it->trainIdx].pt.y;
        cv::circle(image2, cv::Point(x, y), 3, cv::Scalar(255, 255, 255), 3);
        points2.push_back(keypoints2[it->trainIdx].pt);
    }

    // Epipolar lines in image2 corresponding to the points of image1.
    // Each line is (a, b, c) with a*x + b*y + c = 0; we draw it by solving
    // for y at x = 0 and x = image width.
    std::vector<cv::Vec3f> lines1;
    cv::computeCorrespondEpilines(points1, 1, fundamental, lines1);

    for (std::vector<cv::Vec3f>::const_iterator it = lines1.begin();
        it != lines1.end(); ++it) {

        // Skip (near-)vertical lines: b == 0 would divide by zero
        if ((*it)[1] == 0) continue;
        cv::line(image2, cv::Point(0, -(*it)[2] / (*it)[1]),
            cv::Point(image2.cols, -((*it)[2] + (*it)[0] * image2.cols) / (*it)[1]),
            cv::Scalar(255, 255, 255));
    }

    // Epipolar lines in image1 corresponding to the points of image2
    std::vector<cv::Vec3f> lines2;
    cv::computeCorrespondEpilines(points2, 2, fundamental, lines2);

    for (std::vector<cv::Vec3f>::const_iterator it = lines2.begin();
        it != lines2.end(); ++it) {

        // Same vertical-line guard as above
        if ((*it)[1] == 0) continue;
        cv::line(image1, cv::Point(0, -(*it)[2] / (*it)[1]),
            cv::Point(image1.cols, -((*it)[2] + (*it)[0] * image1.cols) / (*it)[1]),
            cv::Scalar(255, 255, 255));
    }

    // Display the images with their epipolar lines
    cv::imshow("Right Image Epilines (RANSAC)", image1);
    cv::imshow("Left Image Epilines (RANSAC)", image2);

    cv::waitKey(0);
    return 0;
}

注释比较详细,代码可读性还比较高,就不多做解释了,下边是结果:

 

 这里用的匹配的两张图是一样的,所以匹配效果看起来很好

发布了31 篇原创文章 · 获赞 38 · 访问量 5037

猜你喜欢

转载自blog.csdn.net/qq_41685265/article/details/104111004