OpenCV Image Stitching

Reference article: OpenCV in Practice - Image Stitching
Original images: 1.jpeg and 2.jpeg (images omitted)
Basic Python implementation:

import cv2
import numpy as np


# Detect SIFT keypoints in an image and compute their descriptors
def detectAndDescribe(image):
    # Create the SIFT detector
    sift = cv2.SIFT_create()
    # Detect keypoints and compute descriptors
    (kps, features) = sift.detectAndCompute(image, None)
    # Convert the keypoint coordinates to a NumPy array
    kps = np.float32([kp.pt for kp in kps])
    # Return the keypoints and their corresponding descriptors
    return (kps, features)

# Read the input images
img1 = cv2.imread("1.jpeg")
img2 = cv2.imread("2.jpeg")

# Resize both images to the same size
img1 = cv2.resize(img1,(640,480))
img2 = cv2.resize(img2,(640,480))

# Detect SIFT keypoints and compute descriptors for both images
kps1, features1 = detectAndDescribe(img1)
kps2, features2 = detectAndDescribe(img2)

# Create a brute-force matcher
bf = cv2.BFMatcher()
# Use KNN matching (k=2) to find candidate SIFT match pairs between the two images
matches = bf.knnMatch(features1, features2, 2)
good = []
for m in matches:
    # Lowe's ratio test: keep a pair only when the nearest distance is under 0.75x the second-nearest
    if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
        # Store the matched points' indices into features2 (trainIdx) and features1 (queryIdx)
        good.append((m[0].trainIdx, m[0].queryIdx))

# Estimate the perspective transform once more than 4 match pairs survive the filter
if len(good) > 4:
    # Gather the coordinates of the matched points
    pts1 = np.float32([kps1[i] for (_, i) in good])
    pts2 = np.float32([kps2[i] for (i, _) in good])
    # Compute the homography mapping img2's points onto img1's frame, with RANSAC
    H, status = cv2.findHomography(pts2, pts1, cv2.RANSAC, 4.0)

# Warp img2 into img1's frame; trans is the warped image
trans = cv2.warpPerspective(img2, H, (img1.shape[1] + img2.shape[1], img2.shape[0]))

# Paste img1 into the left end of the result
result = trans
result[0:img1.shape[0], 0:img1.shape[1]] = img1

cv2.imwrite('result.jpg', result)
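
To sanity-check the ratio-test matches before stitching, the pairs can be drawn with cv2.drawMatchesKnn. A standalone sketch (it re-runs detection, because drawMatchesKnn wants cv2.KeyPoint and cv2.DMatch objects while detectAndDescribe above reduces keypoints to bare coordinates):

import cv2

img1 = cv2.imread("1.jpeg")
img2 = cv2.imread("2.jpeg")
img1 = cv2.resize(img1, (640, 480))
img2 = cv2.resize(img2, (640, 480))

# Re-detect so the cv2.KeyPoint objects are available for drawing
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
matches = cv2.BFMatcher().knnMatch(des1, des2, 2)
# Same ratio test as above, kept as single-element lists for drawMatchesKnn
good_draw = [[m] for m, n in matches if m.distance < n.distance * 0.75]
vis = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_draw, None)
cv2.imwrite("matches.jpg", vis)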

As the result shows, the stitched image has an obvious seam as well as black borders, so the code needs two fixes: a cross-fade over the overlap region to hide the seam, and a crop of the leftover black borders.
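
The cross-fade is linear: for a column j inside the overlap, the output pixel is result[j]*(1-alpha) + warped[j]*alpha, where alpha ramps from 0 at the left edge of the overlap to 1 at its right edge. A minimal toy sketch of that idea (constant stand-in arrays with hypothetical widths, not the actual photos):

import numpy as np

# Two constant "images" overlapping over columns [start, end);
# alpha ramps 0 -> 1 across the overlap, fading left into warped.
left = np.full((4, 10), 100, dtype=np.float32)    # stand-in for img1's strip
warped = np.full((4, 10), 200, dtype=np.float32)  # stand-in for the warped img2
start, end = 3, 10
blended = left.copy()
for j in range(start, end):
    alpha = (j - start) / (end - start)
    blended[:, j] = left[:, j] * (1 - alpha) + warped[:, j] * alpha
print(blended[0])  # values ramp smoothly from 100 toward 200

With that idea in place, the optimized script: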

import cv2
import numpy as np


# Detect SIFT keypoints in an image and compute their descriptors
def detectAndDescribe(image):
    # Create the SIFT detector
    sift = cv2.SIFT_create()
    # Detect keypoints and compute descriptors
    (kps, features) = sift.detectAndCompute(image, None)
    # Convert the keypoint coordinates to a NumPy array
    kps = np.float32([kp.pt for kp in kps])
    # Return the keypoints and their corresponding descriptors
    return (kps, features)

# Read the input images
img1 = cv2.imread("1.jpeg")
img2 = cv2.imread("2.jpeg")

img1 = cv2.resize(img1,(640,480))
img2 = cv2.resize(img2,(640,480))

# Detect SIFT keypoints and compute descriptors for both images
kps1, features1 = detectAndDescribe(img1)
kps2, features2 = detectAndDescribe(img2)

# Create a brute-force matcher
bf = cv2.BFMatcher()
# Use KNN matching (k=2) to find candidate SIFT match pairs between the two images
matches = bf.knnMatch(features1, features2, 2)
good = []
for m in matches:
    # Lowe's ratio test: keep a pair only when the nearest distance is under 0.75x the second-nearest
    if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
        # Store the matched points' indices into features2 (trainIdx) and features1 (queryIdx)
        good.append((m[0].trainIdx, m[0].queryIdx))

# Estimate the perspective transform once more than 4 match pairs survive the filter
if len(good) > 4:
    # Gather the coordinates of the matched points
    pts1 = np.float32([kps1[i] for (_, i) in good])
    pts2 = np.float32([kps2[i] for (i, _) in good])
    # Compute the homography mapping img2's points onto img1's frame, with RANSAC
    H, status = cv2.findHomography(pts2, pts1, cv2.RANSAC, 4.0)

# Warp img2 into img1's frame; trans is the warped image
trans = cv2.warpPerspective(img2, H, (img1.shape[1] + img2.shape[1], img2.shape[0]))
# Project img2's top-left corner (0, 0, 1) through H; its x-coordinate marks where the overlap begins
points = H @ np.float32([0, 0, 1]).reshape(3, 1)
start = int(points[0, 0] / points[2, 0])

# Keep an untouched copy of the warp for blending, then paste img1 into the left end of the result
trans_copy = trans.copy()
result = trans
result[0:img1.shape[0], 0:img1.shape[1]] = img1

# Remove the seam: cross-fade result into trans_copy across the overlap [start, img1.shape[1])
for i in range(1, result.shape[0]):
    for j in range(start, img1.shape[1]):
        # Blend only where the warped image (and the pixel above it) is non-black
        if (trans_copy[i][j] != [0, 0, 0]).any() and (trans_copy[i-1][j] != [0, 0, 0]).any():
            alpha = (j - start) / (img1.shape[1] - start)
            result[i][j] = result[i][j] * (1 - alpha) + trans_copy[i][j] * alpha

# Crop the black border on the right: find the rightmost non-black pixel
# on the top and bottom rows and cut at the nearer of the two
for j in range(result.shape[1]-1, 0, -1):
    if (result[0][j] != [0, 0, 0]).any():
        up = j
        break
for j in range(result.shape[1]-1, 0, -1):
    if (result[result.shape[0]-1][j] != [0, 0, 0]).any():
        down = j
        break
result = result[0:result.shape[0], 0:min(up, down)-1]

# Crop the black borders on the top and bottom: scan the column at
# x = img1.shape[1] for the vertical extent of valid pixels
for i in range(0, result.shape[0]):
    if (result[i][img1.shape[1]] != [0, 0, 0]).any():
        up = i
        break
for i in range(result.shape[0]-1, 0, -1):
    if (result[i][img1.shape[1]] != [0, 0, 0]).any():
        down = i
        break
result = result[up+1:down, 0:result.shape[1]]

cv2.imwrite('result.jpg', result)
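
The per-pixel Python loop above is slow on larger images. The same cross-fade can be vectorized with NumPy; a sketch reusing trans_copy, result, start and img1 from the script above (for brevity it drops the loop's row-above check):

w = img1.shape[1]
j = np.arange(start, w)
alpha = (j - start) / (w - start)                    # ramps 0 -> 1 across the overlap
region = result[:, start:w]                          # view into result
mask = (trans_copy[:, start:w] != 0).any(axis=2)     # where the warped image has pixels
blend = region * (1 - alpha[None, :, None]) + trans_copy[:, start:w] * alpha[None, :, None]
region[mask] = blend[mask].astype(np.uint8)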

Compared with the result in the article linked at the top, this version almost completely eliminates the stitching seam and removes the black borders.
Below is the C++ version; after all, a seasoned algorithm engineer should be comfortable in both Python and C++, so treat it as syntax practice:

#include <iostream>
#include <opencv2/opencv.hpp>


// Pack keypoint coordinates into an Nx2 float matrix for cv::findHomography
cv::Mat KeyPoint2Mat(std::vector<cv::KeyPoint>& keypoints)
{
	cv::Mat keypoints_mat(cv::Size(2, keypoints.size()), CV_32F);
	for (size_t i = 0; i < keypoints.size(); i++)
	{
		keypoints_mat.at<float>(i, 0) = keypoints[i].pt.x;
		keypoints_mat.at<float>(i, 1) = keypoints[i].pt.y;
	}
	//std::cout << keypoints_mat << std::endl;
	return keypoints_mat;
}


int main(int argc, char** argv)
{
	cv::Mat img1 = cv::imread("1.jpeg",1);
	cv::Mat img2 = cv::imread("2.jpeg",1);

	cv::resize(img1, img1, cv::Size(640, 480));
	cv::resize(img2, img2, cv::Size(640, 480));

	// Detect SIFT keypoints and compute descriptors for both images
	cv::Ptr<cv::SIFT> detector = cv::SIFT::create();
	std::vector<cv::KeyPoint> kps1, kps2;
	cv::Mat features1, features2;
	detector->detectAndCompute(img1, cv::noArray(), kps1, features1);
	detector->detectAndCompute(img2, cv::noArray(), kps2, features2);

	// Brute-force KNN matching (k=2) between the two descriptor sets
	cv::BFMatcher matcher;
	std::vector<std::vector<cv::DMatch>> matches;
	matcher.knnMatch(features1, features2, matches, 2);
	//std::cout << matches.size() << std::endl;

	// Lowe's ratio test: keep matches whose nearest distance is under 0.75x the second-nearest
	std::vector<std::pair<int, int>> good;
	for (auto& m : matches)
	{
		if (m.size() == 2 && m[0].distance < m[1].distance * 0.75)
			good.push_back(std::make_pair(m[0].trainIdx, m[0].queryIdx));
	}
	//std::cout << good.size() << std::endl;

	// Estimate the homography mapping img2's points onto img1's frame
	cv::Mat H;
	if (good.size() > 4)
	{
		std::vector<cv::KeyPoint> pts1, pts2;
		for (size_t i = 0; i < good.size(); i++)
		{
			pts1.push_back(kps1[good[i].second]);
			pts2.push_back(kps2[good[i].first]);
		}
		H = cv::findHomography(KeyPoint2Mat(pts2), KeyPoint2Mat(pts1), cv::RANSAC, 4.0);
		//std::cout << H << std::endl;
	}

	// Warp img2 into img1's frame
	cv::Mat trans;
	cv::warpPerspective(img2, trans, H, cv::Size(img1.cols + img2.cols, img2.rows));
	//cv::imwrite("trans.jpg", trans);

	// Project img2's top-left corner (0,0,1) through H; its x-coordinate marks where the overlap begins
	cv::Mat left_top = (cv::Mat_<double>(3, 1) << 0, 0, 1);
	cv::Mat points = H * left_top;
	int start = (int) (points.at<double>(0, 0) / points.at<double>(2, 0));
	//std::cout << start << std::endl;

	// Keep an untouched copy of the warp for blending, then paste img1 over the left part
	cv::Mat trans_copy = trans.clone();
	cv::Mat result = trans;
	cv::Mat roi = result(cv::Rect(0, 0, img1.cols, img1.rows));
	img1.copyTo(roi);

	// Linear cross-fade over the overlap [start, img1.cols) to hide the seam
	for (int i = 1; i < result.rows; i++)
	{
		for (int j = start; j < img1.cols; j++)
		{
			if (trans_copy.at<cv::Vec3b>(i, j) != cv::Vec3b(0, 0, 0) && trans_copy.at<cv::Vec3b>(i - 1, j)!= cv::Vec3b(0, 0, 0))
			{
				float alpha = (float) (j - start) / (img1.cols - start);
				result.at<cv::Vec3b>(i, j) = result.at<cv::Vec3b>(i, j) * (1 - alpha) + trans_copy.at<cv::Vec3b>(i, j) * alpha;
			}
		}
	}

	// Find the rightmost non-black column on the top and bottom rows, then crop the right edge
	int up, down;
	for (int j = result.cols - 1; j > 0; j--)
	{
		if (result.at<cv::Vec3b>(0, j) != cv::Vec3b(0, 0, 0))
		{
			up = j;
			break;
		}
	}
	for (int j = result.cols - 1; j > 0; j--)
	{
		if (result.at<cv::Vec3b>(result.rows - 1, j) != cv::Vec3b(0, 0, 0))
		{
			down = j;
			break;
		}
	}
	result = result(cv::Rect(0, 0, std::min(up, down) - 1, result.rows));

	// Scan the column at x = img1.cols for the vertical extent of valid pixels, then crop top and bottom
	for (int i = 0; i < result.rows; i++)
	{
		if (result.at<cv::Vec3b>(i, img1.cols) != cv::Vec3b(0, 0, 0))
		{
			up = i;
			break;
		}
	}
	for (int i = result.rows - 1; i > 0; i--)
	{
		if (result.at<cv::Vec3b>(i, img1.cols) != cv::Vec3b(0, 0, 0))
		{
			down = i;
			break;
		}
	}
	cv::Mat img = result(cv::Rect(0, up + 1, result.cols, down - up - 1));

	cv::imwrite("result.jpg", img);
	return 0;
}
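
To build and run (a typical invocation, assuming the file is saved as stitch.cpp, a hypothetical name, and OpenCV 4 is installed with pkg-config metadata; adjust for your environment): g++ stitch.cpp -o stitch `pkg-config --cflags --libs opencv4`, then ./stitch.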

Reposted from blog.csdn.net/taifyang/article/details/129465110