Image stitching based on OpenCV

#define _CRT_SECURE_NO_WARNINGS
#include <iostream>
#include <string>
#include <vector>
#include <opencv/cv.h>
#include <opencv/cxcore.h>
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include "ConcateImage.h"
using namespace cv;
using namespace std;

//------------------------------ Stitch images ----------------------------------------------//
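// path1, path2: left and right source images; path3: output path for the stitched result.
// minThres/maxThres/fbArea/minA/maxA/bColor configure the SimpleBlobDetector that locates
// the reference blob in each image; the first detected blob is used as the cut position.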
Mat ConcateImage(char *path1, char *path2, char *path3, int minThres = 50, int maxThres = 220, bool fbArea = true, int minA = 10, int maxA = 10000, int bColor = 255)
{
	//string path1("E:\\Project\\C085\\Codes\\02Code\\3DLMI\\C085LMI\\Image\\A1.png");
	//string path2("E:\\Project\\C085\\Codes\\02Code\\3DLMI\\C085LMI\\Image\\A2.png");
	Mat unchangeImg1 = imread(path1, CV_LOAD_IMAGE_UNCHANGED);
	Mat unchangeImg2 = imread(path2, CV_LOAD_IMAGE_UNCHANGED);
	Mat img1 = imread(path1, 0);
	Mat img2 = imread(path2, 0);
	//flip(img1, img1, 0);
	//flip(img2, img2, 0);
	SimpleBlobDetector::Params params;
	params.minThreshold = minThres;
	params.maxThreshold = maxThres;
	params.filterByArea = fbArea;
	params.minArea = minA;
	params.maxArea = maxA;
	params.blobColor = bColor;
	SimpleBlobDetector detector(params);
	//Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
	vector<KeyPoint> keypoints1;
	vector<KeyPoint> keypoints2;
	detector.detect(img1, keypoints1);
	detector.detect(img2, keypoints2);
	/*
	if (keypoints1.size() == 0) {
		cout << "keypoints1 no element" << endl;
	}
	else {
		int i = 0;
		for (i = 0; i < keypoints1.size(); ++i) {
			cout << "keypoints1 num " << i << " : " << keypoints1[i].pt << endl;
		}
	}
	if (keypoints2.size() == 0) {
		cout << "keypoints2 no element" << endl;
	}
	else {
		int i = 0;
		for (i = 0; i < keypoints2.size(); ++i) {
			cout << "keypoints2 num " << i << " : " << keypoints2[i].pt << endl;
		}
	}
	*/
	//Mat img_with_keypoints1;
	//Mat img_with_keypoints2;
	//drawKeypoints(img1, keypoints1, img_with_keypoints1, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	//drawKeypoints(img2, keypoints2, img_with_keypoints2, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	//namedWindow("keypoints1", CV_WINDOW_NORMAL);
	//imshow("keypoints1", img_with_keypoints1);
	//namedWindow("keypoints2", CV_WINDOW_NORMAL);
	//imshow("keypoints2", img_with_keypoints2);
	//
	//guard: keypoints[0] is dereferenced below, so bail out if no blob was detected
	if (keypoints1.empty() || keypoints2.empty()) {
		cout << "no blob detected in at least one image" << endl;
		return Mat();
	}
	//cut img1 left of / above its blob, and img2 right of / above its blob
	Mat cutImg1;
	Mat cutImg2;
	cutImg1 = unchangeImg1(Rect(0, 0, keypoints1[0].pt.x, keypoints1[0].pt.y));
	cutImg2 = unchangeImg2(Rect(keypoints2[0].pt.x, 0, img2.cols - keypoints2[0].pt.x, keypoints2[0].pt.y));
	//trim both crops to the same size, anchored at their bottom-right corners
	int minRow = cutImg1.rows < cutImg2.rows ? cutImg1.rows : cutImg2.rows;
	int minCol = cutImg1.cols < cutImg2.cols ? cutImg1.cols : cutImg2.cols;
	cutImg1 = cutImg1(Rect(cutImg1.cols - minCol, cutImg1.rows - minRow, minCol, minRow));
	cutImg2 = cutImg2(Rect(cutImg2.cols - minCol, cutImg2.rows - minRow, minCol, minRow));
	//cout << "cut image 1 : size : " << cutImg1.size() << endl;
	//cout << "cut image 2 : size : " << cutImg2.size() << endl;
	//namedWindow("Cut Image 1", CV_WINDOW_NORMAL);
	//imshow("Cut Image 1", cutImg1);
	//namedWindow("Cut Image 2", CV_WINDOW_NORMAL);
	//imshow("Cut Image 2", cutImg2);
	//waitKey(0);
	//destroyAllWindows();
	//

	//balance gray levels across the seam (assumes 16-bit single-channel images)
	int needRow = minRow / 3;
	int img1Step0 = cutImg1.step[0];
	int img1Step1 = cutImg1.step[1];
	int img2Step0 = cutImg2.step[0];
	int img2Step1 = cutImg2.step[1];
	int img1NeedCol = minCol - 1;
	double s1 = 0, s2 = 0;
	int ms1 = 0, ms2 = 0;
	//sample 10 rows starting at one third of the height: compare the last column of
	//cutImg1 with the first column of cutImg2 to estimate the brightness offset
	for (int i = 0; i < 10; ++i) {
		ushort* ptr1 = cutImg1.ptr<ushort>(needRow + i * 5);
		ushort* ptr2 = cutImg2.ptr<ushort>(needRow + i * 5);
		s1 += ptr1[img1NeedCol];
		s2 += ptr2[0];
	}
	int pixelDiff = (int)((s1 - s2) / 10);
	//subtract the average offset from cutImg1, touching only foreground pixels (> 1000)
	for (int i = 0; i < minRow; ++i) {
		ushort* ptr1 = cutImg1.ptr<ushort>(i);
		for (int j = 0; j < minCol; ++j) {
			if (ptr1[j] > 1000) {
				ptr1[j] -= pixelDiff;
			}
		}
	}
	//balance gray levels end

	Mat combineMat;
	hconcat(cutImg1, cutImg2, combineMat);
	imwrite(path3, combineMat);
	//cout << "combine image : size : " << combineMat.size() << endl;
	//namedWindow("combine image", CV_WINDOW_NORMAL);
	//imshow("combine image", combineMat);
	//waitKey(0);
	//imwrite("E:\\Project\\C085\\Codes\\02Code\\3DLMI\\C085LMI\\Image\\C1.png", combineMat);
	//destroyAllWindows();
	return combineMat;
}
//------------------------------ Stitch images end ------------------------------------------//
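ConcateImage mixes the legacy opencv/cv.h headers with the opencv2 module headers, and the direct SimpleBlobDetector constructor only exists in OpenCV 2.x. Under OpenCV 3.x, CV_LOAD_IMAGE_UNCHANGED becomes IMREAD_UNCHANGED and the detector is obtained from a factory, as the commented-out create() line above already hints. A minimal sketch of the detector setup, assuming OpenCV 3.x:

#include <opencv2/features2d.hpp>
#include <vector>

// Sketch assuming OpenCV 3.x: the detector comes from a factory and is held in a cv::Ptr.
std::vector<cv::KeyPoint> DetectBlobs3x(const cv::Mat& gray)
{
	cv::SimpleBlobDetector::Params params;
	params.minThreshold = 50;
	params.maxThreshold = 220;
	params.filterByArea = true;
	params.minArea = 10;
	params.maxArea = 10000;
	params.blobColor = 255;
	cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
	std::vector<cv::KeyPoint> keypoints;
	detector->detect(gray, keypoints);
	return keypoints;
}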

//------------------------------ Rotate image -----------------------------------------------//
//First parameter: input image; second parameter: output image file name
// origin function declare -- void GetContoursPic(char* pSrcFileName, char* pDstFileName)
Mat GetContoursPic(Mat srcImg, char* pDstFileName)
{
	//Mat srcImg = imread(pSrcFileName);
	//namedWindow("source", CV_WINDOW_NORMAL);
	//imshow("source", srcImg);
	Mat gray, binImg;
	Mat rotationedImage;
	//grayscale: the input is used as-is (assumed to be single-channel already)
	gray = srcImg;
	//cvtColor(srcImg, gray, COLOR_RGB2GRAY);
	//namedWindow("gray", CV_WINDOW_NORMAL);
	//imshow("gray", gray);
	//binarize
	threshold(gray, binImg, 40, 200, CV_THRESH_BINARY);
	namedWindow("binary", CV_WINDOW_NORMAL);
	imshow("binary", binImg);
	waitKey(0);
	destroyWindow("binary");
	vector<vector<Point> > contours;
	vector<Rect> boundRect(contours.size());
	//note: the retrieval mode is CV_RETR_EXTERNAL, so only the outer contours are returned
	findContours(binImg, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE); //find contours
	//cout << contours.size() << endl;
	for (int i = 0; i < contours.size(); i++)
	{
		//the corner coordinates we need
		CvPoint2D32f rectpoint[4];
		CvBox2D rect = minAreaRect(Mat(contours[i]));

		cvBoxPoints(rect, rectpoint); //get the 4 corner points
		//angle relative to the horizontal
		float angle = rect.angle;
		//cout << angle << endl;
		
		int line1 = sqrt((rectpoint[1].y - rectpoint[0].y)*(rectpoint[1].y - rectpoint[0].y) + (rectpoint[1].x - rectpoint[0].x)*(rectpoint[1].x - rectpoint[0].x));
		int line2 = sqrt((rectpoint[3].y - rectpoint[0].y)*(rectpoint[3].y - rectpoint[0].y) + (rectpoint[3].x - rectpoint[0].x)*(rectpoint[3].x - rectpoint[0].x));
		//rectangle(binImg, rectpoint[0], rectpoint[3], Scalar(255), 2);
		//skip rectangles whose area is too small
		if (line1 * line2 < 600)
		{
			continue;
		}

		//to lay the rectangle horizontally, the rotation angle depends on its orientation:
		//a vertically oriented box would need an extra 90 degrees (currently disabled)
		if (line1 > line2)
		{
			//angle = 90 + angle;
			angle = angle;
		}

		//create a region-of-interest image with the same size as the source
		Mat RoiSrcImg(srcImg.rows, srcImg.cols, CV_8UC3); //note: CV_8UC3 is required here
		RoiSrcImg.setTo(0); //fill it with black
		//imshow("new ROI", RoiSrcImg);
		//fill the detected contours
		drawContours(binImg, contours, -1, Scalar(255), CV_FILLED);

		//copy the masked region into RoiSrcImg
		srcImg.copyTo(RoiSrcImg, binImg);

		//show it again: everything outside the region of interest is now black
		//namedWindow("RoiSrcImg", CV_WINDOW_NORMAL);
		//imshow("RoiSrcImg", RoiSrcImg);

		//create the rotated image
		Mat RatationedImg(RoiSrcImg.rows, RoiSrcImg.cols, CV_8UC1);
		RatationedImg.setTo(0);
		//rotate RoiSrcImg
		Point2f center = rect.center;  //center point
		Mat M2 = getRotationMatrix2D(center, angle, 1); //rotation + scale transform matrix
		warpAffine(RoiSrcImg, RatationedImg, M2, RoiSrcImg.size(), 1, 0, Scalar(0)); //affine warp
		//namedWindow("rotated", CV_WINDOW_NORMAL);
		//imshow("rotated", RatationedImg);
		imwrite(string("D:\\Image\\rota") + to_string(i) + ".png", RatationedImg); //save the rectified image
		//rotationedImage = RatationedImg;
	}

	Mat dstImg;

#if 1
	//crop the ROI region out of the image

	//extract contours from the rotated image
	vector<vector<Point> > contours2;
	Mat raw = imread("r.jpg"); //note: hard-coded file name
	//Mat raw = rotationedImage;
	Mat SecondFindImg;
	//SecondFindImg.setTo(0);
	//SecondFindImg = raw;
	cvtColor(raw, SecondFindImg, COLOR_BGR2GRAY);  //grayscale
	threshold(SecondFindImg, SecondFindImg, 80, 200, CV_THRESH_BINARY);
	findContours(SecondFindImg, contours2, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
	//cout << "sec contour:" << contours2.size() << endl;

	for (int j = 0; j < contours2.size(); j++)
	{
		//at this point the contour is an axis-aligned rectangle, so take its bounding rect
		Rect rect = boundingRect(Mat(contours2[j]));
		//skip contours whose area is too small; filtering by area keeps only the outer frame
		if (rect.area() < 600)
		{
			continue;
		}
		dstImg = raw(rect);
		//namedWindow("dst", CV_WINDOW_NORMAL);
		//imshow("dst", dstImg);
		imwrite(pDstFileName, dstImg);

	}
#endif

	return dstImg;
}
//------------------------------ Rotate image end -------------------------------------------//
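GetContoursPic still relies on the OpenCV 1.x C types (CvBox2D, CvPoint2D32f, cvBoxPoints), which the C++ API superseded and OpenCV 4 removed. The same deskew step can be expressed with RotatedRect; the function below is only a sketch of that idea, not a drop-in replacement for the code above:

#include <opencv2/imgproc.hpp>
#include <vector>

// Rotate src so that the minimum-area rectangle of `contour` becomes axis-aligned.
// Sketch only: `contour` is assumed to come from findContours as in GetContoursPic.
static cv::Mat DeskewByMinAreaRect(const cv::Mat& src, const std::vector<cv::Point>& contour)
{
	cv::RotatedRect box = cv::minAreaRect(contour);   // replaces CvBox2D + cvBoxPoints
	cv::Mat M = cv::getRotationMatrix2D(box.center, box.angle, 1.0);
	cv::Mat rotated;
	cv::warpAffine(src, rotated, M, src.size(), cv::INTER_LINEAR,
	               cv::BORDER_CONSTANT, cv::Scalar(0));
	return rotated;
}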

//------------------------------ Inspect image ----------------------------------------------//
void DealCellSurface(Mat srcImg,char *savePath) {
	cv::Mat image, imagemean, diff, Mask;
	//image = imread(imagePath);
	image = srcImg;
	blur(image, imagemean, Size(13, 13));
	subtract(imagemean, image, diff);
	threshold(diff, Mask, 5, 255, THRESH_BINARY_INV); //same idea as dynamic threshold segmentation (dyn_threshold)
	//imshow("imagemean", imagemean);
	//imshow("diff", diff);
	//imshow("Mask", Mask);
	Mat imagegray;
	cvtColor(Mask, imagegray, CV_RGB2GRAY);
	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;
	findContours(imagegray, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
	Mat drawing = Mat::zeros(Mask.size(), CV_8U);
	int j = 0;
	for (int i = 0; i < contours.size(); i++)
	{
		Moments moms = moments(Mat(contours[i]));
		double area = moms.m00;    //the zeroth moment is the area of the blob in the binary image
								   //skip the blob if its area lies outside the configured range
		if (area > 20 && area < 1000)
		{
			drawContours(drawing, contours, i, Scalar(255), CV_FILLED, 8, hierarchy, 0, Point());
			j = j + 1;
		}
	}
	cv::Mat element15(3, 3, CV_8U, cv::Scalar(1));
	cv::Mat close;
	cv::morphologyEx(drawing, close, cv::MORPH_CLOSE, element15);
	//imshow("drawing", drawing);
	vector<vector<Point> > contours1;
	vector<Vec4i> hierarchy1;
	findContours(close, contours1, hierarchy1, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
	//imshow("close", close);
	j = 0;
	int m = 0;
	for (int i = 0; i < contours1.size(); i++)
	{
		Moments moms = moments(Mat(contours1[i]));
		double area = moms.m00;    //the zeroth moment is the area of the blob in the binary image
								   //skip the blob if its area lies outside the configured range
		double area1 = contourArea(contours1[i]);
		if (area > 50 && area < 100000)
		{
			drawContours(image, contours1, i, Scalar(0, 0, 255), CV_FILLED, 8, hierarchy1, 0, Point());
			j = j + 1;
		}
		else if (area >= 0 && area <= 50)
		{
			drawContours(image, contours1, i, Scalar(255, 0, 0), CV_FILLED, 8, hierarchy1, 0, Point());
			m = m + 1;
		}
	}
	char t[256];
	sprintf_s(t, "%01d", j);
	string s = t;
	string txt = "Long NG : " + s;
	putText(image, txt, Point(20, 30), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(0, 0, 255), 2, 8);
	sprintf_s(t, "%01d", m);
	s = t;
	txt = "Short NG : " + s;
	putText(image, txt, Point(20, 60), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(255, 0, 0), 2, 8);
	imwrite(savePath, image);
	//waitKey();
}
//------------------------------ Inspect image end ------------------------------------------//

//------------------------------ Exported entry function ------------------------------------//
void DealConcateImage(char *srcPath1, char *srcPath2, char *savePath, int minThres = 50, int maxThres = 220, bool fbArea = true, int minA = 10, int maxA = 10000, int bColor = 255) {
	Mat tempM;
	//stitch
	tempM = ConcateImage(srcPath1, srcPath2, savePath, minThres, maxThres, fbArea, minA, maxA, bColor);

	//rotate
	//tempM = GetContoursPic(tempM, dstPath);
	//imwrite(dstPath, tempM);
	//inspect
	//DealCellSurface(tempM, savePath);
}
//------------------------------ Exported entry function end --------------------------------//

/*
int main(int argc, char **argv)
{    

	DealConcateImage("D:\\Image\\1.png", "D:\\Image\\2.png", "D:\\Image\\combineMat.png");
	return 0;
}
*/


/*
void complex_test1()
{
Mat x1 = Mat(1, 3, CV_64FC2);

x1.ptr<Point2d>(0)[0] = Point2d(2, 3);
x1.ptr<Point2d>(0)[1] = Point2d(4, 0);
x1.ptr<Point2d>(0)[2] = x1.ptr<Point2d>(0)[0] + x1.ptr<Point2d>(0)[1];

cout << "Matrix x1:" << x1 << endl;
}

void main()
{
//complex_test1();
string path1("E:\\Project\\C085\\Codes\\02Code\\3DLMI\\C085LMI\\Image\\C1.png");
string path2("E:\\Project\\C085\\Codes\\02Code\\3DLMI\\C085LMI\\Image\\CC1.png");
GetContoursPic(path1.c_str(), path2.c_str());


waitKey();
}
*/


//------------------------------------------------------------------------//

/*  origin codes -- dispose
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"

#include "ConcateImage.h"

#include <iostream>
#include <fstream>

using namespace cv;
using namespace std;

void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

typedef struct
{
Point2f left_top;
Point2f left_bottom;
Point2f right_top;
Point2f right_bottom;
}four_corners_t;

four_corners_t corners;

void CalcCorners(const Mat& H, const Mat& src)
{
double v2[] = { 0, 0, 1 };//top-left corner
double v1[3];//transformed coordinates
Mat V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
Mat V1 = Mat(3, 1, CV_64FC1, v1);  //column vector

V1 = H * V2;
//top-left corner (0,0,1)
cout << "V2: " << V2 << endl;
cout << "V1: " << V1 << endl;
corners.left_top.x = v1[0] / v1[2];
corners.left_top.y = v1[1] / v1[2];

//bottom-left corner (0,src.rows,1)
v2[0] = 0;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
V1 = H * V2;
corners.left_bottom.x = v1[0] / v1[2];
corners.left_bottom.y = v1[1] / v1[2];

//top-right corner (src.cols,0,1)
v2[0] = src.cols;
v2[1] = 0;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
V1 = H * V2;
corners.right_top.x = v1[0] / v1[2];
corners.right_top.y = v1[1] / v1[2];

//bottom-right corner (src.cols,src.rows,1)
v2[0] = src.cols;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
V1 = H * V2;
corners.right_bottom.x = v1[0] / v1[2];
corners.right_bottom.y = v1[1] / v1[2];

}

void DealConcateImage(char *img01, char *img02)
{

//string img01, img02;
//ifstream myin("F:\\ImageSource.txt");
//myin>>img01;
//myin>>img02;
//myin.close();

Mat image01 = imread(img02, 1);    //right image
Mat image02 = imread(img01, 1);    //left image


//Mat image01 = imread("E:\\g8.PNG", 1);    //right image
//Mat image02 = imread("E:\\g7.PNG", 1);    //left image

imshow("p2", image01);
imshow("p1", image02);

//convert to grayscale
Mat image1, image2;
cvtColor(image01, image1, CV_RGB2GRAY);
cvtColor(image02, image2, CV_RGB2GRAY);


//extract feature points
SurfFeatureDetector Detector(2000);
vector<KeyPoint> keyPoint1, keyPoint2;
Detector.detect(image1, keyPoint1);
Detector.detect(image2, keyPoint2);

//describe the feature points, to prepare for the matching below
SurfDescriptorExtractor Descriptor;
Mat imageDesc1, imageDesc2;
Descriptor.compute(image1, keyPoint1, imageDesc1);
Descriptor.compute(image2, keyPoint2, imageDesc2);

FlannBasedMatcher matcher;
vector<vector<DMatch> > matchePoints;
vector<DMatch> GoodMatchePoints;

vector<Mat> train_desc(1, imageDesc1);
matcher.add(train_desc);
matcher.train();

matcher.knnMatch(imageDesc2, matchePoints, 2);
cout << "total match points: " << matchePoints.size() << endl;

// Lowe's ratio test: keep only the good matches
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i][0].distance < 0.4 * matchePoints[i][1].distance)
{
GoodMatchePoints.push_back(matchePoints[i][0]);
}
}

Mat first_match;
drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
imshow("first_match ", first_match);

vector<Point2f> imagePoints1, imagePoints2;

for (int i = 0; i<GoodMatchePoints.size(); i++)
{
imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
}



//compute the 3x3 homography that maps image 1 onto image 2
Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
////getPerspectiveTransform could also be used to obtain the perspective matrix, but it takes exactly 4 points and gives slightly worse results
//Mat   homo=getPerspectiveTransform(imagePoints1,imagePoints2);
cout << "homography matrix:\n" << homo << endl << endl; //print the mapping matrix

//compute the four corner coordinates of the registered image
CalcCorners(homo, image01);
cout << "left_top:" << corners.left_top << endl;
cout << "left_bottom:" << corners.left_bottom << endl;
cout << "right_top:" << corners.right_top << endl;
cout << "right_bottom:" << corners.right_bottom << endl;

//image registration
Mat imageTransform1, imageTransform2;
warpPerspective(image01, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), image02.rows));
//warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));
imshow("直接经过透视矩阵变换", imageTransform1);
imwrite("trans1.jpg", imageTransform1);


//create the stitched image; its size must be computed in advance
int dst_width = imageTransform1.cols;  //width of the stitched image = x of the rightmost point
int dst_height = image02.rows;

Mat dst(dst_height, dst_width, CV_8UC3);
dst.setTo(0);

imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
image02.copyTo(dst(Rect(0, 0, image02.cols, image02.rows)));

imshow("b_dst", dst);


OptimizeSeam(image02, imageTransform1, dst);


imshow("dst", dst);
imwrite("dst.jpg", dst);

//waitKey();

}


//blend the junction of the two images so the seam looks natural
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
int start = MIN(corners.left_top.x, corners.left_bottom.x);//start position, i.e. the left boundary of the overlap region

double processWidth = img1.cols - start;//width of the overlap region
int rows = dst.rows;
int cols = img1.cols; //note: number of columns * number of channels
double alpha = 1;//weight of the pixel from img1
for (int i = 0; i < rows; i++)
{
uchar* p = img1.ptr<uchar>(i);  //pointer to the first element of row i
uchar* t = trans.ptr<uchar>(i);
uchar* d = dst.ptr<uchar>(i);
for (int j = start; j < cols; j++)
{
//if trans has no pixel here (a black point), copy img1's data as-is
if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
{
alpha = 1;
}
else
{
//the weight of the img1 pixel is proportional to the distance from the current point to the left boundary of the overlap; experiments show this works well
alpha = (processWidth - (j - start)) / processWidth;
}

d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);

}
}

}
*/
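The disposed code above depends on the OpenCV 2.x nonfree and legacy modules (SurfFeatureDetector, SurfDescriptorExtractor). If it were revived under OpenCV 3.x, SURF lives in the opencv_contrib xfeatures2d module and produces keypoints and descriptors in a single call. A minimal sketch under that assumption:

#include <opencv2/xfeatures2d.hpp>
#include <opencv2/core.hpp>
#include <vector>

// Sketch assuming OpenCV 3.x with opencv_contrib: SURF replaces SurfFeatureDetector
// and SurfDescriptorExtractor, computing keypoints and descriptors together.
void DetectSurf3x(const cv::Mat& gray, std::vector<cv::KeyPoint>& keyPoints, cv::Mat& descriptors)
{
	cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(2000); // hessian threshold, as in the original
	surf->detectAndCompute(gray, cv::noArray(), keyPoints, descriptors);
}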

Reposted from blog.csdn.net/Bamboo265925/article/details/84101401