OpenCV 2.4 Notes :: Edge Detection (6): Marr-Hildreth Algorithm (LoG Algorithm)

Edge detection

1. Sobel

2. Laplace

3. Roberts

4. Canny


Marr-Hildreth

In short: (1) first smooth the image with a Gaussian filter and then apply the Laplacian; (2) because the Laplacian is a second-order derivative, an edge point corresponds to a local extremum of the first derivative and therefore to a zero of the second derivative; (3) so the final step is zero-crossing detection.

For the related mathematical derivations, see:

https://blog.csdn.net/songzitea/article/details/12851079

http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm
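For reference, steps (1) and (2) of the pipeline above can also be run as two separate passes with OpenCV's built-in GaussianBlur and Laplacian. A minimal sketch, where the grayscale input srcGray, the kernel size and sigma are illustrative:

// Two-pass reference: Gaussian smoothing first, then the Laplacian.
// The LoG operator described below fuses these two convolutions into one.
Mat smoothed, lap;
GaussianBlur(srcGray, smoothed, Size(5, 5), 1.0);   // step (1): smoothing
Laplacian(smoothed, lap, CV_64F, 3);                // step (2): second derivative
// Step (3), zero-crossing detection on lap, still has to follow.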

The Laplacian operator is given below:
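For reference, the Laplacian of an image f is \nabla^2 f = \frac{\partial^2 f}{\partial x^2} + \frac{\partial^2 f}{\partial y^2}, and a standard 4-neighbour 3x3 discrete approximation is:

 0  1  0
 1 -4  1
 0  1  0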

The Gaussian kernel template is as follows:
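For reference, the Gaussian being sampled here is

G(x, y) = \frac{1}{2\pi\sigma^2} \, e^{-\frac{x^2 + y^2}{2\sigma^2}}

and discretizing it on a k x k grid (then normalizing) yields the Gaussian kernel template.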

Marr and Hildreth found that these two operators can be fused into a single one, called the LoG (Laplacian of Gaussian) operator, so the result is obtained with a single convolution instead of two separate passes. In the combined operator, the Gaussian part smooths the image while the Laplacian part takes the second derivative. With that, it is time to give the LoG kernel template.

First, here is the LoG function itself (the so-called Mexican hat):
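Applying the Laplacian to the Gaussian above gives

\nabla^2 G(x, y) = \frac{x^2 + y^2 - 2\sigma^2}{2\pi\sigma^6} \, e^{-\frac{x^2 + y^2}{2\sigma^2}}

The implementation below drops the constant factor \frac{1}{\pi\sigma^2}, which only scales the response and does not move the zero crossings.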

The corresponding 5×5 LoG kernel is:
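A commonly used 5×5 integer approximation of the LoG (sign conventions vary between sources) is:

 0  0 -1  0  0
 0 -1 -2 -1  0
-1 -2 16 -2 -1
 0 -1 -2 -1  0
 0  0 -1  0  0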


Implementation:

#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
using namespace std;
using namespace cv;
void marrEdge(const Mat& src, Mat &result, int kerValue,
	double delta)
{
	// Build the LoG kernel
	Mat kernel;
	// kernel radius
	int kerLen = kerValue / 2;
	kernel = Mat_<double>(kerValue, kerValue);
	// fill in the kernel values
	for (int i = -kerLen; i <= kerLen; i++){
		for (int j = -kerLen; j <= kerLen; j++){
			// LoG value at offset (i, j); the constant 1/(pi*sigma^2) is omitted
			kernel.at<double>(i + kerLen, j + kerLen) =
				exp(-((pow(j, 2) + pow(i, 2)) /
				(pow(delta, 2) * 2)))*
				(((pow(j, 2) + pow(i, 2) - 2 *
				pow(delta, 2)) / (2 * pow(delta, 4))));
		}
	}
	// set up the outputs (a border of half the kernel size is cropped)
	int kerOffset = kerValue / 2;
	Mat laplacian = (Mat_<double>(src.rows - kerOffset * 2,
		src.cols - kerOffset * 2));
	result = Mat::zeros(src.rows - kerOffset * 2,
		src.cols - kerOffset * 2, src.type());
	double sumLaplacian;
	// convolve the image with the LoG kernel
	for (int i = kerOffset; i < src.rows - kerOffset; ++i){
		for (int j = kerOffset; j < src.cols - kerOffset; ++j){
			sumLaplacian = 0;
			for (int k = -kerOffset; k <= kerOffset; ++k){
				for (int m = -kerOffset; m <= kerOffset; ++m){
					// accumulate the kernel response
					sumLaplacian += src.at<uchar>(i + k, j + m)*
						kernel.at<double>(kerOffset + k, kerOffset + m);
				}
			}
			// store the LoG response for this pixel
			laplacian.at<double>(i - kerOffset,
				j - kerOffset) = sumLaplacian;
		}
	}
	// zero-crossing detection: mark edge pixels
	for (int y = 1; y < result.rows - 1; ++y){
		for (int x = 1; x < result.cols - 1; ++x){
			result.at<uchar>(y, x) = 0;
			// check for a sign change across the neighbourhood in 4 directions
			if (laplacian.at<double>(y - 1, x)*
				laplacian.at<double>(y + 1, x) < 0){
				result.at<uchar>(y, x) = 255;
			}
			if (laplacian.at<double>(y, x - 1)*
				laplacian.at<double>(y, x + 1) < 0){
				result.at<uchar>(y, x) = 255;
			}
			if (laplacian.at<double>(y + 1, x - 1)*
				laplacian.at<double>(y - 1, x + 1) < 0){
				result.at<uchar>(y, x) = 255;
			}
			if (laplacian.at<double>(y - 1, x - 1)*
				laplacian.at<double>(y + 1, x + 1) < 0){
				result.at<uchar>(y, x) = 255;
			}
		}
	}
}

int main()
{
	Mat srcImage = imread("F:\\opencv_re_learn\\2.jpg");
	if (!srcImage.data){
		cout << "failed to read" << endl;
		system("pause");
		return -1;
	}
	Mat srcGray;
	cvtColor(srcImage, srcGray, CV_BGR2GRAY);
	Mat edge;
	// the 3rd parameter is the kernel size; 5 is a typical starting value
	marrEdge(srcGray, edge, 5, 1);
	imshow("srcImage", srcImage);
	imshow("edge", edge);
	waitKey(0);
	return 0;
}

Result:

Calling this version is quite sluggish. It looks like O(n^4) because of the four nested loops, but for a fixed kernel size k it is really O(rows × cols × k²); the per-element at<>() accesses make the constant factor large.


Code optimization:

After running it a few times, the speed was clearly too slow. Could filter2D be used to replace the hand-written convolution part of the function?

Implementation:

#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
using namespace std;
using namespace cv;

void marrEdge2(const Mat& src, Mat &result, int kerValue,
	double delta)
{
	// Build the LoG kernel (same as in marrEdge)
	Mat kernel;
	// kernel radius
	int kerLen = kerValue / 2;
	kernel = Mat_<double>(kerValue, kerValue);
	for (int i = -kerLen; i <= kerLen; i++){
		for (int j = -kerLen; j <= kerLen; j++){
			// LoG value at offset (i, j)
			kernel.at<double>(i + kerLen, j + kerLen) =
				exp(-((pow(j, 2) + pow(i, 2)) /
				(pow(delta, 2) * 2)))*
				(((pow(j, 2) + pow(i, 2) - 2 *
				pow(delta, 2)) / (2 * pow(delta, 4))));
		}
	}
	// Let filter2D do the convolution. With src.depth() (8-bit) as the output
	// depth, negative responses saturate to 0, so the sign information needed
	// for zero-crossing detection is lost.
	Mat laplacian;
	filter2D(src, laplacian, src.depth(), kernel);
	imshow("laplacian", laplacian);
	// Binarize the filter response; this stands in for the zero-crossing step.
	threshold(laplacian, result, 1, 255, CV_THRESH_BINARY);
}
int main()
{
	Mat srcImage = imread("F:\\opencv_re_learn\\2.jpg");
	if (!srcImage.data){
		cout << "failed to read" << endl;
		system("pause");
		return -1;
	}
	Mat srcGray;
	cvtColor(srcImage, srcGray, CV_BGR2GRAY);
	Mat edge;
	// the 3rd parameter is the kernel size; 5 is a typical starting value
	marrEdge2(srcGray, edge, 5, 1);
	imshow("srcImage", srcImage);
	imshow("edge", edge);
	waitKey(0);
	return 0;
}

Result after optimization:

Processing speed goes way up. However, the last step, zero-crossing detection, is effectively missing: with src.depth() as the output depth, filter2D produces an 8-bit result in which negative responses saturate to 0, so the code uses a simple threshold instead.
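If one wanted to keep the zero-crossing step while still using filter2D, one possible sketch (reusing the kernel, src and result names from marrEdge2, plus the 4-direction zero-crossing loop from marrEdge) is to ask filter2D for a signed floating-point output so the negative responses survive:

// Keep the LoG response in signed doubles so zero crossings are preserved.
Mat laplacian;
filter2D(src, laplacian, CV_64F, kernel);
// result must be allocated before the zero-crossing loop can write into it.
result = Mat::zeros(src.size(), CV_8UC1);
// ...then run the same 4-direction zero-crossing test as in marrEdge().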

Compared with applying the Laplacian operator directly, as in https://blog.csdn.net/dieju8330/article/details/82813592, the speckled noise around the edges is indeed reduced.


Reposted from blog.csdn.net/dieju8330/article/details/82841033