C++ 高通滤波器实现边缘增强

参考:http://bbs.ednchina.com/BLOG_ARTICLE_2060808.HTM

主要实现:当图像中的线条被大块颜色覆盖、线条颜色与覆盖色区分不明显时,用局部二值化的方法无法将二者分开;此时可以增强图像的高频部分,使线条更加明显、易于区分。

代码:


#include <iostream>
#include <fstream>
#include <stdlib.h> //srand()和rand()函数
#include <time.h> 
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/ml/ml.hpp>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
// Cutoff distance for the frequency-domain high-pass filter (passed to ILPF
// from main). Larger values reject more low-frequency content, giving a
// stronger edge-enhancement effect.
double D0 = 80;
// Frequency-domain high-pass filter, applied in place to a complex spectrum.
//
// Multiplies the (already DFT-transformed) CV_64FC2 matrix `src` by an
// exponential high-pass transfer function
//     H(u,v) = exp( -(D0 / D(u,v))^2 ),
// where D(u,v) is the distance from (u,v) to the NEAREST corner of the
// spectrum — for an unshifted DFT the DC term sits at the four corners,
// which is why the distance folds around the midpoints.
//
// NOTE(review): the name ILPF suggests an Ideal Low-Pass Filter, but the
// active kernel is an exponential HIGH-pass. The name is kept so existing
// callers keep working; alternative kernels from the original are preserved
// below as comments for experimentation.
//
// src : CV_64FC2 forward-DFT spectrum; overwritten with the filtered result.
// D0  : cutoff distance (this parameter shadows the file-level global D0).
void ILPF(CvMat* src, const double D0)
{
    const long width = src->width;
    const long height = src->height;
    const long cx = width / 2;   // horizontal midpoint of the spectrum
    const long cy = height / 2;  // vertical midpoint of the spectrum

    // Transfer function built as a 2-channel (complex) matrix so it can be
    // multiplied against the complex spectrum with cvMulSpectrums.
    CvMat* H_mat = cvCreateMat(src->height, src->width, CV_64FC2);

    for (int i = 0; i < height; i++)
    {
        double* row = (double*)(H_mat->data.ptr + H_mat->step * i);
        for (int j = 0; j < width; j++)
        {
            // Distance to the nearest spectrum corner. This folds each axis
            // around its midpoint, reproducing exactly the four quadrant
            // cases of the original state machine:
            //   i<=cy,j<=cx: (i, j)            i>cy,j<=cx: (height-i, j)
            //   i<=cy,j>cx : (i, width-j)      i>cy,j>cx : (height-i, width-j)
            const double di = (i > cy) ? (double)(height - i) : (double)i;
            const double dj = (j > cx) ? (double)(width - j) : (double)j;
            const double dist = sqrt(di * di + dj * dj);

            // Alternative kernels kept from the original (swap in as needed):
            //   Gaussian HP   : h = 1 - exp(-0.5 * pow(dist / D0, 2));
            //   Ideal HP      : h = (dist <= D0) ? 0.0 : 1.0;
            //   Butterworth HP: h = 1 / (1 + pow(D0 / dist, 2 * 2));

            // 4. Exponential high-pass with growth rate 2 (the active kernel).
            // At the DC corner dist == 0: IEEE division yields +inf, and
            // exp(-inf) == 0, i.e. full DC rejection — the desired behavior.
            const double h = exp(-pow(D0 / dist, 2));

            row[j * 2] = h;        // real part of H(u,v)
            row[j * 2 + 1] = 0.0;  // imaginary part (filter is purely real)
        }
    }

    cvMulSpectrums(src, H_mat, src, CV_DXT_ROWS);
    cvReleaseMat(&H_mat);
}


int main()
{

IplImage * im;

IplImage * realInput;

IplImage * imaginaryInput;

IplImage * complexInput;

int dft_M, dft_N;

CvMat* dft_A, tmp, *dft_B;

IplImage * image_Re;

IplImage * image_Im;

double m, M;

//输入图像是灰度图

im = cvLoadImage("obj3.jpg", CV_LOAD_IMAGE_GRAYSCALE);

if (!im)

return 0;


//做DFT变换的尺寸不一样

dft_M = cvGetOptimalDFTSize(im->height - 1);

dft_N = cvGetOptimalDFTSize(im->width - 1);

dft_B = cvCreateMat(dft_M, dft_N, CV_64FC2);

dft_A = cvCreateMat(dft_M, dft_N, CV_64FC2);

cvZero(dft_A);

cvZero(dft_B);


//先把im扩充,之后赋值就不需要扩充。根据图的需要,这里扩充白色边界,在im中也就是cvScalar(255)
//扩充的颜色值应该与背景颜色一致,否则会在背景与前景相接的位置检测到多余的轮廓。
IplImage* large_img = cvCreateImage(cvSize(dft_N, dft_M), IPL_DEPTH_8U, 1);//IPL_DEPTH_8U-----uchar
cvCopyMakeBorder(im, large_img, cvPoint((dft_N - im->width) / 2, (dft_M - im->height) / 2), IPL_BORDER_CONSTANT, cvScalar(255));//  0表示黑色)


realInput = cvCreateImage(cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage(cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage(cvSize(dft_N, dft_M), IPL_DEPTH_64F, 2);
//高通滤波之前,需要转化成double,实部输入就是灰度图本身,虚部是空,合并为复变量complexInput,这时尺寸已经满足要求
cvScale(large_img, realInput, 1.0, 0.0);
cvZero(imaginaryInput);
cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

//把数据直接裁剪到中心位置
cvGetSubRect(dft_A, &tmp, cvRect(0, 0, complexInput->width, complexInput->height));
//cvGetSubRect(dft_A, &tmp, cvRect(0,0,im->width,im->height));
cvCopy(complexInput, &tmp, NULL);


//DFT变换,高通滤波,DFT反变换
cvDFT(dft_A, dft_A, CV_DXT_FORWARD, complexInput->height);
ILPF(dft_A, D0);
cvDFT(dft_A, dft_A, CV_DXT_INVERSE, complexInput->height);


//取出实部
image_Re = cvCreateImage(cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage(cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
cvSplit(dft_A, image_Re, image_Im, 0, 0);
IplImage *uint8image = cvCreateImage(cvGetSize(image_Re), IPL_DEPTH_8U, 1);//
cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
cvConvertScale(image_Re, uint8image, 255 / (M - m), 255 * (-m) / (M - m));//0-255之间
//cvMinMaxLoc(uint8image, &m, &M, NULL, NULL, NULL);//此时的n,M [0-1]

//二值化
IplImage *bwimage = cvCreateImage(cvGetSize(uint8image), IPL_DEPTH_8U, 1);//二值图像
cvAdaptiveThreshold(uint8image, bwimage, 255, CV_ADAPTIVE_THRESH_MEAN_C, 0, 11, 10.0);//,CV_ADAPTIVE_THRESH_GAUSSIAN_C,7


cvNamedWindow("src_img", CV_WINDOW_AUTOSIZE);
cvShowImage("src_img",im);//原图
cvNamedWindow("grayimage", CV_WINDOW_AUTOSIZE);
cvShowImage("grayimage", uint8image);//灰度图
cvSaveImage("grayimage.jpg", uint8image);
cvNamedWindow("bwimage", CV_WINDOW_AUTOSIZE);
cvShowImage("bwimage", bwimage);//二值
cvSaveImage("bwimage.jpg", bwimage);
cvWaitKey(-1);

return 0;
}

猜你喜欢

转载自blog.csdn.net/u011731135/article/details/52541199