[Basics] [C++ and OpenCV] Mean filtering, Gaussian filtering, median filtering, two-level pseudocolor, multi-level pseudocolor, false-color transformation, image inversion, log transformation, gamma transformation, contrast stretching, gray-level slicing, bit-plane slicing, Laplacian sharpening

Each code snippet is fairly short, so they are all collected in one post.
Original grayscale image:
[image]

Mean filtering

Every element of the k×k filter kernel equals 1/k², and the image borders are left unprocessed.
Code:

#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_Average = srcImage.clone();  // clone so the unprocessed border keeps the original pixels
    int k_Average = 3;
    for (int row = k_Average / 2; row < srcImage.rows - k_Average / 2; row++)
        for (int col = k_Average / 2; col < srcImage.cols - k_Average / 2; col++)
        {
            int pixel_sum = 0;
            // Sum every pixel under the k×k window centred on (row, col)
            for (int a = -k_Average / 2; a <= k_Average / 2; a++)
                for (int b = -k_Average / 2; b <= k_Average / 2; b++)
                    pixel_sum = pixel_sum + srcImage.at<uchar>(row + a, col + b);
            dstImage_Average.at<uchar>(row, col) = pixel_sum / (k_Average * k_Average);
        }
    imshow("Mean filtering", dstImage_Average);
    waitKey(0);
    return 0;
}

[image]
Result of mean filtering with an 11×11 kernel (set k_Average = 11 to reproduce it).
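For comparison, OpenCV's built-in box filter cv::blur produces the same averaging and also fills in the border pixels; a minimal sketch, reusing the 1.jpg input from the example above (the window title is mine):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        Mat dst;
        // 11x11 box filter; borders are handled by OpenCV's default reflection policy
        blur(src, dst, Size(11, 11));
        imshow("cv::blur 11x11", dst);
        waitKey(0);
        return 0;
    }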

Gaussian filtering

The kernel is a k×k sample of the 2-D Gaussian (normal distribution) function; the image borders are left unprocessed.
Code:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    #include <cmath>
    using namespace cv;
    using namespace std;
    double PI = 3.1415926536;
    Mat mask_Gaussian(int k, float sigma);  // builds a k×k Gaussian kernel
    Mat mask_Gaussian(int k, float sigma)
    {
        double sum = 0.0;
        Mat mask(k, k, CV_32F, Scalar::all(0));
        for (int a = 0; a < k; ++a)
            for (int b = 0; b < k; ++b)
            {
                mask.at<float>(a, b) = exp(-((a - k / 2) * (a - k / 2) + (b - k / 2) * (b - k / 2)) / (2 * sigma * sigma)) / (2 * PI * sigma * sigma);
                sum += mask.at<float>(a, b);
            }
        mask = mask / sum;      // normalize so the kernel sums to 1
        cout << mask << endl;   // print the kernel for inspection
        return mask;
    }
    int main()
    {
        Mat srcImage = imread("1.jpg", 0);
        Mat dstImage_Gaussian = srcImage.clone();  // clone so the unprocessed border keeps the original pixels
        int k_Gaussian = 11;
        double sigma = (k_Gaussian * 0.5 - 1) * 0.3 + 0.8;  // heuristic for choosing sigma from the kernel size
        Mat mask = mask_Gaussian(k_Gaussian, sigma);
        for (int row = k_Gaussian / 2; row < srcImage.rows - k_Gaussian / 2; row++)
            for (int col = k_Gaussian / 2; col < srcImage.cols - k_Gaussian / 2; col++)
            {
                double pixel_sum = 0.0;
                for (int a = -k_Gaussian / 2; a <= k_Gaussian / 2; a++)
                    for (int b = -k_Gaussian / 2; b <= k_Gaussian / 2; b++)
                        pixel_sum += mask.at<float>(a + k_Gaussian / 2, b + k_Gaussian / 2) * srcImage.at<uchar>(row + a, col + b);
                dstImage_Gaussian.at<uchar>(row, col) = saturate_cast<uchar>(pixel_sum);
            }
        imshow("Gaussian filtering", dstImage_Gaussian);
        waitKey(0);
        return 0;
    }

[image]
Result of Gaussian filtering with an 11×11 kernel.
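OpenCV's cv::GaussianBlur gives an equivalent result; a minimal sketch (passing sigma = 0 tells OpenCV to derive sigma from the kernel size):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        Mat dst;
        // 11x11 Gaussian kernel; sigma = 0 means "compute sigma from the kernel size"
        GaussianBlur(src, dst, Size(11, 11), 0);
        imshow("cv::GaussianBlur 11x11", dst);
        waitKey(0);
        return 0;
    }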

Median filtering

The kernel is k×k; the median of the pixels under the mask replaces the pixel at the mask centre.
Code:

    #include <opencv2/opencv.hpp>
    using namespace cv;
    int main()
    {
        Mat srcImage = imread("1.jpg", 0);
        Mat dstImage_median = srcImage.clone();  // clone so the unprocessed border keeps the original pixels
        int k_median = 11;
        for (int row = k_median / 2; row < srcImage.rows - k_median / 2; row++)
            for (int col = k_median / 2; col < srcImage.cols - k_median / 2; col++)
            {
                Mat pixel_group(k_median, k_median, CV_8U);   // holds every pixel under the kernel
                Mat pixel_group_median(1, k_median, CV_8U);   // holds the median of each kernel row
                for (int a = -k_median / 2; a <= k_median / 2; a++)
                    for (int b = -k_median / 2; b <= k_median / 2; b++)
                        pixel_group.at<uchar>(a + k_median / 2, b + k_median / 2) = srcImage.at<uchar>(row + a, col + b);  // fill pixel_group
                cv::sort(pixel_group, pixel_group, SORT_EVERY_ROW | SORT_ASCENDING);  // sort each row in ascending order
                for (int a = 0; a < k_median; a++)
                    pixel_group_median.at<uchar>(0, a) = pixel_group.at<uchar>(a, k_median / 2);  // the middle element of each sorted row is that row's median
                cv::sort(pixel_group_median, pixel_group_median, SORT_EVERY_ROW | SORT_ASCENDING);  // sort the row medians in ascending order
                // The middle element of the sorted row medians is taken as the window median
                // (strictly speaking, the median of row medians approximates the true k×k median).
                dstImage_median.at<uchar>(row, col) = pixel_group_median.at<uchar>(0, k_median / 2);
            }
        imshow("Median filtering", dstImage_median);
        waitKey(0);
        return 0;
    }

[image]
Result of median filtering with an 11×11 kernel.
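The built-in cv::medianBlur computes the exact median of the window; a minimal sketch:

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        Mat dst;
        medianBlur(src, dst, 11);  // aperture size must be odd
        imshow("cv::medianBlur 11x11", dst);
        waitKey(0);
        return 0;
    }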

Pseudocolor processing

Two levels:


    #include <opencv2/opencv.hpp>
    using namespace cv;
    int main()
    {
        Mat srcImage = imread("1.jpg", 0);
        Mat dstImage_Pseudocolor_2Layer;
        dstImage_Pseudocolor_2Layer.create(srcImage.size(), CV_8UC3);
        for (int row = 0; row < srcImage.rows; row++)
            for (int col = 0; col < srcImage.cols; col++)
                if (srcImage.at<uchar>(row, col) <= 127)
                {
                    // dark half of the gray range -> green (BGR order)
                    dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[0] = 0;
                    dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[1] = 255;
                    dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[2] = 0;
                }
                else
                {
                    // bright half of the gray range -> red (BGR order)
                    dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[0] = 0;
                    dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[1] = 0;
                    dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[2] = 255;
                }
        imshow("Two-level pseudocolor", dstImage_Pseudocolor_2Layer);
        waitKey(0);
        return 0;
    }

Pixels with a gray level of 127 or below are mapped to green, and pixels above 127 to red.
[image]
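The same two-level mapping can also be written with OpenCV primitives (cv::threshold plus Mat::setTo); a minimal sketch, with the colors chosen to match the loop above:

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        Mat mask;
        threshold(src, mask, 127, 255, THRESH_BINARY);     // mask = 255 where the pixel is > 127
        Mat dst(src.size(), CV_8UC3, Scalar(0, 255, 0));   // start with everything green (BGR)
        dst.setTo(Scalar(0, 0, 255), mask);                // paint the bright pixels red
        imshow("Two-level pseudocolor (threshold)", dst);
        waitKey(0);
        return 0;
    }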
Multiple levels:

    #include <opencv2/opencv.hpp>
    using namespace cv;
    int main()
    {
        Mat srcImage = imread("1.jpg", 0);
        Mat dstImage_Pseudocolor_MultiLayer;
        dstImage_Pseudocolor_MultiLayer.create(srcImage.size(), CV_8UC3);
        for (int row = 0; row < srcImage.rows; row++)
            for (int col = 0; col < srcImage.cols; col++)
                if (srcImage.at<uchar>(row, col) <= 50)
                {
                    // [0, 50]: red ramps up, no blue or green
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = 2.56 * srcImage.at<uchar>(row, col) + 127;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = 0;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = 0;
                }
                else if (srcImage.at<uchar>(row, col) > 50 && srcImage.at<uchar>(row, col) <= 100)
                {
                    // (50, 100]: full red, green ramps up
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = 255;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = 0;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = 2.56 * srcImage.at<uchar>(row, col) - 1;
                }
                else if (srcImage.at<uchar>(row, col) > 100 && srcImage.at<uchar>(row, col) <= 150)
                {
                    // (100, 150]: red ramps down, blue ramps up, full green
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = -2.56 * srcImage.at<uchar>(row, col) + 384;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = srcImage.at<uchar>(row, col) - 100;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = 255;
                }
                else if (srcImage.at<uchar>(row, col) > 150 && srcImage.at<uchar>(row, col) <= 200)
                {
                    // (150, 200]: full blue, green ramps down, no red
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = 0;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = 255;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = -srcImage.at<uchar>(row, col) + 255;
                }
                else
                {
                    // (200, 255]: blue only, ramping down
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = 0;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = -srcImage.at<uchar>(row, col) + 382;
                    dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = 0;
                }
        imshow("Multi-level pseudocolor", dstImage_Pseudocolor_MultiLayer);
        waitKey(0);
        return 0;
    }

Result:
[image]
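OpenCV also ships ready-made pseudocolor palettes through cv::applyColorMap; a minimal sketch using the JET colormap (a different palette from the hand-built one above):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        Mat dst;
        applyColorMap(src, dst, COLORMAP_JET);  // map each gray level through a predefined color table
        imshow("applyColorMap JET", dst);
        waitKey(0);
        return 0;
    }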

False-color transformation

Principle: replace each color channel of the original image with a different channel.

#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat srcImage_color = imread("1.jpg", 1);  // load as a 3-channel BGR image
    Mat dstImage_PseudocolorTransformation(srcImage_color.size(), CV_8UC3);
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            // cyclically swap the channels: B <- G, G <- R, R <- B
            dstImage_PseudocolorTransformation.at<Vec3b>(row, col)[0] = srcImage_color.at<Vec3b>(row, col)[1];
            dstImage_PseudocolorTransformation.at<Vec3b>(row, col)[1] = srcImage_color.at<Vec3b>(row, col)[2];
            dstImage_PseudocolorTransformation.at<Vec3b>(row, col)[2] = srcImage_color.at<Vec3b>(row, col)[0];
        }
    imshow("False-color transformation", dstImage_PseudocolorTransformation);
    waitKey(0);
    return 0;
}

Result:
[image]
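The same cyclic channel swap can be expressed with cv::mixChannels; a minimal sketch, where each fromTo pair means (source channel, destination channel):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 1);
        Mat dst(src.size(), src.type());
        // pairs: G -> B, R -> G, B -> R
        int fromTo[] = { 1, 0,  2, 1,  0, 2 };
        mixChannels(&src, 1, &dst, 1, fromTo, 3);
        imshow("mixChannels false color", dst);
        waitKey(0);
        return 0;
    }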

Image inversion (negative effect)

Principle: transform every pixel with the formula new_pixel = 255 - old_pixel.

#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat srcImage_color = imread("1.jpg", 1);  // load as a 3-channel BGR image
    Mat dstImage_Reverse(srcImage_color.size(), CV_8UC3);
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            dstImage_Reverse.at<Vec3b>(row, col)[0] = 255 - srcImage_color.at<Vec3b>(row, col)[0];
            dstImage_Reverse.at<Vec3b>(row, col)[1] = 255 - srcImage_color.at<Vec3b>(row, col)[1];
            dstImage_Reverse.at<Vec3b>(row, col)[2] = 255 - srcImage_color.at<Vec3b>(row, col)[2];
        }
    imshow("Image inversion", dstImage_Reverse);
    waitKey(0);
    return 0;
}

Result:
[image]
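For 8-bit images the negative is just a bitwise NOT, so cv::bitwise_not gives the same result; a minimal sketch:

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 1);
        Mat dst;
        bitwise_not(src, dst);  // 255 - pixel for every channel of every pixel
        imshow("bitwise_not negative", dst);
        waitKey(0);
        return 0;
    }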

Log transformation

Principle: map the original pixels through a logarithmic function.

#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;
int main()
{
    Mat srcImage_color = imread("1.jpg", 1);  // load as a 3-channel BGR image
    Mat dstImage_LogarithmicTransformation(srcImage_color.size(), CV_8UC3);
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            // new = 45 * ln(1 + old); 45 * ln(256) ≈ 250, so the result stays within 8 bits
            dstImage_LogarithmicTransformation.at<Vec3b>(row, col)[0] = 45 * log(1.0 + srcImage_color.at<Vec3b>(row, col)[0]);
            dstImage_LogarithmicTransformation.at<Vec3b>(row, col)[1] = 45 * log(1.0 + srcImage_color.at<Vec3b>(row, col)[1]);
            dstImage_LogarithmicTransformation.at<Vec3b>(row, col)[2] = 45 * log(1.0 + srcImage_color.at<Vec3b>(row, col)[2]);
        }
    imshow("Log transformation", dstImage_LogarithmicTransformation);
    waitKey(0);
    return 0;
}

Result:
[image]
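The per-pixel loop can be replaced by a vectorized version built on cv::log; a minimal sketch using the same scale factor of 45:

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 1);
        Mat f;
        src.convertTo(f, CV_32F);       // work in floating point
        log(f + 1.0, f);                // f = ln(1 + pixel), element-wise
        Mat dst;
        f.convertTo(dst, CV_8U, 45.0);  // scale by 45 and saturate back to 8 bits
        imshow("cv::log transform", dst);
        waitKey(0);
        return 0;
    }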

Gamma transformation

Principle: map the original pixels through a power function. Note that the pixel values must first be normalized to the range 0 to 1, so that the gamma mapping can exploit the behaviour of the power function on [0, 1].

#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;
int main()
{
    Mat srcImage_color = imread("1.jpg", 1);  // load as a 3-channel BGR image
    Mat dstImage_GAMMA(srcImage_color.size(), CV_8UC3);
    double gamma = 4;
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            // Normalize each value to [0, 1], apply the power function, then scale back to [0, 255].
            dstImage_GAMMA.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(pow(srcImage_color.at<Vec3b>(row, col)[0] / 255.0, gamma) * 255);
            dstImage_GAMMA.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(pow(srcImage_color.at<Vec3b>(row, col)[1] / 255.0, gamma) * 255);
            dstImage_GAMMA.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(pow(srcImage_color.at<Vec3b>(row, col)[2] / 255.0, gamma) * 255);
        }
    imshow("Gamma transformation", dstImage_GAMMA);
    waitKey(0);
    return 0;
}

Result:
[image]
Result for gamma = 4, i.e. the mapping new_pixel = old_pixel^4 (applied to the normalized values).
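Since the mapping depends only on the gray value, gamma correction is commonly implemented with a 256-entry lookup table and cv::LUT; a minimal sketch with the same gamma = 4:

    #include <opencv2/opencv.hpp>
    #include <cmath>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 1);
        double gamma = 4.0;
        // Precompute the gamma curve once for all 256 gray levels
        Mat lut(1, 256, CV_8U);
        for (int i = 0; i < 256; i++)
            lut.at<uchar>(i) = saturate_cast<uchar>(std::pow(i / 255.0, gamma) * 255.0);
        Mat dst;
        LUT(src, lut, dst);  // apply the table to every channel
        imshow("Gamma via LUT", dst);
        waitKey(0);
        return 0;
    }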

Contrast stretching

Principle: expand the dynamic range of the image. The mapping is the straight line through the points (minimum gray level, 0) and (maximum gray level, 255).

#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_ContrastStretch(srcImage.size(), CV_8U);
    int MinPixel = 0, MaxPixel = 255;
    // Find the minimum gray level:
    // starting from gray level 0, count the pixels at each level; the first level with a non-zero count is the minimum gray value in the image.
    for (int pixel = 0; pixel < 256; pixel++)
    {
        int sum = 0;
        for (int row = 0; row < srcImage.rows; row++)
            for (int col = 0; col < srcImage.cols; col++)
            {
                if (srcImage.at<uchar>(row, col) == pixel)
                    sum++;
            }
        if (sum != 0)
        {
            MinPixel = pixel;
            break;
        }
    }
    // Find the maximum gray level:
    // starting from gray level 255 and counting downwards, the first level with a non-zero count is the maximum gray value in the image.
    for (int pixel = 255; pixel >= 0; pixel--)
    {
        int sum = 0;
        for (int row = 0; row < srcImage.rows; row++)
            for (int col = 0; col < srcImage.cols; col++)
            {
                if (srcImage.at<uchar>(row, col) == pixel)
                    sum++;
            }
        if (sum != 0)
        {
            MaxPixel = pixel;
            break;
        }
    }
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
            if (srcImage.at<uchar>(row, col) >= MinPixel && srcImage.at<uchar>(row, col) <= MaxPixel)
                // The mapping rule: a straight line from (MinPixel, 0) to (MaxPixel, 255)
                dstImage_ContrastStretch.at<uchar>(row, col) = 255 * (srcImage.at<uchar>(row, col) - MinPixel) / (MaxPixel - MinPixel);
            else
                dstImage_ContrastStretch.at<uchar>(row, col) = 0;
    imshow("Contrast stretching", dstImage_ContrastStretch);
    waitKey(0);
    return 0;
}

Original image:
[image]
Result:
[image]
The gray-level variations in the image are now much more distinct.
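The same stretch can be obtained with cv::minMaxLoc and cv::normalize, where NORM_MINMAX maps the observed minimum and maximum to 0 and 255; a minimal sketch:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        double minVal, maxVal;
        minMaxLoc(src, &minVal, &maxVal);          // find the darkest and brightest gray levels
        std::cout << "min = " << minVal << ", max = " << maxVal << std::endl;
        Mat dst;
        normalize(src, dst, 0, 255, NORM_MINMAX);  // linearly stretch [min, max] to [0, 255]
        imshow("Contrast stretch (normalize)", dst);
        waitKey(0);
        return 0;
    }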

Gray-level slicing

Principle: display a chosen gray-level range with a specific value (or color).

#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_GrayLayer(srcImage.size(), CV_8U);
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
            if (srcImage.at<uchar>(row, col) >= 120 && srcImage.at<uchar>(row, col) <= 180)
                dstImage_GrayLayer.at<uchar>(row, col) = 255;  // highlight the selected range in white
            else
                dstImage_GrayLayer.at<uchar>(row, col) = srcImage.at<uchar>(row, col);  // keep other pixels unchanged
    imshow("Gray-level slicing", dstImage_GrayLayer);
    waitKey(0);
    return 0;
}

Result:
[image]
This program replaces all pixels with gray levels between 120 and 180 with the value 255 (pure white).
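A minimal sketch of the same slicing using cv::inRange and Mat::setTo:

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        Mat mask;
        inRange(src, Scalar(120), Scalar(180), mask);  // mask = 255 where 120 <= pixel <= 180
        Mat dst = src.clone();
        dst.setTo(255, mask);                          // highlight the selected range in white
        imshow("Gray-level slicing (inRange)", dst);
        waitKey(0);
        return 0;
    }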

Bit-plane slicing

Principle: the gray levels of an image run from 0 to 255, so in binary every pixel consists of 8 bits. Split those 8 bits into 8 separate planes; within each plane, pixels whose bit is 1 are shown as gray level 255 (pure white) and pixels whose bit is 0 as gray level 0 (pure black).

#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_BitLayer1(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer2(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer3(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer4(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer5(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer6(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer7(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer8(srcImage.size(), CV_8U);
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
        {
            // Plane 1: the least significant bit
            if (srcImage.at<uchar>(row, col) & 1)
                dstImage_BitLayer1.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer1.at<uchar>(row, col) = 0;
            // Plane 2
            if (srcImage.at<uchar>(row, col) & 2)
                dstImage_BitLayer2.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer2.at<uchar>(row, col) = 0;
            // Plane 3
            if (srcImage.at<uchar>(row, col) & 4)
                dstImage_BitLayer3.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer3.at<uchar>(row, col) = 0;
            // Plane 4
            if (srcImage.at<uchar>(row, col) & 8)
                dstImage_BitLayer4.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer4.at<uchar>(row, col) = 0;
            // Plane 5
            if (srcImage.at<uchar>(row, col) & 16)
                dstImage_BitLayer5.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer5.at<uchar>(row, col) = 0;
            // Plane 6
            if (srcImage.at<uchar>(row, col) & 32)
                dstImage_BitLayer6.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer6.at<uchar>(row, col) = 0;
            // Plane 7
            if (srcImage.at<uchar>(row, col) & 64)
                dstImage_BitLayer7.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer7.at<uchar>(row, col) = 0;
            // Plane 8: the most significant bit
            if (srcImage.at<uchar>(row, col) & 128)
                dstImage_BitLayer8.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer8.at<uchar>(row, col) = 0;
        }
    imshow("Bit plane 1 (lowest)", dstImage_BitLayer1);
    imshow("Bit plane 2", dstImage_BitLayer2);
    imshow("Bit plane 3", dstImage_BitLayer3);
    imshow("Bit plane 4", dstImage_BitLayer4);
    imshow("Bit plane 5", dstImage_BitLayer5);
    imshow("Bit plane 6", dstImage_BitLayer6);
    imshow("Bit plane 7", dstImage_BitLayer7);
    imshow("Bit plane 8", dstImage_BitLayer8);
    waitKey(0);
    return 0;
}

Plane 1 (lowest)
[image]
Plane 2
[image]
Plane 3
[image]
Plane 4
[image]
Plane 5
[image]
Plane 6
[image]
Plane 7
[image]
Plane 8
[image]

The top few planes carry most of the detail of the original image, so reducing the number of stored bit planes is one way to compress an image.
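For reference, the eight planes can be extracted more compactly with cv::bitwise_and in a loop; a minimal sketch that shows each plane as a 0/255 image:

    #include <opencv2/opencv.hpp>
    #include <string>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        for (int bit = 0; bit < 8; bit++)
        {
            Mat plane;
            bitwise_and(src, Scalar(1 << bit), plane);  // keep only this bit
            plane = plane > 0;                          // comparison yields 0 or 255
            imshow("Bit plane " + std::to_string(bit + 1), plane);
        }
        waitKey(0);
        return 0;
    }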

Laplacian sharpening

Principle: image sharpening emphasizes the regions where gray levels change strongly. With the first derivative, a steadily rising gray-level ramp gives a wide non-zero constant response, so sharpening with it produces edges that are too wide and look poor; the second derivative instead produces only a pair of impulses, separated by zero, at the start and end of the ramp and is zero elsewhere, so the resulting edges are narrow. For a 2-D image we need second-order partial derivatives, so we use the Laplacian operator, i.e. the sum of the second derivatives in the x and y directions. Since a digital image is discrete, the sum of the second differences in x and y is used instead and is expressed as a convolution mask.
Note that the Laplacian response contains negative values, while displayed gray levels are non-negative, so the original image is first converted to floating point and normalized to the range 0 to 1 (otherwise the sharpened result would display as almost entirely white). After the float image has been filtered with the Laplacian, clamping the negative responses to 0 (as a saturate_cast back to 8 bits would do) makes the image look dark; to avoid this, the filtered response is subtracted from the float original, which gives the correct sharpening result.


// Note: the filtering must be done in floating point
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_LaplaceSharpen(srcImage.size(), CV_32F, Scalar::all(0));
    Mat srcImage_float;
    // Convert the input image to float and normalize it to the range [0, 1]
    srcImage.convertTo(srcImage_float, CV_32F, 1.0 / 255.0);
    // Laplacian sharpening with the 8-neighbour kernel (centre weight -8)
    for (int row = 1; row < srcImage.rows - 1; row++)
        for (int col = 1; col < srcImage.cols - 1; col++)
        {
            dstImage_LaplaceSharpen.at<float>(row, col) = saturate_cast<float>(
                srcImage_float.at<float>(row - 1, col - 1) + srcImage_float.at<float>(row - 1, col) +
                srcImage_float.at<float>(row - 1, col + 1) + srcImage_float.at<float>(row, col - 1) +
                srcImage_float.at<float>(row, col + 1) + srcImage_float.at<float>(row + 1, col - 1) +
                srcImage_float.at<float>(row + 1, col) + srcImage_float.at<float>(row + 1, col + 1) -
                8 * srcImage_float.at<float>(row, col));
            // Sharpened result = original - Laplacian response
            dstImage_LaplaceSharpen.at<float>(row, col) = srcImage_float.at<float>(row, col) - dstImage_LaplaceSharpen.at<float>(row, col);
        }
    imshow("Laplacian sharpening", dstImage_LaplaceSharpen);
    waitKey(0);
    return 0;
}

Result:
[image]
Here an 8-neighbourhood Laplacian is used, i.e. the basic Laplacian mask with 1 (or -1, depending on the sign convention) added at the four corner positions.
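The same 8-neighbour Laplacian can be applied with cv::filter2D and an explicit kernel; a minimal sketch that reproduces the subtract-the-response step above:

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat src = imread("1.jpg", 0);
        Mat srcF;
        src.convertTo(srcF, CV_32F, 1.0 / 255.0);
        // 8-neighbour Laplacian kernel
        Mat kernel = (Mat_<float>(3, 3) <<
            1,  1, 1,
            1, -8, 1,
            1,  1, 1);
        Mat lap;
        filter2D(srcF, lap, CV_32F, kernel);
        Mat sharpened = srcF - lap;  // original minus the Laplacian response
        imshow("Laplacian sharpening (filter2D)", sharpened);
        waitKey(0);
        return 0;
    }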


Reposted from blog.csdn.net/u011861755/article/details/82750092