OpenCV Learning Notes (3): Reducing the Color Space

  First, let's look at a simple color reduction method. In C or C++ an unsigned char (8 bits) gives a single channel 256 distinct values (2^8 = 256). With the RGB scheme and three channels, the number of possible colors grows to 256*256*256, roughly 16.7 million. That many colors is still a burden for a computer to process, so it is worth finding ways to reduce the number of colors.

  Suppose the pixel value range is 0-255 and N is chosen as 64; the number of colors then drops to 4x4x4. The conversion formula is data = data / N * N + N / 2. The integer division data / N assigns each value to one of four bins: 0-63 -> 0, 64-127 -> 1, 128-191 -> 2, 192-255 -> 3. Multiplying by N again gives 0, 64, 128 or 192, and adding N/2 maps the whole 0-255 range onto just four levels, 32, 96, 160 and 224, i.e. the midpoint of each bin. Example code follows:

#include <iostream>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;


void colorReduce(Mat& srcImg, Mat& dstImg, int n)
{
	double start = static_cast<double>(getTickCount());
	for (int i = 0; i < dstImg.rows; i++)
	{
		for (int j = 0; j < dstImg.cols; j++)
		{
			dstImg.at<Vec3b>(i, j)[0] = (srcImg.at<Vec3b>(i, j)[0] / n) * n + n / 2; // quantize each channel into bins of width n
			dstImg.at<Vec3b>(i, j)[1] = (srcImg.at<Vec3b>(i, j)[1] / n) * n + n / 2;
			dstImg.at<Vec3b>(i, j)[2] = (srcImg.at<Vec3b>(i, j)[2] / n) * n + n / 2;
		}
	}
	double end = static_cast<double>(getTickCount());
	double time = (end - start) / getTickFrequency();
	cout << "运行时间为:" << time << "秒" << endl;
	imshow("原图像", srcImg);
	imshow("阈值处理后图像", dstImg);
	waitKey(0);

}

void colorReduceByPointer0(Mat& srcImg, Mat& dstImg, int n)
{
	double start = static_cast<double>(getTickCount());
	int row = dstImg.rows;
	int col = dstImg.cols * dstImg.channels();	// each row treated as a flat array of bytes, so this works for grayscale and color alike
	for (int i = 0; i < row; i++)
	{
		uchar* data = dstImg.ptr(i);	// pointer to the first byte of row i
		for (int j = 0; j < col; j++)
		{
			data[j] = (data[j] / n) * n + n / 2;	// quantize each byte into bins of width n
		}
	}
	double end = static_cast<double>(getTickCount());
	double time = (end - start) / getTickFrequency();
	cout << "运行时间为:" << time << "秒" << endl;
	imshow("原图像", srcImg);
	imshow("阈值处理后的图像", dstImg);
	waitKey(0);
}

void colorReduceByPointer1(Mat& srcImg, Mat& dstImg, int n)
{
	double start = static_cast<double>(getTickCount());
	int row = dstImg.rows;
	int col = dstImg.cols * dstImg.channels();
	for (int i = 0; i < row; i++)
	{
		uchar* data = dstImg.ptr(i);
		for (int j = 0; j < col; j++)
		{
			data[j] = (data[j] / n) * n + n / 2;
		}
	}
	double end = static_cast<double>(getTickCount());
	double time = (end - start) / getTickFrequency();
	cout << "运行时间为:" << time << "秒" << endl;
	imshow("原图像:", srcImg);
	imshow("阈值处理后的图像", dstImg);
	waitKey(0);
}

void colorReduceByIterator0(Mat& srcImg, Mat& dstImg, int n)	// iterator version for a single-channel (grayscale) image
{
	double start = static_cast<double>(getTickCount());
	Mat_<uchar>::iterator it = dstImg.begin<uchar>();
	Mat_<uchar>::iterator itend = dstImg.end<uchar>();
	for (; it != itend; it++)
	{
		*it = (*it / n) * n + n / 2;
	}
	double end = static_cast<double>(getTickCount());
	double time = (end - start) / getTickFrequency();
	cout << "运行时间为:" << time << "秒" << endl;
	imshow("原图像:", srcImg);
	imshow("阈值处理后的图像", dstImg);
	waitKey(0);
}

void colorReduceByIterator1(Mat& srcImg, Mat& dstImg, int n)	// iterator version for a 3-channel (color) image
{
	double start = static_cast<double>(getTickCount());
	Mat_<Vec3b>::iterator it = dstImg.begin<Vec3b>();
	Mat_<Vec3b>::iterator itend = dstImg.end<Vec3b>();
	for (; it != itend; it++)
	{
		(*it)[0] = ((*it)[0] / n) * n + n / 2;
		(*it)[1] = ((*it)[1] / n) * n + n / 2;
		(*it)[2] = ((*it)[2] / n) * n + n / 2;
	}
	double end = static_cast<double>(getTickCount());
	double time = (end - start) / getTickFrequency();
	cout << "时间:" << time << "秒" << endl;
	imshow("原图像", srcImg);
	imshow("阈值处理后的图像", dstImg);
	waitKey(0);

}


int main(int argc, char* argv[])
{
	Mat GrayOutImg, ColorOutImg, GrayImg, ColorImg;
	GrayImg = imread("D:\\1.png", 0);	// 0 = load as grayscale
	GrayOutImg = GrayImg.clone();
	ColorImg = imread("D:\\1.png", 1);	// 1 = load as 3-channel color
	ColorOutImg = ColorImg.clone();

	//colorReduce(ColorImg, ColorOutImg, 32);			//0
	//colorReduceByIterator0(GrayImg, GrayOutImg, 32);		//1
	colorReduceByIterator1(ColorImg, ColorOutImg, 32);		//2
	//colorReduceByPointer0(GrayImg, GrayOutImg, 32);		//3
	//colorReduceByPointer1(ColorImg, ColorOutImg, 32);		//4

	return 0;
}
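
  In addition to the per-pixel loops above, this kind of quantization can also be done by precomputing a 256-entry lookup table and applying it with cv::LUT, which OpenCV's image-scanning tutorial suggests is usually the fastest option because the formula is evaluated only 256 times and each pixel then costs a single table lookup. Below is a minimal, self-contained sketch of that idea; the file path "D:\\1.png" and n = 32 are placeholder values reused from the example above.

#include "opencv2/opencv.hpp"
using namespace cv;

// Minimal sketch: the same color reduction done with a lookup table and cv::LUT.
// Works for both grayscale and color 8-bit images; path and n are placeholders.
int main()
{
	Mat src = imread("D:\\1.png", 1);
	if (src.empty()) return -1;

	const int n = 32;
	Mat lut(1, 256, CV_8U);	// one entry per possible pixel value
	for (int v = 0; v < 256; ++v)
		lut.at<uchar>(v) = (uchar)((v / n) * n + n / 2);	// same formula as above

	Mat dst;
	LUT(src, lut, dst);	// applies the table to every channel of every pixel

	imshow("Original image", src);
	imshow("Color-reduced image", dst);
	waitKey(0);
	return 0;
}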


Reposted from blog.csdn.net/xyu66/article/details/79954744