Face Beautification with a Fast Bilateral Filter

Copyright notice: this is an original article by the author and may not be reposted without permission. https://blog.csdn.net/zhangpengzp/article/details/87884584

Fast bilateral filtering

The principle of bilateral filtering is explained in many blogs: the filter weights each neighbouring pixel by both its spatial distance and its range (intensity) difference, so edges are preserved well after smoothing. There are many ways to speed it up; see the collection of papers at http://people.csail.mit.edu/sparis/bf/
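
For reference, the standard bilateral filter definition is given below (this formula is my addition, the textbook form rather than anything from the original post); G_sigma denotes a Gaussian and S the neighbourhood of pixel p:

$$BF[I]_p = \frac{1}{W_p}\sum_{q \in S} G_{\sigma_s}(\lVert p-q\rVert)\,G_{\sigma_r}(\lvert I_p-I_q\rvert)\,I_q,\qquad W_p = \sum_{q \in S} G_{\sigma_s}(\lVert p-q\rVert)\,G_{\sigma_r}(\lvert I_p-I_q\rvert)$$

where \sigma_s controls the spatial weight and \sigma_r the range (intensity) weight.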

The fast bilateral filter used in this post follows the article 快速双边滤波 附完整C代码 (fast bilateral filter with complete C code); the reference implementation it is based on is https://github.com/Fig1024/OP_RBF/blob/master/RBFilterPlain.cpp

The code is posted here as well:

Header file

void    CRBFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Stride, float sigmaSpatial, float sigmaRange);

Calling CRBFilter is enough to run the fast bilateral filter. The parameters are:

Input: the input image; 1, 3, or 4 channels are supported.

Output: the output image (same size and layout as the input).

Width: image width in pixels.

Height: image height in pixels.

Stride: the size of one row of the image array, i.e. Width * Channels.


sigmaSpatial: judging by the name this should be the spatial-domain sigma, but in my experiments it seems to act like the range (pixel-value) sigma, i.e. it relates to colour differences; valid range [0, 1].

sigmaRange: experimentally this behaves more like a kernel diameter or size: the larger it is, the blurrier the image, similar to a spatial-domain sigma; its valid range is also [0, 1].
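
A minimal call sketch (my own example, not from the original post; it assumes a packed 8-bit buffer with stride = width * channels, since CRBFilter derives the channel count as Stride / Width):

#include "fastbilateralFilter.h"

// Hypothetical helper: smooth a packed 8-bit image buffer with the fast bilateral filter.
void smoothBuffer(unsigned char* src, unsigned char* dst,
                  int width, int height, int channels)
{
	// both parameters take values in [0, 1]; 0.1 / 0.1 are the values used in the test program below
	float sigmaSpatial = 0.1f;
	float sigmaRange = 0.1f;
	CRBFilter(src, dst, width, height, width * channels, sigmaSpatial, sigmaRange);
}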

Source file (.cpp)


#include "fastbilateralFilter.h"
#include "stdafx.h"
#include <algorithm>
#include<iostream>
int  getDiffFactor(const unsigned char* color1, const unsigned char* color2, const  int & channels)
{
	int final_diff;
	int component_diff[4];

	// find absolute difference between each component
	for (int i = 0; i < channels; i++)
	{
	//	printf("i = %d , channel = %d", i, channels);
		component_diff[i] = abs(color1[i] - color2[i]);
	}

	// based on number of components, produce a single difference value in the 0-255 range
	switch (channels)
	{
	case 1:
		final_diff = component_diff[0];
		break;

	case 2:
		final_diff = ((component_diff[0] + component_diff[1]) >> 1);
		break;

	case 3:
		final_diff = ((component_diff[0] + component_diff[2]) >> 2) + (component_diff[1] >> 1);
		break;

	case 4:
		final_diff = ((component_diff[0] + component_diff[1] + component_diff[2] + component_diff[3]) >> 2);
		break;

	default:
		final_diff = 0;
	}

	_ASSERT(final_diff >= 0 && final_diff <= 255);

	return final_diff;
}

void CRB_HorizontalFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Channels, float * range_table_f, float inv_alpha_f, float* left_Color_Buffer, float* left_Factor_Buffer, float* right_Color_Buffer, float* right_Factor_Buffer)
{

	// Left pass and Right pass 

	int Stride = Width * Channels;
	const unsigned char* src_left_color = Input;
	float* left_Color = left_Color_Buffer;
	float* left_Factor = left_Factor_Buffer;

	int last_index = Stride * Height - 1;
	const unsigned char* src_right_color = Input + last_index;
	float* right_Color = right_Color_Buffer + last_index;
	float* right_Factor = right_Factor_Buffer + Width * Height - 1;

	for (int y = 0; y < Height; y++)
	{
		const unsigned char* src_left_prev = src_left_color; // previous pixel in this row
		const float* left_prev_factor = left_Factor;
		const float* left_prev_color = left_Color;

		const unsigned char* src_right_prev = src_right_color;
		const float* right_prev_factor = right_Factor;
		const float* right_prev_color = right_Color;

		// process 1st pixel separately since it has no previous
		{
			//if x = 0 
			*left_Factor++ = 1.f;
			*right_Factor-- = 1.f;
			for (int c = 0; c < Channels; c++)
			{
				*left_Color++ = *src_left_color++;
				*right_Color-- = *src_right_color--;
			}
		}
		// handle other pixels
		for (int x = 1; x < Width; x++)
		{
			// determine difference in pixel color between current and previous
			// calculation is different depending on number of channels
			int left_diff = getDiffFactor(src_left_color, src_left_prev, Channels);
			src_left_prev = src_left_color;

			int right_diff = getDiffFactor(src_right_color, src_right_color - Channels, Channels);
			src_right_prev = src_right_color;

			float left_alpha_f = range_table_f[left_diff];
			float right_alpha_f = range_table_f[right_diff];
			*left_Factor++ = inv_alpha_f + left_alpha_f * (*left_prev_factor++);
			*right_Factor-- = inv_alpha_f + right_alpha_f * (*right_prev_factor--);

			for (int c = 0; c < Channels; c++)
			{
				*left_Color++ = (inv_alpha_f * (*src_left_color++) + left_alpha_f * (*left_prev_color++));
				*right_Color-- = (inv_alpha_f * (*src_right_color--) + right_alpha_f * (*right_prev_color--));
			}
		}
	}
	// vertical pass will be applied on top on horizontal pass, while using pixel differences from original image
	// result color stored in 'leftcolor' and vertical pass will use it as source color
	{
		unsigned char* dst_color = Output; // use as temporary buffer  
		const float* leftcolor = left_Color_Buffer;
		const float* leftfactor = left_Factor_Buffer;
		const float* rightcolor = right_Color_Buffer;
		const float* rightfactor = right_Factor_Buffer;

		int width_height = Width * Height;
		for (int i = 0; i < width_height; i++)
		{
			// average color divided by average factor
			float factor = 1.f / ((*leftfactor++) + (*rightfactor++));
			for (int c = 0; c < Channels; c++)
			{

				*dst_color++ = (factor * ((*leftcolor++) + (*rightcolor++)));

			}
		}
	}
}

void CRB_VerticalFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Channels, float * range_table_f, float inv_alpha_f, float* down_Color_Buffer, float* down_Factor_Buffer, float* up_Color_Buffer, float* up_Factor_Buffer)
{

	// Down pass and Up pass 
	int Stride = Width * Channels;
	const unsigned char* src_color_first_hor = Output; // result of horizontal pass filter 
	const unsigned char* src_down_color = Input;
	float* down_color = down_Color_Buffer;
	float* down_factor = down_Factor_Buffer;

	const unsigned char* src_down_prev = src_down_color;
	const float* down_prev_color = down_color;
	const float* down_prev_factor = down_factor;


	int last_index = Stride * Height - 1;
	const unsigned char* src_up_color = Input + last_index;
	const unsigned char* src_color_last_hor = Output + last_index; // result of horizontal pass filter
	float* up_color = up_Color_Buffer + last_index;
	float* up_factor = up_Factor_Buffer + (Width * Height - 1);

	const float* up_prev_color = up_color;
	const float* up_prev_factor = up_factor;

	// 1st line done separately because no previous line
	{
		//if y=0 
		for (int x = 0; x < Width; x++)
		{
			*down_factor++ = 1.f;
			*up_factor-- = 1.f;
			for (int c = 0; c < Channels; c++)
			{
				*down_color++ = *src_color_first_hor++;
				*up_color-- = *src_color_last_hor--;
			}
			src_down_color += Channels;
			src_up_color -= Channels;
		}
	}
	// handle other lines 
	for (int y = 1; y < Height; y++)
	{
		for (int x = 0; x < Width; x++)
		{
			// determine difference in pixel color between current and previous
			// calculation is different depending on number of channels
			int down_diff = getDiffFactor(src_down_color, src_down_prev, Channels);
			src_down_prev += Channels;
			src_down_color += Channels;
			src_up_color -= Channels;
			int up_diff = getDiffFactor(src_up_color, src_up_color + Stride, Channels);
			float down_alpha_f = range_table_f[down_diff];
			float up_alpha_f = range_table_f[up_diff];

			*down_factor++ = inv_alpha_f + down_alpha_f * (*down_prev_factor++);
			*up_factor-- = inv_alpha_f + up_alpha_f * (*up_prev_factor--);

			for (int c = 0; c < Channels; c++)
			{
				*down_color++ = inv_alpha_f * (*src_color_first_hor++) + down_alpha_f * (*down_prev_color++);
				*up_color-- = inv_alpha_f * (*src_color_last_hor--) + up_alpha_f * (*up_prev_color--);
			}
		}
	}

	// average result of vertical pass is written to output buffer
	{
		unsigned char *dst_color = Output;
		const float* downcolor = down_Color_Buffer;
		const float* downfactor = down_Factor_Buffer;
		const float* upcolor = up_Color_Buffer;
		const float* upfactor = up_Factor_Buffer;

		int width_height = Width * Height;
		for (int i = 0; i < width_height; i++)
		{
			// average color divided by average factor
			float factor = 1.f / ((*upfactor++) + (*downfactor++));
			for (int c = 0; c < Channels; c++)
			{
				*dst_color++ = (factor * ((*upcolor++) + (*downcolor++)));
			}
		}
	}
}

// memory must be reserved before calling image filter
// this implementation of filter uses plain C++, single threaded
// channel count may be 1 to 4 (see the asserts below)
void    CRBFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Stride, float sigmaSpatial, float sigmaRange)
{
	int Channels = Stride / Width;
	int reserveWidth = Width;
	int reserveHeight = Height;
	// basic sanity check
	_ASSERT(Input);
	_ASSERT(Output);
	_ASSERT(reserveWidth >= 10 && reserveWidth < 10000);
	_ASSERT(reserveHeight >= 10 && reserveHeight < 10000);
	_ASSERT(Channels >= 1 && Channels <= 4);

	int reservePixels = reserveWidth * reserveHeight;
	int numberOfPixels = reservePixels * Channels;

	float* leftColorBuffer = (float*)calloc(sizeof(float)*numberOfPixels, 1);
	float* leftFactorBuffer = (float*)calloc(sizeof(float)*reservePixels, 1);
	float* rightColorBuffer = (float*)calloc(sizeof(float)*numberOfPixels, 1);
	float* rightFactorBuffer = (float*)calloc(sizeof(float)*reservePixels, 1);

	if (leftColorBuffer == NULL || leftFactorBuffer == NULL || rightColorBuffer == NULL || rightFactorBuffer == NULL)
	{
		if (leftColorBuffer)  free(leftColorBuffer);
		if (leftFactorBuffer) free(leftFactorBuffer);
		if (rightColorBuffer) free(rightColorBuffer);
		if (rightFactorBuffer) free(rightFactorBuffer);

		return;
	}
	float* downColorBuffer = leftColorBuffer;
	float* downFactorBuffer = leftFactorBuffer;
	float* upColorBuffer = rightColorBuffer;
	float* upFactorBuffer = rightFactorBuffer;
	// compute a lookup table
	// alpha_f is the per-step decay of the recursive filter along a row/column;
	// inv_alpha_f is the weight given to the current input pixel
	float alpha_f = static_cast<float>(exp(-sqrt(2.0) / (sigmaSpatial * 255)));
	float inv_alpha_f = 1.f - alpha_f;

	// range_table_f[d] = alpha_f * exp(-d / (sigmaRange * 255)): the propagation weight
	// for a colour difference d (0..255) between neighbouring pixels
	float range_table_f[255 + 1];
	float inv_sigma_range = 1.0f / (sigmaRange * 255);

	float ii = 0.f;
	for (int i = 0; i <= 255; i++, ii -= 1.f)
	{
		range_table_f[i] = alpha_f * exp(ii * inv_sigma_range);
	}
	CRB_HorizontalFilter(Input, Output, Width, Height, Channels, range_table_f, inv_alpha_f, leftColorBuffer, leftFactorBuffer, rightColorBuffer, rightFactorBuffer);

	CRB_VerticalFilter(Input, Output, Width, Height, Channels, range_table_f, inv_alpha_f, downColorBuffer, downFactorBuffer, upColorBuffer, upFactorBuffer);

	if (leftColorBuffer)
	{
		free(leftColorBuffer);
		leftColorBuffer = NULL;
	}

	if (leftFactorBuffer)
	{
		free(leftFactorBuffer);
		leftFactorBuffer = NULL;
	}

	if (rightColorBuffer)
	{
		free(rightColorBuffer);
		rightColorBuffer = NULL;
	}

	if (rightFactorBuffer)
	{
		free(rightFactorBuffer);
		rightFactorBuffer = NULL;
	}
}

Test program

#include <opencv2/opencv.hpp>
#include "fastbilateralFilter.h"
#include <iostream>
#include <sys\timeb.h>

using namespace cv;
using namespace std;

Mat bilateralMat;   // output of OpenCV's bilateralFilter
// showMat is the author's custom display helper (defined in the full demo further down); imshow works as well

// get the current system time in milliseconds
long long getSystemTime() {
	timeb t;
	ftime(&t);
	return t.time * 1000 + t.millitm;
}
int main()
{
    
	
	Mat  lammy  = imread("images/1.jpg", 1 );
	long long t_start = getSystemTime();
	bilateralFilter(lammy, bilateralMat, -1, 50, 15 );
	long long t_end = getSystemTime();
	showMat("bilateralMat opencv", 0.6, bilateralMat);

	Mat bilateralMat2;
	lammy.copyTo(bilateralMat2);

	long long t_start2 = getSystemTime();
	int channel = lammy.channels();
	CRBFilter(lammy.data, bilateralMat2.data, lammy.cols, lammy.rows, lammy.cols*channel, 0.1, 0.1);
	long long t_end2 = getSystemTime();
	showMat("beutyWindow", 0.6, bilateralMat2);
	
	cout << "time = " << t_end - t_start << endl;
	cout << "time2 = " << t_end2 - t_start2 << endl;
}

showMat is just a custom helper that displays a Mat; if you want to run the test yourself, you can replace it with imshow. <sys\timeb.h> is used to obtain the system time so that the speed of OpenCV's bilateralFilter can be compared with that of the fast bilateral filter. Since the two filters take different parameters, tune them yourself until the two results look roughly the same, and then compare the times. The test image resolution is 1080x1440; the measured times are shown in the figure:

Blending

After filtering, the skin becomes very smooth, but some facial texture detail is lost along the way, so the original image and the filtered image are blended with a weighted sum. OpenCV's addWeighted is called directly for this.
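
The blend itself is a single call; a minimal sketch (the variable names and the value 0.6 are my own illustration, not from the original post; original, smoothed and beauty are cv::Mat):

// beauty = (1 - alpha) * original + alpha * smoothed
float alpha = 0.6f;  // 0 keeps the original image, 1 keeps only the smoothed image
addWeighted(original, 1.0f - alpha, smoothed, alpha, 0.0, beauty);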

The complete program is as follows:

// opencvstudy.cpp : defines the entry point for the console application.
//

#include "stdafx.h"
#include "videoUtil.h"
#include "ImagepixelUtil.h"
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sys\timeb.h>

#include "fastbilateralFilter.h"
using namespace cv;
using namespace std;
const char *beutyWindow = "美颜";
Mat bilateralMat , bilateralMat2;
Mat beauty;

void showMat(const char* a, float smallRatio, Mat mat) {

	int h = mat.rows;
	int w = mat.cols;

	// with flag 0 (resizable window) the displayed image scales with the window;
	// with flag 1 it does not, and an image larger than the window is not fully visible
	cvNamedWindow(a, 0);
	cvResizeWindow(a, w * smallRatio, h * smallRatio);
	imshow(a, mat);

}
void showMat(const char* a, Mat mat) {
	int h = mat.rows;
	int w = mat.cols;
	float smallRatio = 1;
	if (h > 1000) {
		smallRatio = h / 800.0;
		//cout << smallRatio << endl;
	}
	// with flag 0 (resizable window) the displayed image scales with the window;
	// with flag 1 it does not, and an image larger than the window is not fully visible
	cvNamedWindow(a, 0);
	cvResizeWindow(a, w / smallRatio, h / smallRatio);
	imshow(a, mat);
}
// get the current system time in milliseconds
long long getSystemTime() {
	timeb t;
	ftime(&t);
	return t.time * 1000 + t.millitm;
}
void onChange(int pos, void* userdata)
{
	
	Mat *lammy = (Mat*)userdata;
	float alpha1 = pos / 100.0;
	
	long  t_start = getSystemTime();
	addWeighted(*lammy, 1 - alpha1, bilateralMat2, alpha1, 0, beauty);
	long  t_end = getSystemTime();
	cout << "融合 = " << t_end - t_start << endl; 

	showMat(beutyWindow, beauty);
	
};

int main()
{
    
	
	Mat  lammy  = imread("images/1.jpg", 1 );
	long t_start = getSystemTime();
	bilateralFilter(lammy, bilateralMat, -1, 50, 15 );
	long t_end = getSystemTime();
	showMat("bilateralMat opencv", 1, bilateralMat);

	
	lammy.copyTo(bilateralMat2);
	int channel = lammy.channels();

	long t_start2 = getSystemTime();
	CRBFilter(lammy.data, bilateralMat2.data, lammy.cols, lammy.rows, lammy.cols*channel, 0.1, 0.1);
	long t_end2 = getSystemTime();
	showMat("beutyWindow", 1, bilateralMat2);
	
	cout << "time = " << t_end - t_start << endl;
	cout << "time2 = " << t_end2 - t_start2 << endl;

	float alpha1 = 1;
	addWeighted(lammy, alpha1, bilateralMat2, 1 - alpha1, 0, beauty);
	showMat(beutyWindow, 1, beauty);

	int progress = 0;
	createTrackbar("progress = ",beutyWindow, &progress, 100, onChange, &lammy);



	waitKey(0);
	return 0;
}

The results are shown below (the GIF screen-recording software may not be great, so the image looks a bit blurry). For this smaller image, 345x381, the regular bilateral filter takes 302 ms while the fast one takes only 31 ms.

A complete demo is provided; you are welcome to download it.
