opencv bilateral filtering

The bilateral filter is a non-linear filtering method. It is a compromise that combines the spatial proximity of the image with the similarity of pixel values, considering both spatial information and gray-level similarity, so that it removes noise while preserving edges.

The reason why the bilateral filter can preserve the edge (Edge Preserve) well while smoothing and denoising is that the kernel of the filter is generated by two functions:

  • A function determines the coefficient of the filter template by the pixel Euclidean distance (Euclidean distance is the real straight-line distance between two points in n-dimensional space)
  • Another function determines the coefficient of the filter by the gray value difference of the pixel

API

void bilateralFilter(InputArray src, OutputArray dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT )

int d : the diameter of each pixel's neighborhood used during filtering. If this value is non-positive, the function computes it from the fifth parameter, sigmaSpace.
double sigmaColor : the sigma of the filter in the color space. The larger this value, the wider the range of colors within the pixel neighborhood that will be mixed together, producing larger areas of semi-uniform color.
double sigmaSpace : the sigma of the filter in the coordinate space. A larger value means that more distant pixels with similar colors influence each other, so that sufficiently similar colors over a larger area become more uniform. When d > 0, d specifies the neighborhood size regardless of sigmaSpace; otherwise the neighborhood size is proportional to sigmaSpace.
int borderType=BORDER_DEFAULT : the border mode used to infer pixels outside the image, with a default value of BORDER_DEFAULT.
 

Code implementation (refer to CV source code)

1

Before filtering, first calculate the gray value template coefficient

double color_coeff = -0.5 / (color_sigma * color_sigma);
// Range-weight lookup table, indexed by absolute intensity difference.
// BUGFIX: the table must hold channels * 256 entries — the loop below writes
// indices 0 .. channels*256-1, so sizing it channels * 255 overflowed the
// vector by `channels` elements (heap buffer overrun).
vector<double> _color_weight(channels * 256);
double *color_weight = &_color_weight[0];
for (int i = 0; i < channels * 256; i++)
        color_weight[i] = exp(i * i * color_coeff);

The template coefficient of the gray value follows the formula above: it depends on the squared difference between two gray values. At run time, first read the gray value val0 at the center of the template, then read the gray value val at each other template position in turn, and use the difference abs(val - val0) as an index into the color_weight table to look up the gray-value template coefficient.

The distance template is two-dimensional, and the method used here is more ingenious, turning it into one-dimensional.

vector<double> _space_weight(ksize * ksize); // spatial template coefficients
vector<int> _space_ofs(ksize * ksize); // flat offsets of the template window positions

// Build the spatial (domain) template, restricted to a disc of the given radius
    int maxk = 0;
    for (int i = -radius; i <= radius; i++)
    {
        for (int j = -radius; j <= radius; j++)
        {
            double r = sqrt(i*i + j * j);
            if (r > radius)
                continue;
            space_weight[maxk] = exp(r * r * space_coeff); // store the spatial coefficient
            space_ofs[maxk++] = i * temp.step + j * channels; // store the matching flat offset into the padded image
        }
    }

Use a one-dimensional array to store the spatial template coefficients, and use another one-dimensional array to store the template position, corresponding to the coefficients.
The implementation of the entire code is as follows:

void myBilateralFilter(const Mat &src, Mat &dst, int ksize, double space_sigma, double color_sigma)
{
    int channels = src.channels();
    CV_Assert(channels == 1 || channels == 3);
    double space_coeff = -0.5 / (space_sigma * space_sigma);
    double color_coeff = -0.5 / (color_sigma * color_sigma);
    int radius = ksize / 2;
    Mat temp;
    copyMakeBorder(src, temp, radius, radius, radius, radius, BorderTypes::BORDER_REFLECT);
    vector<double> _color_weight(channels * 256); // 存放差值的平方
    vector<double> _space_weight(ksize * ksize); // 空间模板系数
    vector<int> _space_ofs(ksize * ksize); // 模板窗口的坐标
    double *color_weight = &_color_weight[0];
    double *space_weight = &_space_weight[0];
    int    *space_ofs = &_space_ofs[0];
    for (int i = 0; i < channels * 256; i++)
        color_weight[i] = exp(i * i * color_coeff);
    // 生成空间模板
    int maxk = 0;
    for (int i = -radius; i <= radius; i++)
    {
        for (int j = -radius; j <= radius; j++)
        {
            double r = sqrt(i*i + j * j);
            if (r > radius)
                continue;
            space_weight[maxk] = exp(r * r * space_coeff); // 存放模板系数
            space_ofs[maxk++] = i * temp.step + j * channels; // 存放模板的位置,和模板系数相对应
        }
    }
    // 滤波过程
    for (int i = 0; i < src.rows; i++)
    {
        const uchar *sptr = temp.data + (i + radius) * temp.step + radius * channels;
        uchar *dptr = dst.data + i * dst.step;
        if (channels == 1)
        {
            for (int j = 0; j < src.cols; j++)
            {
                double sum = 0, wsum = 0;
                int val0 = sptr[j]; // 模板中心位置的像素
                for (int k = 0; k < maxk; k++)
                {
                    int val = sptr[j + space_ofs[k]];
                    double w = space_weight[k] * color_weight[abs(val - val0)]; // 模板系数 = 空间系数 * 灰度值系数
                    sum += val * w;
                    wsum += w;
                }
                dptr[j] = (uchar)cvRound(sum / wsum);
            }
        }
        else if (channels == 3)
        {
            for (int j = 0; j < src.cols * 3; j+=3)
            {
                double sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
                int b0 = sptr[j];
                int g0 = sptr[j + 1];
                int r0 = sptr[j + 2];
                for (int k = 0; k < maxk; k++)
                {
                    const uchar *sptr_k = sptr + j + space_ofs[k];
                    int b = sptr_k[0];
                    int g = sptr_k[1];
                    int r = sptr_k[2];
                    double w = space_weight[k] * color_weight[abs(b - b0) + abs(g - g0) + abs(r - r0)];
                    sum_b += b * w;
                    sum_g += g * w;
                    sum_r += r * w;
                    wsum += w;
                }
                wsum = 1.0f / wsum;
                b0 = cvRound(sum_b * wsum);
                g0 = cvRound(sum_g * wsum);
                r0 = cvRound(sum_r * wsum);
                dptr[j] = (uchar)b0;
                dptr[j + 1] = (uchar)g0;
                dptr[j + 2] = (uchar)r0;
            }
        }
    }
}

Note how the image pixel values are accessed: first obtain a pointer to the start of each row

const uchar *sptr = temp.data + (i + radius) * temp.step + radius * channels;
uchar *dptr = dst.data + i * dst.step;

In the filtering loop, each template position is read through its precomputed offset stored in space_ofs

int val = sptr[j + space_ofs[k]];

This implementation method greatly reduces the time complexity of filtering.

2

//获取色彩模板(值域模板)
///
// Build the range (intensity) kernel of a bilateral filter: for every
// possible absolute gray-level difference d in [0, 255], append the Gaussian
// weight exp(-d^2 / (2 * sigma^2)) to colorMask. Note: values are appended,
// the vector is not cleared first.
void getColorMask(std::vector<double>& colorMask, double colorSigma) {
	const double denom = 2 * colorSigma * colorSigma;
	for (int diff = 0; diff < 256; ++diff) {
		const double weight = exp(-(diff * diff) / denom);
		colorMask.push_back(weight);
	}
}
//获取高斯模板(空间模板)
// Build the spatial (domain) Gaussian kernel of size wsize into Mask (CV_64F).
// Each element is exp(-(dx^2 + dy^2) / (2 * sigma^2)) measured from the
// window center. The kernel is intentionally NOT normalized here: the caller
// (bilateralfiter) normalizes the combined spatial*range weights per pixel.
// FIX: removed the dead `sum` accumulator — it was computed but never used.
void getGausssianMask(cv::Mat& Mask, cv::Size wsize, double spaceSigma) {
	Mask.create(wsize, CV_64F);
	int h = wsize.height;
	int w = wsize.width;
	int center_h = (h - 1) / 2;
	int center_w = (w - 1) / 2;
	double x, y;

	for (int i = 0; i < h; ++i) {
		y = pow(i - center_h, 2); // squared vertical distance from the center
		double* Maskdate = Mask.ptr<double>(i);
		for (int j = 0; j < w; ++j) {
			x = pow(j - center_w, 2); // squared horizontal distance from the center
			Maskdate[j] = exp(-(x + y) / (2 * spaceSigma * spaceSigma));
		}
	}
}
//双边滤波
// Bilateral filter (naive per-pixel implementation) for 8-bit 1- or
// 3-channel images. For every pixel it builds the combined
// spatial-Gaussian * range-Gaussian weight mask, normalizes it, and
// convolves the window with it. For 3-channel input each channel gets
// its own range weights (Mask0/Mask1/Mask2 for B/G/R).
//
// wsize       filter window size (odd width/height expected)
// spaceSigma  sigma of the spatial (domain) Gaussian
// colorSigma  sigma of the range (intensity) Gaussian
void bilateralfiter(cv::Mat& src, cv::Mat& dst, cv::Size wsize, double spaceSigma, double colorSigma) {
	CV_Assert(src.channels() == 1 || src.channels() == 3); // only gray/BGR 8-bit handled below
	cv::Mat spaceMask;
	std::vector<double> colorMask;
	cv::Mat Mask0 = cv::Mat::zeros(wsize, CV_64F);
	cv::Mat Mask1 = cv::Mat::zeros(wsize, CV_64F);
	cv::Mat Mask2 = cv::Mat::zeros(wsize, CV_64F);

	getGausssianMask(spaceMask, wsize, spaceSigma); // spatial (domain) template
	getColorMask(colorMask, colorSigma);            // range (intensity) template
	int hh = (wsize.height - 1) / 2;
	int ww = (wsize.width - 1) / 2;
	dst.create(src.size(), src.type());
	// Pad the border by replicating edge pixels
	cv::Mat Newsrc;
	cv::copyMakeBorder(src, Newsrc, hh, hh, ww, ww, cv::BORDER_REPLICATE);

	for (int i = hh; i < src.rows + hh; ++i) {
		for (int j = ww; j < src.cols + ww; ++j) {
			double sum[3] = { 0 };
			int graydiff[3] = { 0 };
			double space_color_sum[3] = { 0.0 };

			// Pass 1: combined weight mask for this pixel, plus its sum for normalization
			for (int r = -hh; r <= hh; ++r) {
				for (int c = -ww; c <= ww; ++c) {
					if (src.channels() == 1) {
						int centerPix = Newsrc.at<uchar>(i, j);
						int pix = Newsrc.at<uchar>(i + r, j + c);
						graydiff[0] = abs(pix - centerPix);
						double colorWeight = colorMask[graydiff[0]];
						Mask0.at<double>(r + hh, c + ww) = colorWeight * spaceMask.at<double>(r + hh, c + ww); // filter mask
						space_color_sum[0] = space_color_sum[0] + Mask0.at<double>(r + hh, c + ww);

					}
					else if (src.channels() == 3) {
						cv::Vec3b centerPix = Newsrc.at<cv::Vec3b>(i, j);
						cv::Vec3b bgr = Newsrc.at<cv::Vec3b>(i + r, j + c);
						graydiff[0] = abs(bgr[0] - centerPix[0]); graydiff[1] = abs(bgr[1] - centerPix[1]); graydiff[2] = abs(bgr[2] - centerPix[2]);
						double colorWeight0 = colorMask[graydiff[0]];
						double colorWeight1 = colorMask[graydiff[1]];
						double colorWeight2 = colorMask[graydiff[2]];
						Mask0.at<double>(r + hh, c + ww) = colorWeight0 * spaceMask.at<double>(r + hh, c + ww); // per-channel filter masks
						Mask1.at<double>(r + hh, c + ww) = colorWeight1 * spaceMask.at<double>(r + hh, c + ww);
						Mask2.at<double>(r + hh, c + ww) = colorWeight2 * spaceMask.at<double>(r + hh, c + ww);
						space_color_sum[0] = space_color_sum[0] + Mask0.at<double>(r + hh, c + ww);
						space_color_sum[1] = space_color_sum[1] + Mask1.at<double>(r + hh, c + ww);
						space_color_sum[2] = space_color_sum[2] + Mask2.at<double>(r + hh, c + ww);

					}
				}
			}

			// Normalize the filter mask(s).
			// FIX: use in-place /= — the original `Mask0 = Mask0 / s` allocated
			// a brand-new matrix for every single pixel.
			if (src.channels() == 1)
				Mask0 /= space_color_sum[0];
			else {
				Mask0 /= space_color_sum[0];
				Mask1 /= space_color_sum[1];
				Mask2 /= space_color_sum[2];
			}

			// Pass 2: apply the normalized mask(s) to the window
			for (int r = -hh; r <= hh; ++r) {
				for (int c = -ww; c <= ww; ++c) {

					if (src.channels() == 1) {
						sum[0] = sum[0] + Newsrc.at<uchar>(i + r, j + c) * Mask0.at<double>(r + hh, c + ww);
					}
					else if (src.channels() == 3) {
						cv::Vec3b bgr = Newsrc.at<cv::Vec3b>(i + r, j + c);
						sum[0] = sum[0] + bgr[0] * Mask0.at<double>(r + hh, c + ww); // B
						sum[1] = sum[1] + bgr[1] * Mask1.at<double>(r + hh, c + ww); // G
						sum[2] = sum[2] + bgr[2] * Mask2.at<double>(r + hh, c + ww); // R
					}
				}
			}

			// Clamp to the valid 8-bit range
			for (int k = 0; k < src.channels(); ++k) {
				if (sum[k] < 0)
					sum[k] = 0;
				else if (sum[k] > 255)
					sum[k] = 255;
			}
			// BUGFIX: round to nearest instead of truncating, matching the first
			// implementation (cvRound) and OpenCV's own conversion behavior.
			if (src.channels() == 1)
			{
				dst.at<uchar>(i - hh, j - ww) = static_cast<uchar>(cvRound(sum[0]));
			}
			else if (src.channels() == 3)
			{
				cv::Vec3b bgr = { static_cast<uchar>(cvRound(sum[0])), static_cast<uchar>(cvRound(sum[1])), static_cast<uchar>(cvRound(sum[2])) };
				dst.at<cv::Vec3b>(i - hh, j - ww) = bgr;
			}

		}
	}

}

 

Guess you like

Origin blog.csdn.net/m0_61897853/article/details/125071547