OpenCV学习(5): 图像畸变校正

畸变校正


畸变

摄像机的成像过程主要是主要涉及到几个坐标系的变换(具体过程可以参考相机模型):

成像过程中的坐标系变换链:物体 → 世界坐标系 → 摄像机坐标系 → 图像物理坐标系 → 图像像素坐标系

摄像机成像畸变的产生是其“天生”的、不可避免的,这主要是由透镜成像原理导致的(其畸变的原理可以参考相机模型)。它的畸变按照原理可以分解为切向畸变和径向畸变。

$$\begin{bmatrix} x_d \\ y_d \end{bmatrix} = \left(1 + k_1 r^2 + k_2 r^4 + k_3 r^6\right)\begin{bmatrix} x \\ y \end{bmatrix} + \begin{bmatrix} 2p_1 xy + p_2\left(r^2 + 2x^2\right) \\ p_1\left(r^2 + 2y^2\right) + 2p_2 xy \end{bmatrix}$$

其中,$[x_d, y_d]$ 为畸变后的位置,$[x, y]$ 为畸变前的位置,$k_i, p_i$ 为畸变系数。当然,畸变系数远不止这四个,但通常情况下仅考虑这四个即可。

校正原理

畸变校正的关键之处就是要找到畸变前后的点位置的对应关系。假定未畸变前,图像中各点的像素坐标可以通过公式得到:

$$\begin{cases} x = X_c / Z_c \\ y = Y_c / Z_c \end{cases} \qquad \begin{cases} u = f_x x + c_x \\ v = f_y y + c_y \end{cases}$$

如果不存在畸变,那么理想情况下,摄像机成像的坐标转换就可以按照上式来进行计算。在有畸变的情况下,畸变后的坐标:
$$\begin{bmatrix} x_d \\ y_d \end{bmatrix} = \left(1 + k_1 r^2 + k_2 r^4 + k_3 r^6\right)\begin{bmatrix} x \\ y \end{bmatrix} + \begin{bmatrix} 2p_1 xy + p_2\left(r^2 + 2x^2\right) \\ p_1\left(r^2 + 2y^2\right) + 2p_2 xy \end{bmatrix}$$

其中 $r^2 = x^2 + y^2$。
与此同时可以得到畸变图像的各个像素的新坐标为:
$$\begin{cases} u_d = f_x x_d + c_x \\ v_d = f_y y_d + c_y \end{cases}$$

由此,我们得到了一个图像从摄像机坐标系,然后经过畸变,最后得到(畸变)图像的整个坐标变换过程中的各个点的映射关系。

畸变校正的目的就是要找到对应点对的像素值关系。将畸变后的位置的像素值赋给原位置。即:

$$\hat{f}(u, v) = f\big(h(u, v)\big) = f(u_d, v_d)$$

其中 $\hat{f}$ 为校正后图像,$f$ 为畸变图像,$h$ 为由校正图像像素 $(u,v)$ 到畸变图像像素 $(u_d, v_d)$ 的映射。

注意,map1/map2 中存储的正是由非畸变(校正后)图像像素到畸变图像像素的映射:
$$(u, v) \xrightarrow{\;h\;} (u_d, v_d)$$

值得注意的是,由于存在畸变,畸变前坐标为整数,畸变后并不一定为整数。而在图像像素坐标系中,坐标都是整数,因此在这个赋值过程中往往存在取整或者插值操作。

OpenCV畸变校正源码

initUndistortRectifyMap 函数计算的就是从未畸变(校正后)图像到畸变图像的映射关系(map1, map2)。

/*
@param cameraMatrix 相机矩阵 [{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}].
@param distCoeffs 畸变系数向量 (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]]),默认值为0
@param R 可选的校正矩阵 (3x3). 由 #stereoRectify 计算得来的 R1 或 R2 可以在此处使用。默认为单位矩阵;在 cvInitUndistortMap 中,R 被假定为单位矩阵。
@param newCameraMatrix 新的相机矩阵  [{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}]
@param size 去畸变后图像尺寸.
@param m1type 第一个输出 map的类型:CV_32FC1, CV_32FC2 or CV_16SC2
@param map1 第一个输出 map.
@param map2 第二个输出 map.
 */

#include <opencv2/opencv.hpp>

void cv::initUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs,
                              InputArray _matR, InputArray _newCameraMatrix,
                              Size size, int m1type, OutputArray _map1, OutputArray _map2 )
{
    // Builds the undistort+rectify lookup maps consumed by cv::remap: for every
    // pixel (u,v) of the corrected output image, map1/map2 store the source
    // position (u_d,v_d) in the original (distorted) image.
    //
    // @param _cameraMatrix     original intrinsics A = [fx 0 cx; 0 fy cy; 0 0 1]
    // @param _distCoeffs       distortion coefficients
    //                          (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,taux,tauy]]]]);
    //                          empty means all zeros
    // @param _matR             optional 3x3 rectification matrix (e.g. R1/R2 from
    //                          stereoRectify); empty means identity
    // @param _newCameraMatrix  intrinsics of the corrected image; empty means a
    //                          default matrix derived from A
    // @param size              size of the corrected (undistorted) image
    // @param m1type            type of map1: CV_16SC2, CV_32FC1 or CV_32FC2
    //                          (values <= 0 default to CV_16SC2)
    // @param _map1, _map2      output maps

    // Fetch Mat headers for all input arrays.
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
    Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat();

    // Default type for the first map is the compact fixed-point CV_16SC2.
    if( m1type <= 0 )
        m1type = CV_16SC2;
    CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );
    _map1.create( size, m1type );

    // For CV_16SC2, map1 holds only the integer part of the coordinates while
    // map2 (CV_16UC1) holds an index into remap's fixed-point interpolation
    // table — so a second map is still required; only CV_32FC2 packs both
    // coordinates into map1 alone.
    Mat map1 = _map1.getMat(), map2;
    if( m1type != CV_32FC2 )
    {
        _map2.create( size, m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
        map2 = _map2.getMat();
    }
    else
        _map2.release();


    Mat_<double> R = Mat_<double>::eye(3, 3);
    Mat_<double> A = Mat_<double>(cameraMatrix), Ar;

    // If no new camera matrix is supplied, derive a default one from A.
    if( !newCameraMatrix.empty() )
        Ar = Mat_<double>(newCameraMatrix);
    else
        Ar = getDefaultNewCameraMatrix( A, size, true );

    // _matR defaults to the identity (no rectification).
    if( !matR.empty() )
        R = Mat_<double>(matR);

    // Distortion coefficients default to a 14x1 vector of zeros.
    if( !distCoeffs.empty() )
        distCoeffs = Mat_<double>(distCoeffs);
    else
    {
        distCoeffs.create(14, 1, CV_64F);
        distCoeffs = 0.;
    }

    CV_Assert( A.size() == Size(3,3) && A.size() == R.size() );
    CV_Assert( Ar.size() == Size(3,3) || Ar.size() == Size(4, 3));

    // iR = (Ar*R)^-1, inverted via LU decomposition: it maps corrected-image
    // pixel coordinates back to (rectified) normalized camera coordinates.
    Mat_<double> iR = (Ar.colRange(0,3)*R).inv(DECOMP_LU);
    const double* ir = &iR(0,0); // raw row-major pointer to the 3x3 inverse

    // Principal point (u0,v0) and focal lengths (fx,fy) of the ORIGINAL camera —
    // these project the distorted normalized point back into the source image.
    double u0 = A(0, 2),  v0 = A(1, 2);
    double fx = A(0, 0),  fy = A(1, 1);

    // Up to 14 distortion coefficients are supported; most calibrations use
    // only the first four (k1,k2,p1,p2).
    CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
               distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
               distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1) ||
               distCoeffs.size() == Size(1, 12) || distCoeffs.size() == Size(12, 1) ||
               distCoeffs.size() == Size(1, 14) || distCoeffs.size() == Size(14, 1));

    // Ensure the coefficient vector is contiguous so it can be read by pointer.
    if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
        distCoeffs = distCoeffs.t();

    // cols + rows - 1 equals the coefficient count (the vector is 1xN or Nx1);
    // coefficients beyond the supplied count default to zero.
    const double* const distPtr = distCoeffs.ptr<double>();
    double k1 = distPtr[0];
    double k2 = distPtr[1];
    double p1 = distPtr[2];
    double p2 = distPtr[3];
    double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? distPtr[4] : 0.;
    double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[5] : 0.;
    double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[6] : 0.;
    double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[7] : 0.;
    double s1 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[8] : 0.;
    double s2 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[9] : 0.;
    double s3 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[10] : 0.;
    double s4 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[11] : 0.;
    double tauX = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[12] : 0.;
    double tauY = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[13] : 0.;

    // tauX/tauY model the trapezoidal distortion of a tilted image sensor;
    // with both zero, matTilt stays the identity.
    // Matrix for trapezoidal distortion of tilted image sensor
    cv::Matx33d matTilt = cv::Matx33d::eye();
    cv::detail::computeTiltProjectionMatrix(tauX, tauY, &matTilt);

    for( int i = 0; i < size.height; i++ )
    {
        float* m1f = map1.ptr<float>(i);
        float* m2f = map2.empty() ? 0 : map2.ptr<float>(i);
        // Integer aliases of the same rows, used for the CV_16SC2 layout.
        short* m1 = (short*)m1f;
        ushort* m2 = (ushort*)m2f;

        // Back-project pixel (j=0, row i) through iR into homogeneous camera
        // coordinates (_x,_y,_w). Note the pixel is (column j, row i), not (i,j).
        double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];

        // Moving one column right adds the first column of iR — an incremental
        // update that avoids a full matrix multiply per pixel.
        for( int j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )
        {
            // Perspective division: normalize onto the Z = 1 plane.
            double w = 1./_w, x = _x*w, y = _y*w;
            // Apply the distortion model: rational radial factor kr, tangential
            // terms (p1,p2) and thin-prism terms (s1..s4).
            double x2 = x*x, y2 = y*y;
            double r2 = x2 + y2, _2xy = 2*x*y;
            double kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2)/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
            double xd = (x*kr + p1*_2xy + p2*(r2 + 2*x2) + s1*r2+s2*r2*r2);
            double yd = (y*kr + p1*(r2 + 2*y2) + p2*_2xy + s3*r2+s4*r2*r2);
           // Re-project the distorted point (xd,yd) to distorted-image pixel
           // coordinates (u,v) via the tilt matrix and the original intrinsics.
            cv::Vec3d vecTilt = matTilt*cv::Vec3d(xd, yd, 1);
            double invProj = vecTilt(2) ? 1./vecTilt(2) : 1;
            double u = fx*invProj*vecTilt(0) + u0;
            double v = fy*invProj*vecTilt(1) + v0;
            // Store (u,v) into the output maps in the requested format.
            if( m1type == CV_16SC2 )
            {
                // Fixed point: integer parts in map1, and the fractional parts
                // combined into an interpolation-table index in map2.
                int iu = saturate_cast<int>(u*INTER_TAB_SIZE);
                int iv = saturate_cast<int>(v*INTER_TAB_SIZE);
                m1[j*2] = (short)(iu >> INTER_BITS);
                m1[j*2+1] = (short)(iv >> INTER_BITS);
                m2[j] = (ushort)((iv & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + 
                        (iu & (INTER_TAB_SIZE-1)));
            }
            else if( m1type == CV_32FC1 )
            {
                // Two separate single-channel float maps: u in map1, v in map2.
                m1f[j] = (float)u;
                m2f[j] = (float)v;
            }
            else
            {
                // Single two-channel float map holding (u,v) pairs.
                m1f[j*2] = (float)u;
                m1f[j*2+1] = (float)v;
            }
        }
    }
}

猜你喜欢

转载自blog.csdn.net/xholes/article/details/80599802
今日推荐