ORB-SLAM中 ORBextractor.cpp代码解读

未经允许 禁止转载

转载请注明  CSDN Min220 原文网址

欢迎学习讨论交流!有误之处指出!

using namespace cv;
using namespace std;

namespace ORB_SLAM
{

	// Harris detector free parameter k in R = det(M) - k*trace(M)^2.
	const float HARRIS_K = 0.04f;

	// Side length of the square patch used for orientation / descriptors.
	const int PATCH_SIZE = 31;
	// Patch radius: pixels are sampled in [-15, 15] around the keypoint.
	const int HALF_PATCH_SIZE = 15;
	// Border kept free around each pyramid level (see ComputePyramid / ComputeKeyPoints).
	const int EDGE_THRESHOLD = 16;

	////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/*---------------------------------------get the HarrisResponse-------------------------------------------*/

	static void
		HarrisResponses(const Mat& img, vector<KeyPoint>& pts, int blockSize, float harris_k)
	{
		//CV_Assert() means if the value in() is false,return a mistake message
		CV_Assert( img.type() == CV_8UC1 && blockSize*blockSize <= 2048 );

		size_t ptidx, ptsize = pts.size();

		const uchar* ptr00 = img.ptr<uchar>();        // get the ptr00 row in image
		int step = (int)(img.step/img.elemSize1());   // the number of bytes of a element' one channel
		int r = blockSize/2;                           // circle radius R 

		float scale = (1 << 2) * blockSize * 255.0f;      // 255.0f f means float,we can ignore,1.0f is same
		scale = 1.0f / scale;
		float scale_sq_sq = scale * scale * scale * scale;

		AutoBuffer<int> ofsbuf(blockSize*blockSize);
		int* ofs = ofsbuf;
		for( int i = 0; i < blockSize; i++ )
			for( int j = 0; j < blockSize; j++ )
				ofs[i*blockSize + j] = (int)(i*step + j);

		for( ptidx = 0; ptidx < ptsize; ptidx++ )
		{
			int x0 = cvRound(pts[ptidx].pt.x - r);
			int y0 = cvRound(pts[ptidx].pt.y - r);

			const uchar* ptr0 = ptr00 + y0*step + x0;
			int a = 0, b = 0, c = 0;

			for( int k = 0; k < blockSize*blockSize; k++ )
			{
				const uchar* ptr = ptr0 + ofs[k];
				int Ix = (ptr[1] - ptr[-1])*2 + (ptr[-step+1] - ptr[-step-1]) + (ptr[step+1] - ptr[step-1]);
				int Iy = (ptr[step] - ptr[-step])*2 + (ptr[step-1] - ptr[-step-1]) + (ptr[step+1] - ptr[-step+1]);
				a += Ix*Ix;
				b += Iy*Iy;
				c += Ix*Iy;
			}

			// Harris response function
			pts[ptidx].response = ((float)a * b - (float)c * c -
				harris_k * ((float)a + b) * ((float)a + b))*scale_sq_sq;
		}
	}

	////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/*------------------------------------ keypoint orientation ----------------------------------------------*/

// Orientation of a keypoint by the intensity-centroid method: compute the
// image moments m_10 and m_01 over the circular patch around `pt` (row
// half-widths given by u_max) and return atan2(m_01, m_10) in degrees.
static float IC_Angle(const Mat& image, Point2f pt,  const vector<int> & u_max)
{
	// Pointer to the (rounded) keypoint pixel; the patch is addressed
	// relative to it.
	const uchar* center = &image.at<uchar> (cvRound(pt.y), cvRound(pt.x));
	const int step = (int)image.step1();

	int m_01 = 0;
	int m_10 = 0;

	// The centre row (v = 0) contributes only to m_10.
	for (int u = -HALF_PATCH_SIZE; u <= HALF_PATCH_SIZE; ++u)
		m_10 += u * center[u];

	// Remaining rows of the circular patch: handle the symmetric pair of
	// rows +v and -v together.
	for (int v = 1; v <= HALF_PATCH_SIZE; ++v)
	{
		const int d = u_max[v];            // half-width of the circle at this row
		int v_sum = 0;
		for (int u = -d; u <= d; ++u)
		{
			const int val_plus  = center[u + v*step];
			const int val_minus = center[u - v*step];
			// Rows above/below the centre carry opposite sign in the v moment...
			v_sum += (val_plus - val_minus);
			// ...while u already carries its own sign for the u moment.
			m_10 += u * (val_plus + val_minus);
		}
		m_01 += v * v_sum;
	}

	return fastAtan2((float)m_01, (float)m_10);
}


	////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/*-----------------------------------compute the descriptors------------------------------------------*/



	const float factorPI = (float)(CV_PI/180.f);
	static void computeOrbDescriptor(const KeyPoint& kpt,
		const Mat& img, const Point* pattern,uchar* desc)
	{
		float angle = (float)kpt.angle*factorPI;
		float a = (float)cos(angle), b = (float)sin(angle);


		const uchar* center = &img.at<uchar>(cvRound(kpt.pt.y), cvRound(kpt.pt.x));
		const int step = (int)img.step;


#define GET_VALUE(idx) \
	center[cvRound(pattern[idx].x*b + pattern[idx].y*a)*step + \
	cvRound(pattern[idx].x*a - pattern[idx].y*b)]




		for (int i = 0; i < 32; ++i, pattern += 16)
		{
			int t0, t1, val;
			t0 = GET_VALUE(0); t1 = GET_VALUE(1);
			val = t0 < t1;
			t0 = GET_VALUE(2); t1 = GET_VALUE(3);
			val |= (t0 < t1) << 1;
			t0 = GET_VALUE(4); t1 = GET_VALUE(5);
			val |= (t0 < t1) << 2;
			t0 = GET_VALUE(6); t1 = GET_VALUE(7);
			val |= (t0 < t1) << 3;
			t0 = GET_VALUE(8); t1 = GET_VALUE(9);
			val |= (t0 < t1) << 4;
			t0 = GET_VALUE(10); t1 = GET_VALUE(11);
			val |= (t0 < t1) << 5;
			t0 = GET_VALUE(12); t1 = GET_VALUE(13);
			val |= (t0 < t1) << 6;
			t0 = GET_VALUE(14); t1 = GET_VALUE(15);
			val |= (t0 < t1) << 7;


			desc[i] = (uchar)val;
		}


	#undef GET_VALUE
	}




	// Learned BRIEF sampling pattern: 256 point pairs, stored as 1024 ints
	// (x1, y1, x2, y2 per comparison) within the 31x31 patch.
	// NOTE(review): the 1024 values were omitted ("省略") in the original blog
	// listing, and the closing "};" was swallowed by the comment, which made
	// the file uncompilable. An empty initializer (zero-filled) keeps the
	// definition syntactically valid; restore the real values from the
	// upstream ORB-SLAM source before use.
	static int bit_pattern_31_[256*4] =
	{
		// values omitted in the original post
	};

	//////////////////////////////////////////////////////////////////////////////////////////////////////


	// Construct the extractor: precompute the per-level scale factors, split
	// the total feature budget over the pyramid levels (geometric series),
	// copy the BRIEF sampling pattern, and build the circular-patch row
	// bounds (umax) used for orientation computation.
	//
	//   _nfeatures  : total number of features over all levels
	//   _scaleFactor: scale ratio between consecutive pyramid levels
	//   _nlevels    : number of pyramid levels
	//   _scoreType  : keypoint score type (e.g. ORB::HARRIS_SCORE)
	//   _fastTh     : FAST detector threshold
	ORBextractor::ORBextractor(int _nfeatures, float _scaleFactor, int _nlevels, int _scoreType,
		int _fastTh):
	nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
		scoreType(_scoreType), fastTh(_fastTh)
	{
		/*---------- decide how many features each level gets (geometric series) ----------*/

		// Per-level scale factors and their inverses.
		mvScaleFactor.resize(nlevels);
		mvScaleFactor[0]=1;         // the factor of the first level
		for(int i=1; i<nlevels; i++)
			mvScaleFactor[i]=mvScaleFactor[i-1]*scaleFactor;

		float invScaleFactor = 1.0f/scaleFactor;
		mvInvScaleFactor.resize(nlevels);
		mvInvScaleFactor[0]=1;
		for(int i=1; i<nlevels; i++)
			mvInvScaleFactor[i]=mvInvScaleFactor[i-1]*invScaleFactor;

		mvImagePyramid.resize(nlevels);
		mvMaskPyramid.resize(nlevels);

		mnFeaturesPerLevel.resize(nlevels);         // desired feature count per level
		float factor = (float)(1.0 / scaleFactor);  // scaleFactor is a constructor input
		// First term of the geometric series n*(1-q)/(1-q^L), so the per-level
		// counts sum (approximately) to nfeatures.
		float nDesiredFeaturesPerScale = nfeatures*(1 - factor)/(1 - (float)pow((double)factor, (double)nlevels));

		int sumFeatures = 0;
		for( int level = 0; level < nlevels-1; level++ )
		{
			mnFeaturesPerLevel[level] = cvRound(nDesiredFeaturesPerScale);  // round this level's share
			sumFeatures += mnFeaturesPerLevel[level];                       // running total
			nDesiredFeaturesPerScale *= factor;                             // next (smaller) level's share
		}
		// Last level absorbs whatever rounding left over (clamped to >= 0).
		mnFeaturesPerLevel[nlevels-1] = std::max(nfeatures - sumFeatures, 0);

		/*---------- copy the learned sampling pattern ----------*/
		// copy (InputIterator_First, InputIterator_Last, OutputIterator_Dest)
		const int npoints = 512;
		const Point* pattern0 = (const Point*)bit_pattern_31_;
		std::copy(pattern0, pattern0 + npoints, std::back_inserter(pattern));

		/*---------- This is for orientation ----------*/
		// pre-compute the end of a row in a circular patch

		// umax[v] is the largest |u| inside the circular patch at row v.
		umax.resize(HALF_PATCH_SIZE + 1);

		// Split the quarter circle into two arcs so the result is symmetric
		// in x and y when the dominant orientation is computed.
		int v, v0, vmax = cvFloor(HALF_PATCH_SIZE * sqrt(2.f) / 2 + 1);
		int vmin = cvCeil(HALF_PATCH_SIZE * sqrt(2.f) / 2);
		const double hp2 = HALF_PATCH_SIZE * HALF_PATCH_SIZE;
		for (v = 0; v <= vmax; ++v)
			umax[v] = cvRound(sqrt(hp2 - v * v));                // Pythagoras

		// Make sure we are symmetric, i.e. the patch is a proper circle.
		for (v = HALF_PATCH_SIZE, v0 = 0; v >= vmin; --v)
		{
			while (umax[v0] == umax[v0 + 1])
				++v0;
			umax[v] = v0;
			++v0;
		}
	}


/////////////////////////////////////////////////////////////////////////////////////////////////////////


	/*--------------------------------------------计算定向---------------------------------------------*/
	//求得每一个关键点的角度

	static void computeOrientation(const Mat& image, vector<KeyPoint>& keypoints, const vector<int>& umax)
		
	{
		for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
			keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
		{
			keypoint->angle = IC_Angle(image, keypoint->pt, umax);   // 获得关键点角度
		}
	}


	/*------------------------------------- keypoint computation ---------------------------------------------*/
	// Detect FAST corners level by level on a regular grid of overlapping
	// cells, redistribute the per-cell feature budget from poor cells to rich
	// ones, keep the best-scored corners, and finally assign orientations.
	// Output: allKeypoints[level] holds the keypoints of each pyramid level,
	// in that level's pixel coordinates.
	void ORBextractor::ComputeKeyPoints(vector<vector<KeyPoint> >& allKeypoints)
	{
		allKeypoints.resize(nlevels);

		float imageRatio = (float)mvImagePyramid[0].cols/mvImagePyramid[0].rows;  // image aspect ratio

		for (int level = 0; level < nlevels; ++level)
		{
			const int nDesiredFeatures = mnFeaturesPerLevel[level];

			// Grid size; the factor 5 presumably comes from the paper's
			// "at least five corners per cell" heuristic — TODO confirm.
			const int levelCols = sqrt((float)nDesiredFeatures/(5*imageRatio));
			const int levelRows = imageRatio*levelCols;

			// Bounds of the detection region of this level (image minus border).
			const int minBorderX = EDGE_THRESHOLD;
			const int minBorderY = minBorderX;
			const int maxBorderX = mvImagePyramid[level].cols-EDGE_THRESHOLD;
			const int maxBorderY = mvImagePyramid[level].rows-EDGE_THRESHOLD;

			// Split the detection region into a levelRows x levelCols grid.
			const int W = maxBorderX - minBorderX;
			const int H = maxBorderY - minBorderY;
			const int cellW = ceil((float)W/levelCols);
			const int cellH = ceil((float)H/levelRows);

			const int nCells = levelRows*levelCols;
			const int nfeaturesCell = ceil((float)nDesiredFeatures/nCells);  // feature budget per cell

			// cellKeyPoints[i][j] holds the FAST corners detected in cell (i, j).
			vector<vector<vector<KeyPoint> > > cellKeyPoints(levelRows, vector<vector<KeyPoint> >(levelCols));
			vector<vector<int> > nToRetain(levelRows,vector<int>(levelCols));        // corners to keep per cell
			vector<vector<int> > nTotal(levelRows,vector<int>(levelCols));           // corners found per cell
			vector<vector<bool> > bNoMore(levelRows,vector<bool>(levelCols,false));  // cell has fewer corners than its budget
			vector<int> iniXCol(levelCols);    // x origin of every cell column
			vector<int> iniYRow(levelRows);    // y origin of every cell row
			int nNoMore = 0;          // number of cells below budget
			int nToDistribute = 0;    // leftover budget to spread over richer cells

			// Each cell is padded by 3 pixels on every side (cell size + 6),
			// so corners near cell borders can still be detected.
			float hY = cellH + 6;
			for(int i=0; i<levelRows; i++)
			{
				const float iniY = minBorderY + i*cellH - 3;  // first y of the i-th cell row
				iniYRow[i] = iniY;   // vector<int> iniYRow(levelRows)

				if(i == levelRows-1)  // last row: clamp the cell height
				{
					// hY = 3+Ymax-iniY = 3+Ymax-(Ymin+(levelRows-1)*cellH-3)
					//    = 6+Ymax-Ymin-H+cellH = cellH+6 in the regular case.
					hY = maxBorderY+3-iniY;
					// The cell image spans [iniY, iniY+hY); per the derivation
					// above hY should never be negative, but guard anyway.
					if(hY<=0)
						continue;
				}

				float hX = cellW + 6;
				for(int j=0; j<levelCols; j++)
				{
					float iniX;
					if(i==0)
					{
						iniX = minBorderX + j*cellW - 3;  // mirror of the y computation above
						iniXCol[j] = iniX;
					}
					else
					{
						iniX = iniXCol[j];    // reuse the x origins of the first row
					}

					if(j == levelCols-1)  // last column: clamp the cell width
					{
						hX = maxBorderX+3-iniX;  // same derivation as hY
						if(hX<=0)
							continue;
					}

					// View of the pyramid image restricted to this (padded) cell.
					Mat cellImage = mvImagePyramid[level].rowRange(iniY,iniY+hY).colRange(iniX,iniX+hX);

					Mat cellMask;
					if(!mvMaskPyramid[level].empty())
						cellMask = cv::Mat(mvMaskPyramid[level],Rect(iniX,iniY,hX,hY));  // Rect = top-left corner plus width/height

					cellKeyPoints[i][j].reserve(nfeaturesCell*5);  // the "5 per cell" heuristic again

					/*---------- FAST detection inside the cell ----------*/

					FAST(cellImage,cellKeyPoints[i][j],fastTh,true); // detect FAST corners

					/*CV_EXPORTS void FAST( InputArray image, CV_OUT vector<KeyPoint>& keypoints,
						int threshold, bool nonmaxSuppression=true );*/

					// Too few corners: retry with a lower, fixed threshold.
					if(cellKeyPoints[i][j].size()<=3)
					{
						cellKeyPoints[i][j].clear();

						FAST(cellImage,cellKeyPoints[i][j],7,true);   // threshold 7
					}

					if( scoreType == ORB::HARRIS_SCORE )
					{
						// Compute the Harris corner response as the retention score
						HarrisResponses(cellImage,cellKeyPoints[i][j], 7, HARRIS_K);
					}

					const int nKeys = cellKeyPoints[i][j].size();  // corners actually found in this cell
					nTotal[i][j] = nKeys;

					if(nKeys>nfeaturesCell)   // more corners than this cell's budget
					{
						nToRetain[i][j] = nfeaturesCell;  // keep only the budgeted amount
						bNoMore[i][j] = false;
					}
					else
					{
						nToRetain[i][j] = nKeys;   // keep everything this cell has
						nToDistribute += nfeaturesCell-nKeys;   // unused budget, to be redistributed
						bNoMore[i][j] = true;
						nNoMore++;
					}

				}
			}

			// Retain by score: redistribute the unused budget over the cells
			// that still have spare corners, until nothing is left to spread
			// or every cell is exhausted.
			while(nToDistribute>0 && nNoMore<nCells)
			{
				int nNewFeaturesCell = nfeaturesCell + ceil((float)nToDistribute/(nCells-nNoMore));  // raised per-cell budget
				nToDistribute = 0;

				for(int i=0; i<levelRows; i++)
				{
					for(int j=0; j<levelCols; j++)
					{
						if(!bNoMore[i][j])    // cells with spare corners
						{
							// Enough corners even for the raised budget (once every
							// such cell hits this branch the while loop terminates).
							if(nTotal[i][j]>nNewFeaturesCell)
							{
								nToRetain[i][j] = nNewFeaturesCell;  // keep only the raised budget
								bNoMore[i][j] = false;
							}
							else
							{
								nToRetain[i][j] = nTotal[i][j];
								nToDistribute += nNewFeaturesCell-nTotal[i][j];  // still short by this much
								bNoMore[i][j] = true;   // this cell is exhausted now too
								nNoMore++;
							}
						}
					}
				}
			}

vector<KeyPoint> & keypoints = allKeypoints[level];
keypoints.reserve(nDesiredFeatures*2);

const int scaledPatchSize = PATCH_SIZE*mvScaleFactor[level];

// Retain by score and transform coordinates:
// shift cell-local coordinates back to level coordinates and attach
// the pyramid level (octave) and scaled patch size to every keypoint.
for(int i=0; i<levelRows; i++)
{
for(int j=0; j<levelCols; j++)
{
vector<KeyPoint> &keysCell = cellKeyPoints[i][j];
KeyPointsFilter::retainBest(keysCell,nToRetain[i][j]);   // keep the best-scored corners
if((int)keysCell.size()>nToRetain[i][j])
keysCell.resize(nToRetain[i][j]);

for(size_t k=0, kend=keysCell.size(); k<kend; k++)
{
keysCell[k].pt.x+=iniXCol[j];
keysCell[k].pt.y+=iniYRow[i];
keysCell[k].octave=level;
keysCell[k].size = scaledPatchSize;
keypoints.push_back(keysCell[k]);
}
}
}
// Final trim in case rounding left us over budget.
if((int)keypoints.size()>nDesiredFeatures)
{
KeyPointsFilter::retainBest(keypoints,nDesiredFeatures);
keypoints.resize(nDesiredFeatures);
}
}  // end of the per-level loop

// and compute orientations
for (int level = 0; level < nlevels; ++level)
computeOrientation(mvImagePyramid[level], allKeypoints[level], umax);
}

/*---------------------------------- descriptor batch computation ----------------------------------------*/
// Allocate a zeroed keypoints.size() x 32 CV_8UC1 matrix and write one
// 32-byte ORB descriptor per keypoint row.
static void computeDescriptors(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors,
const vector<Point>& pattern)
{
	const int count = (int)keypoints.size();
	descriptors = Mat::zeros(count, 32, CV_8UC1);

	for (int row = 0; row < count; ++row)
		computeOrbDescriptor(keypoints[row], image, &pattern[0], descriptors.ptr(row));
}




/*------------------------------------- the extraction operator ------------------------------------------*/
// Detect ORB keypoints and compute their descriptors.
//   _image      : input 8-bit grayscale image
//   _mask       : optional detection mask
//   _keypoints  : output keypoints, coordinates scaled back to level-0 resolution
//   _descriptors: output N x 32 CV_8U matrix, one descriptor row per keypoint
void ORBextractor::operator()( InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
	OutputArray _descriptors)
{	// NOTE(review): this opening brace was missing in the original listing,
	// which made the function (and the rest of the file) uncompilable.
	if(_image.empty())
		return;

	Mat image = _image.getMat(), mask = _mask.getMat();
	assert(image.type() == CV_8UC1 );

	// Pre-compute the scale pyramids
	ComputePyramid(image, mask);

	vector < vector<KeyPoint> > allKeypoints;
	ComputeKeyPoints(allKeypoints);

	Mat descriptors;

	// Total keypoint count decides the descriptor matrix size.
	int nkeypoints = 0;
	for (int level = 0; level < nlevels; ++level)
		nkeypoints += (int)allKeypoints[level].size();
	if( nkeypoints == 0 )
		_descriptors.release();
	else
	{
		_descriptors.create(nkeypoints, 32, CV_8U);
		descriptors = _descriptors.getMat();
	}

	_keypoints.clear();
	_keypoints.reserve(nkeypoints);

	int offset = 0;
	for (int level = 0; level < nlevels; ++level)
	{
		vector<KeyPoint>& keypoints = allKeypoints[level];
		int nkeypointsLevel = (int)keypoints.size();

		if(nkeypointsLevel==0)
			continue;

		// Preprocess the resized image: smooth before the BRIEF comparisons.
		Mat& workingMat = mvImagePyramid[level];
		GaussianBlur(workingMat, workingMat, Size(7, 7), 2, 2, BORDER_REFLECT_101);

		// Compute the descriptors into the rows reserved for this level.
		Mat desc = descriptors.rowRange(offset, offset + nkeypointsLevel);
		computeDescriptors(workingMat, keypoints, desc, pattern);

		offset += nkeypointsLevel;

		// Scale keypoint coordinates back to the full-resolution image.
		if (level != 0)
		{
			float scale = mvScaleFactor[level]; //getScale(level, firstLevel, scaleFactor);
			for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
				keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
				keypoint->pt *= scale;
		}
		// And add the keypoints to the output
		_keypoints.insert(_keypoints.end(), keypoints.begin(), keypoints.end());
	}
}


/*------------------------------------- pyramid construction ---------------------------------------------*/
// Build the scale pyramid for the image (and the optional mask). Each level
// is the previous one resized by 1/scaleFactor, stored as a view into a
// larger matrix ("temp") that carries an EDGE_THRESHOLD-pixel border on all
// sides, so detectors can safely address pixels just outside the level.
void ORBextractor::ComputePyramid(cv::Mat image, cv::Mat Mask)
{
for (int level = 0; level < nlevels; ++level)
{
float scale = mvInvScaleFactor[level];
// Size of this level, and the full allocation including the border.
Size sz(cvRound((float)image.cols*scale), cvRound((float)image.rows*scale));
Size wholeSize(sz.width + EDGE_THRESHOLD*2, sz.height + EDGE_THRESHOLD*2);
Mat temp(wholeSize, image.type()), masktemp;
// The pyramid entry is a view of the inner, border-free region of temp.
mvImagePyramid[level] = temp(Rect(EDGE_THRESHOLD, EDGE_THRESHOLD, sz.width, sz.height));

if( !Mask.empty() )
{
masktemp = Mat(wholeSize, Mask.type());
mvMaskPyramid[level] = masktemp(Rect(EDGE_THRESHOLD, EDGE_THRESHOLD, sz.width, sz.height));
}

// Compute the resized image
if( level != 0 )
{
resize(mvImagePyramid[level-1], mvImagePyramid[level], sz, 0, 0, INTER_LINEAR);
if (!Mask.empty())
{
// Nearest neighbour keeps the mask values binary.
resize(mvMaskPyramid[level-1], mvMaskPyramid[level], sz, 0, 0, INTER_NEAREST);
}

// Fill the border by reflection; BORDER_ISOLATED restricts the source
// to the view itself rather than the parent matrix.
copyMakeBorder(mvImagePyramid[level], temp, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD,
BORDER_REFLECT_101+BORDER_ISOLATED);
if (!Mask.empty())
copyMakeBorder(mvMaskPyramid[level], masktemp, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD,
BORDER_CONSTANT+BORDER_ISOLATED);
}
else
{
// Level 0: copy the source image with a reflected border.
copyMakeBorder(image, temp, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD,
BORDER_REFLECT_101);
if( !Mask.empty() )
copyMakeBorder(Mask, masktemp, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD,
BORDER_CONSTANT+BORDER_ISOLATED);
}
}

}


} //namespace ORB_SLAM
 
 

 
 

猜你喜欢

转载自blog.csdn.net/jucilan3330/article/details/68961041
今日推荐