-
Corner points, that is, pixels with more prominent attributes in the image
-
The commonly used corners are as follows:
- The pixel corresponding to the maximum value of the gradient
- The intersection of two straight lines or curves
- A pixel where both the magnitude of the first-order gradient and the rate of change of the gradient direction reach a maximum
- The pixel with the largest first derivative value, but the second derivative value is 0
- API introduction:
void cornerHarris (
InputArray src, // 输入图像 (单通道,8位或浮点型)
OutputArray dst, // 输出图像 (类型 CV_32FC1,大小同 src)
int blockSize, // 邻域大小
int ksize, // Sobel 算子的孔径大小
double k, // 经验参数,取值范围 0.04 ~ 0.06
int borderType = BORDER_DEFAULT // 边界模式
)
void goodFeaturesToTrack (
InputArray image, // 输入图像 (单通道,8位或浮点型32位)
OutputArray corners, // 检测到的角点
int maxCorners, // 最多允许返回的角点数量
double qualityLevel, // 质量水平
double minDistance, // 角点间的最小欧氏（欧几里得）距离
InputArray mask = noArray(), // 可选掩码，仅在非零区域内检测角点
int blockSize = 3, // 计算梯度协方差矩阵时的邻域大小
bool useHarrisDetector = false, // 是否改用 Harris 角点响应函数（默认 Shi-Tomasi）
double k = 0.04 // Harris 检测器的自由参数（仅当 useHarrisDetector 为 true 时使用）
)
void cornerSubPix(
InputArray image, // 输入图象(单通道,8位或浮点型)
InputOutputArray corners, // 亚像素精度的角点坐标
Size winSize, // 搜索窗口尺寸的 1/2
Size zeroZone, // 搜索区域中央死区大小的 1/2，(-1,-1) 表示没有死区
TermCriteria criteria // 迭代终止准则
)
1. Harris Corner
-
It is mainly used to detect the endpoint of a line segment in an image or the intersection of two line segments
-
Define a local small window in the image, and then move this window in various directions, there will be three situations a) b) c) corresponding to flat areas, edges and corners respectively. The following figure shows the detection idea:
- The image intensity in the window does not change when the window moves in all directions, so the window is a "flat area" and there are no corners
- If the image intensity in the window changes greatly when the window moves in one (some) directions, but does not change in other directions, then there may be "edges" in the window
- When the image intensity in the window changes greatly when the window moves in all directions, it is considered that there is a "corner" in the window
- Its main theories are as follows:
- demo
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src, src_gray;   // input image and its grayscale version
int thresh = 200;   // Harris response threshold, controlled by the trackbar
int max_thresh = 255;   // upper bound of the threshold trackbar
const char* source_window = "Source image";   // window title for the input image
const char* corners_window = "Corners detected";   // window title for the result
void cornerHarris_demo( int, void* );   // trackbar callback (forward declaration)
int main( int argc, char** argv )
{
    // Load the input image (defaults to building.jpg) and stop on failure.
    CommandLineParser parser( argc, argv, "{@input | building.jpg | input image}" );
    src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if ( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl
             << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }

    // The Harris detector operates on a single-channel image.
    cvtColor( src, src_gray, COLOR_BGR2GRAY );

    // Show the source and attach a trackbar that re-runs the detector
    // whenever the threshold changes.
    namedWindow( source_window );
    createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
    imshow( source_window, src );

    cornerHarris_demo( 0, 0 );
    waitKey();
    return 0;
}
/**
 * Trackbar callback: runs Harris corner detection on the global grayscale
 * image, normalizes the response map to [0,255], and circles every pixel
 * whose normalized response exceeds the current trackbar threshold.
 */
void cornerHarris_demo( int, void* )
{
    // Harris detector parameters.
    int blockSize = 2;      // neighborhood size for the gradient covariance matrix
    int apertureSize = 3;   // aperture size of the Sobel operator
    double k = 0.04;        // Harris free parameter (typical range 0.04 - 0.06)

    // cornerHarris allocates dst itself (CV_32FC1, same size as the input),
    // so the explicit zero-initialization in the original was redundant.
    Mat dst;
    cornerHarris( src_gray, dst, blockSize, apertureSize, k );

    // Stretch the raw response to [0,255] for thresholding and display.
    Mat dst_norm, dst_norm_scaled;
    normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
    convertScaleAbs( dst_norm, dst_norm_scaled );

    // Mark every pixel whose normalized response exceeds the threshold.
    for( int i = 0; i < dst_norm.rows; i++ )
    {
        for( int j = 0; j < dst_norm.cols; j++ )
        {
            if( (int) dst_norm.at<float>(i,j) > thresh )
            {
                circle( dst_norm_scaled, Point(j,i), 5, Scalar(0), 2, 8, 0 );
            }
        }
    }
    namedWindow( corners_window );
    imshow( corners_window, dst_norm_scaled );
}
2. Shi-Tomasi corner point
-
Later in 1994, J. Shi and C. Tomasi made a small modification to it in their paper Good Features to Track, showing better results compared to Harris Corner Detector. The scoring function in the Harris corner detector is given by:
-
OpenCV has a function for this
cv.goodFeaturesToTrack()
. It finds the N strongest corners in the image by the Shi-Tomasi method (or by Harris corner detection, if specified). As usual, the input must be a grayscale image. You specify in advance the number of corners to find, then a quality level — a value between 0 and 1 denoting the minimum accepted quality of a corner; every corner below it is rejected. Finally, you provide the minimum Euclidean distance allowed between detected corners. -
With all this information, the function finds the corners in the image. All corners below the quality level will be culled. It then sorts the remaining corners in descending order by quality. The function then takes the first strongest corner, discards all nearby corners within the minimum distance, and returns the N strongest corners.
In the example below we will try to find the 25 best corners:
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

# Load the image and convert to grayscale, as goodFeaturesToTrack requires.
img = cv.imread('blox.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# Find up to 25 corners (Shi-Tomasi score) with quality level 0.01 and a
# minimum distance of 10 pixels between accepted corners.
corners = cv.goodFeaturesToTrack(gray, 25, 0.01, 10)

# np.int0 was removed in NumPy 2.0; np.intp is the indexing integer type
# that int0 aliased.
corners = np.intp(corners)

# Draw a filled circle at each detected corner and show the result.
for corner in corners:
    x, y = corner.ravel()
    cv.circle(img, (x, y), 3, 255, -1)
plt.imshow(img), plt.show()
- Example of use:
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src, src_gray;   // input image and its grayscale version
int maxCorners = 23;   // maximum number of corners to return, controlled by the trackbar
int maxTrackbar = 100;   // upper bound of the trackbar
RNG rng(12345);   // fixed-seed RNG so marker colors are reproducible
const char* source_window = "Image";   // display window title
void goodFeaturesToTrack_Demo( int, void* );   // trackbar callback (forward declaration)
int main( int argc, char** argv )
{
    // Load the input image (defaults to pic3.png) and stop on failure.
    CommandLineParser parser( argc, argv, "{@input | pic3.png | input image}" );
    src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl
             << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }

    // Shi-Tomasi detection operates on a single-channel image.
    cvtColor( src, src_gray, COLOR_BGR2GRAY );

    // Show the source and attach a trackbar that re-runs the detector
    // whenever the corner limit changes.
    namedWindow( source_window );
    createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
    imshow( source_window, src );

    goodFeaturesToTrack_Demo( 0, 0 );
    waitKey();
    return 0;
}
/**
 * Trackbar callback: detects up to maxCorners Shi-Tomasi corners on the
 * global grayscale image and draws each one on a copy of the source image
 * in a random color.
 */
void goodFeaturesToTrack_Demo( int, void* )
{
    // The trackbar can be dragged to 0; the detector needs at least one corner.
    maxCorners = MAX(maxCorners, 1);

    // Detector parameters.
    vector<Point2f> corners;
    double qualityLevel = 0.01;   // minimal accepted quality relative to the best corner
    double minDistance = 10;      // minimum Euclidean distance between accepted corners
    int blockSize = 3, gradientSize = 3;
    bool useHarrisDetector = false;   // use the Shi-Tomasi score
    double k = 0.04;                  // Harris free parameter (ignored when false)

    Mat copy = src.clone();
    goodFeaturesToTrack( src_gray,
                         corners,
                         maxCorners,
                         qualityLevel,
                         minDistance,
                         Mat(),   // no mask: search the whole image
                         blockSize,
                         gradientSize,
                         useHarrisDetector,
                         k );
    cout << "** Number of corners detected: " << corners.size() << endl;

    // RNG::uniform has an exclusive upper bound, so (0, 256) covers the full
    // 0-255 range; the original (0, 255) could never produce 255 for the
    // first channel, inconsistent with the other two.
    int radius = 4;
    for( size_t i = 0; i < corners.size(); i++ )
    {
        circle( copy, corners[i], radius,
                Scalar( rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256) ),
                FILLED );
    }
    namedWindow( source_window );
    imshow( source_window, copy );
}
3. Corner detector
- Use the OpenCV function cv::cornerEigenValsAndVecs to compute the eigenvalues and eigenvectors of the gradient covariance matrix, in order to decide whether a pixel is a corner.
- Use the OpenCV function cv::cornerMinEigenVal to compute the smallest eigenvalue for corner detection.
- Implement our own versions of the Harris and
Shi-Tomasi
detectors by using the two functions above. - Example of use:
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src, src_gray;   // input image and its grayscale version
Mat myHarris_dst, myHarris_copy, Mc;   // eigen data, display copy, Harris response map
Mat myShiTomasi_dst, myShiTomasi_copy;   // min-eigenvalue map and its display copy
int myShiTomasi_qualityLevel = 50;   // trackbar position (fraction of the response range)
int myHarris_qualityLevel = 50;   // trackbar position (fraction of the response range)
int max_qualityLevel = 100;   // upper bound of both trackbars
double myHarris_minVal, myHarris_maxVal;   // range of the Harris response map
double myShiTomasi_minVal, myShiTomasi_maxVal;   // range of the min-eigenvalue map
RNG rng(12345);   // fixed-seed RNG so marker colors are reproducible
const char* myHarris_window = "My Harris corner detector";   // window title
const char* myShiTomasi_window = "My Shi Tomasi corner detector";   // window title
void myShiTomasi_function( int, void* );   // trackbar callback (forward declaration)
void myHarris_function( int, void* );   // trackbar callback (forward declaration)
int main( int argc, char** argv )
{
// Load the input image (defaults to building.jpg) and stop on failure.
CommandLineParser parser( argc, argv, "{@input | building.jpg | input image}" );
src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if ( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
// Corner detection operates on a single-channel image.
cvtColor( src, src_gray, COLOR_BGR2GRAY );
int blockSize = 3, apertureSize = 3;
// For every pixel, compute the eigenvalues and eigenvectors of the 2x2
// gradient covariance matrix; each result pixel is a Vec6f laid out as
// (lambda1, lambda2, x1, y1, x2, y2).
cornerEigenValsAndVecs( src_gray, myHarris_dst, blockSize, apertureSize );
/* calculate Mc */
// Hand-rolled Harris response: Mc = l1*l2 - k*(l1 + l2)^2 with k = 0.04.
Mc = Mat( src_gray.size(), CV_32FC1 );
for( int i = 0; i < src_gray.rows; i++ )
{
for( int j = 0; j < src_gray.cols; j++ )
{
float lambda_1 = myHarris_dst.at<Vec6f>(i, j)[0];
float lambda_2 = myHarris_dst.at<Vec6f>(i, j)[1];
Mc.at<float>(i, j) = lambda_1*lambda_2 - 0.04f*((lambda_1 + lambda_2) * (lambda_1 + lambda_2));
}
}
// Record the response range so the trackbar can be mapped to a threshold.
minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal );
/* Create Window and Trackbar */
namedWindow( myHarris_window );
createTrackbar( "Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
myHarris_function( 0, 0 );
// Shi-Tomasi score: the minimal eigenvalue of the same covariance matrix.
cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize );
minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal );
/* Create Window and Trackbar */
namedWindow( myShiTomasi_window );
createTrackbar( "Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
myShiTomasi_function( 0, 0 );
waitKey();
return 0;
}
void myShiTomasi_function( int, void* )
{
myShiTomasi_copy = src.clone();
myShiTomasi_qualityLevel = MAX(myShiTomasi_qualityLevel, 1);
for( int i = 0; i < src_gray.rows; i++ )
{
for( int j = 0; j < src_gray.cols; j++ )
{
if( myShiTomasi_dst.at<float>(i,j) > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel )
{
circle( myShiTomasi_copy, Point(j,i), 4, Scalar( rng.uniform(0,256), rng.uniform(0,256), rng.uniform(0,256) ), FILLED );
}
}
}
imshow( myShiTomasi_window, myShiTomasi_copy );
}
void myHarris_function( int, void* )
{
myHarris_copy = src.clone();
myHarris_qualityLevel = MAX(myHarris_qualityLevel, 1);
for( int i = 0; i < src_gray.rows; i++ )
{
for( int j = 0; j < src_gray.cols; j++ )
{
if( Mc.at<float>(i,j) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel )
{
circle( myHarris_copy, Point(j,i), 4, Scalar( rng.uniform(0,256), rng.uniform(0,256), rng.uniform(0,256) ), FILLED );
}
}
}
imshow( myHarris_window, myHarris_copy );
}
4. Use sub-pixel precision
- Use the OpenCV function cv::cornerSubPix to find a more precise corner position (more precise than integer pixels).
- Example of use:
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src, src_gray;   // input image and its grayscale version
int maxCorners = 10;   // maximum number of corners to return, controlled by the trackbar
int maxTrackbar = 25;   // upper bound of the trackbar
RNG rng(12345);   // fixed-seed RNG so marker colors are reproducible
const char* source_window = "Image";   // display window title
void goodFeaturesToTrack_Demo( int, void* );   // trackbar callback (forward declaration)
int main( int argc, char** argv )
{
    // Load the input image (defaults to pic3.png) and stop on failure.
    CommandLineParser parser( argc, argv, "{@input | pic3.png | input image}" );
    src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl
             << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }

    // Corner detection operates on a single-channel image.
    cvtColor( src, src_gray, COLOR_BGR2GRAY );

    // Show the source and attach a trackbar that re-runs the demo
    // whenever the corner limit changes.
    namedWindow( source_window );
    createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
    imshow( source_window, src );

    goodFeaturesToTrack_Demo( 0, 0 );
    waitKey();
    return 0;
}
/**
 * Trackbar callback: detects up to maxCorners Shi-Tomasi corners, draws
 * them on a copy of the source image, then refines their coordinates to
 * sub-pixel accuracy with cornerSubPix and prints the refined positions.
 */
void goodFeaturesToTrack_Demo( int, void* )
{
    // The trackbar can be dragged to 0; the detector needs at least one corner.
    maxCorners = MAX(maxCorners, 1);

    // Detector parameters.
    vector<Point2f> corners;
    double qualityLevel = 0.01;   // minimal accepted quality relative to the best corner
    double minDistance = 10;      // minimum Euclidean distance between accepted corners
    int blockSize = 3, gradientSize = 3;
    bool useHarrisDetector = false;   // use the Shi-Tomasi score
    double k = 0.04;                  // Harris free parameter (ignored when false)

    Mat copy = src.clone();
    goodFeaturesToTrack( src_gray,
                         corners,
                         maxCorners,
                         qualityLevel,
                         minDistance,
                         Mat(),   // no mask: search the whole image
                         blockSize,
                         gradientSize,
                         useHarrisDetector,
                         k );
    cout << "** Number of corners detected: " << corners.size() << endl;

    // RNG::uniform has an exclusive upper bound, so (0, 256) covers the full
    // 0-255 range; the original (0, 255) could never produce 255 for the
    // first channel, inconsistent with the other two.
    int radius = 4;
    for( size_t i = 0; i < corners.size(); i++ )
    {
        circle( copy, corners[i], radius,
                Scalar( rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256) ),
                FILLED );
    }
    namedWindow( source_window );
    imshow( source_window, copy );

    // Refine the corner locations to sub-pixel accuracy: winSize (5,5) is
    // half the search-window side, zeroZone (-1,-1) means no dead zone, and
    // iteration stops after 40 steps or when the shift drops below 0.001.
    Size winSize = Size( 5, 5 );
    Size zeroZone = Size( -1, -1 );
    TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 );
    cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
    for( size_t i = 0; i < corners.size(); i++ )
    {
        cout << " -- Refined Corner [" << i << "] (" << corners[i].x << "," << corners[i].y << ")" << endl;
    }
}
reference
1. https://blog.csdn.net/fengweichangzi/article/details/119001661?spm=1001.2014.3001.5506
2. https://docs.opencv.org/4.x/d9/d97/tutorial_table_of_content_features2d.html