OpenCV template matching, with normalization, to find the best match value

1) Template matching
Template matching is a technique for finding the part of an image that is most similar to a given template image. It is not based on histograms; instead, it slides the template block across the input image, comparing similarity at each position, to locate where the template best matches the input image.
Application :
1. Target finding and positioning
2. Moving object tracking
3 others. . .
Because it is plain template matching, similarity degrades sharply under rotation or tilt:
it is not suitable for searching at arbitrary angles, and
it is not suitable for scale changes.

matchTemplate(InputArray image, InputArray templ, OutputArray result, int method);
&&image : The image to be searched (large image)
&&templ: The search template needs the same data type as the original image and the size cannot be larger than the source image
&&result: The mapped image of the comparison result; it must be a single-channel, 32-bit floating-point image. If the size of the original image (image to be searched) is W*H, and the size of templ is w*h, the result size must be (W-w+1)*(H-h+1) — the effective search area.
&&method: The specified matching method, there are the following 6
CV_TM_SQDIFF-squared difference matching methods (the best match is 0)
to use the squared difference to match, the best match is 0, the worse the match, the match The larger the value,
CV_TM_SQDIFF_NORMED – normalized squared difference matching method (best match 0)
CV_TM_CCORR – correlation matching method (worst match 0)
uses the multiplication operation between the template and the image, so a larger number indicates a higher degree of matching, 0 identifies the worst matching result
CV_TM_CCORR_NORMED – normalized correlation matching method (worst match 0)
CV_TM_CCOEFF – correlation coefficient matching method (best match 1)
This method matches the correlation of the template relative to its mean against the correlation of the image relative to its mean; 1 means a perfect match, -1 means a very bad match, and 0 means no correlation (random sequences).
CV_TM_CCOEFF_NORMED – Correlation coefficient matching method (best match 1)
ex1:
resultImg.create(resultImg_rows, resultImg_cols, CV_32FC1);
matchTemplate(srcImg, templateImg, resultImg, CV_TM_SQDIFF);

In general, as we go from a simple measure (squared difference) to a more complex measure (correlation coefficient) we can get more and more accurate matches (which also means more and more computationally expensive) the best way to do this is for all Do some more testing experiments with these settings, taking into account both speed and accuracy

Matrix normalization—normalize()
void normalize(InputArray src, OutputArray dst, double alpha=1, double beta=0, int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray())
&& src: input original image, Mat type
&& dst: output result image; it needs the same size and type as the original image
&& alpha: normalized minimum value, default value 1
&& beta: normalized maximum value, default value 0
&& norm_type: normalized Normalization type, optional NORM_INF, NORM_L1, NORM_L2 (default), etc.
&& dtype: default value -1; when this parameter is negative, the output matrix has the same type as src
&& mask: optional mask operation
ex1:
normalize(srcImg ,resultImg,0,1,NORM_MINMAX,-1);
the role is to normalize the matrix

———————————————————————————--Find the minimum and maximum values: minMaxLoc()
void minMaxLoc(InputArray src, CV_OUT double* minVal, CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0, CV_OUT Point* maxLoc=0, InputArray mask=noArray())
&&src: input original image, single-channel image
&&minVal: return the pointer of the minimum value, if there is no need to return, set 0
&&maxVal : Return the pointer of the maximum value, if there is no need to return, set 0
&&minLoc: return the pointer of the minimum position, if there is no need to return, set 0
&&maxLoc: return the pointer of the maximum position, if there is no need to return, set 0
&&mask: optional mask operation
ex1:
Mat resultImg;
double minValue,maxValue;
Point minLoc,maxLoc;
Point matchLoc;
minMaxLoc(resultImg,&minValue,&maxValue,&minLoc,&maxLoc);
The function finds the global minimum and maximum values in the array.

#include "opencv2/opencv.hpp"
using namespace cv;
void mian()
{
//单模板匹配
  Mat temp=imread("temp.png");//模板图像
  Mat src=imread("src.png");//待搜索图像 
  Mat dst=src.clone();//原图的备份
  int width=src.cols-temp.cols+1;//result 宽度
  int height=src.rows-temp.rows+1;//result 高度

  Mat result(height,width,CV_32FC1);//创建结果映射图像

  matchTemplate(sarc,temp,resultCV_TM_CCOEDD_NORMED);//化相关系数匹配最佳值1
  imshow("result",result);//预览映射图像
  normalize(result,result,0,1,NORM_MINMAX,-1);//归一化0到1
  double minValue,maxValue;
  Point minLoc,maxLoc;
  minMaxLoc(result,&minValue,&maxValue,&minLoc,&maxLoc,Mat());
  rectangle(dst,maxLoc,Point(maxLoc.x+temp.cols,maxLoc.y+temp.rows),Scalar(0,255,0),2,8);
 inshow("dst",dst);
 waitKey(0);
 }

//Recognition of moving objects

// Moving-object recognition: match a fixed template against every video frame
// and draw a box whenever the best score clears a confidence threshold.
Mat frame;
Mat templateImg = imread("blue.jpg");  // template image (fixed filename typo "blue,jpg")
Mat resultImg;                          // fixed: was declared as the misspelled `resulting`
VideoCapture cap("1.mp4");
if (!cap.isOpened())
  return;
int resultImg_cols;
int resultImg_rows;
while (1)
{
  // The template is captured beforehand with a screenshot tool
  cap >> frame;
  if (frame.empty())
    break;
  Mat showImg = frame.clone();  // fixed: original `Mat showImg.clone();` was invalid
  // Effective search area: (W-w+1) x (H-h+1)
  resultImg_cols = frame.cols - templateImg.cols + 1;
  resultImg_rows = frame.rows - templateImg.rows + 1;
  // Mat::create takes (rows, cols, type) — original had cols/rows swapped
  resultImg.create(resultImg_rows, resultImg_cols, CV_32FC1);
  matchTemplate(frame, templateImg, resultImg, CV_TM_CCOEFF_NORMED);
  normalize(resultImg, resultImg, 0, 1, NORM_MINMAX, -1);  // normalize to [0,1]
  double minValue, maxValue;
  Point minLoc, maxLoc;
  minMaxLoc(resultImg, &minValue, &maxValue, &minLoc, &maxLoc);
  cout << "max_value=" << maxValue << endl;
  cout << "min_value=" << minValue << endl;
  if (maxValue >= 0.7)  // confidence threshold: only draw confident matches
    rectangle(showImg, maxLoc, Point(maxLoc.x + templateImg.cols, maxLoc.y + templateImg.rows), Scalar(0, 255, 0), 2);
  imshow("frame", frame);
  imshow("result", showImg);
  if (27 == waitKey(20))  // ESC exits the loop
    break;
}

//Multiple template matching

// Multiple-template matching: scan the entire (normalized) result map and mark
// every position whose score is >= 0.95, skipping positions directly adjacent
// to the previously accepted hit to avoid duplicate boxes.
double matchValue;
int count0 = 0;                 // number of accepted matches
int tempW = 0, tempH = 0;       // fixed: `int tempW=0;tempH=0;` was invalid; last accepted (col,row)
char matchRate[10];
for (int i = 0; i < resultImg_rows; i++)
{
  for (int j = 0; j < resultImg_cols; j++)
  {
    matchValue = resultImg.at<float>(i, j);
    // snprintf bounds the write; original used unbounded sprintf
    snprintf(matchRate, sizeof(matchRate), "%0.2f", matchValue);
    // Accept high scores that are not immediately adjacent to the previous hit
    if (matchValue >= 0.95 && (abs(j - tempW) > 1) && (abs(i - tempH) > 1))
    {
      count0++;
      // (j, i) maps to (x, y): j is the column, i is the row — original wrote Point(j-5, j-5)
      putText(showImg, matchRate, Point(j - 5, i - 5), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(0, 0, 255), 1);
      rectangle(showImg, Point(j, i), Point(j + templateImg.cols, i + templateImg.rows), Scalar(0, 255, 0), 2);
      tempW = j;
      tempH = i;
    }
  }
}

//Multi-target template matching

double minValue,maxValue;
Point minLoc,maxLoc;
Point matchLoc;
char matchRate[10];
minMaxLoc(resultImg,&minValue,&maxValue,&minLoc,&macLoc);
count<<"max_Value="<<maxValue<<endl;
sprintf(matchRate,"%0.2f",maxValue);
putText(showImg,matchRate,Point(maxLoc.x-5,maxLoc.y-5),CV_FONT_HERSHEY_COMPLEX,1,Scalar(0,0,255),1);
rectanle(showImg,maxLoc,Point(maxLoc.x+templateImg.cols,maxLoc.y+templateImg.rows),Scalar(0,255,0),2);
for(int i=0;i<100;i++)
{
    int startX=maxLoc.x-20;
    int startY=maxLoc.y-20;
    int endX=maxLoc.x+20;
    int endY=maxLoc.y+20;
    if(startX<0||startY<0)
    {
      startX=0;
      startY=0;
    }
    if(endx>resultImg.cols-1||endY>resultImg.rows-1)
    {
     endX=resultImg.cols-1;
     endY=resultImg.rows-1;
    }
    Mat temp=Mat::zeros(endX-startX,endY-startY,CV_32FC1);
    temp.copyTo(resultImg(Rect(startx,starty,temp.cols,temp.rows));
    minMaxLox(resultImg,&minValue,&maxValue,&minLoc,&maxLoc);
    if(maxValue<0.5)
    break;
    snprintf(matchRate,"%0.2f",maxValue);
    putText(showImg,matchRate,Point(maxLoc.x-5,maxLoc.y-5),CV_FONT_HERSHEY_COMPLEX,1,Scalar(0,255,0),1);
    rectangle(showImg,maxLoc,Point(maxLoc.x+tempLateImg.cols,maxLoc.y+tempLateImg.rows),Scalar(0,255,0),2);
    }
    imshow("midImg",midImg);
    imshow("resultImg",resultImg);
    imshow("dst",showImg);
    waitKey(0);

}

Guess you like

Origin http://43.154.161.224:23101/article/api/json?id=324377440&siteId=291194637