Issues about feature point matching and positioning in Opencv (1) DMatch analysis

Problems with feature point matching and positioning in Opencv

review

After we detect the feature points, we usually perform feature point matching.
First, let's review using the Brute-Force matcher for matching.

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the source image and the template image (BGR order, as OpenCV loads them)
img=cv2.imread('./newmm.png')
tem=cv2.imread('./templa.png')

# Create the ORB keypoint detector/descriptor extractor
orb=cv2.ORB_create()
# Create the brute-force (BF) descriptor matcher
bf=cv2.BFMatcher_create()

# Detect keypoints and compute descriptors for the source image
kp1,des1=orb.detectAndCompute(img,mask=None)
# Detect keypoints and compute descriptors for the template image
kp2,des2=orb.detectAndCompute(tem,mask=None)
# Match the two descriptor sets (des1 is the query set, des2 the train set)
res=bf.match(des1,des2)
# Sort matches by descriptor distance: smaller distance = better match
res=sorted(res,key=lambda x:x.distance)
# Draw the 10 best matches side by side in a single image
newimg=cv2.drawMatches(img,kp1,tem,kp2,res[:10],None)
# Overlay the detected keypoints on each image (colors are BGR)
img=cv2.drawKeypoints(img,kp1,None,color=[0,0,255])
tem=cv2.drawKeypoints(tem,kp2,None,color=[0,255,0])
# Display the images
def imshow(img,axis,title=None):
    """Display a list of BGR images in a grid of matplotlib subplots.

    Parameters
    ----------
    img : list of ndarray
        Images in OpenCV BGR order; converted to RGB before display.
    axis : sequence of two ints
        Grid shape as (rows, cols).
    title : list of str, optional
        Per-subplot titles; must be at least as long as ``img`` if given.
    """
    rows, cols = axis[0], axis[1]
    for pos, frame in enumerate(img, start=1):
        # Three-argument subplot(rows, cols, index) works for any grid size;
        # the original int(str-concat) trick silently breaks once index > 9.
        plt.subplot(rows, cols, pos)
        if title:
            plt.title(title[pos - 1])
        # Matplotlib expects RGB, OpenCV stores BGR — convert for display.
        plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    plt.show()

# Show the source image, the template, and the match visualisation in a 2x2 grid
all_img=[img,tem,newimg]
all_title=['img','tem','newimg']
imshow(all_img,[2,2],all_title)

Then the effect is as follows:
Insert image description here

position

Then the question arises, how to position the coordinates after the detection is completed.
First we have to look at the results generated by bf.match:

res=bf.match(des1,des2)
print(res)
(<DMatch 000001B9FFB81A50>, <DMatch 000001B9FFB81A70>, <DMatch 000001B9FFB81A30>, <DMatch 000001B9FFB819F0>.....<class 'cv2.DMatch'>

You can see that what is generated is <DMatch 000001B9FFB81A50>, which is an object of type <class 'cv2.DMatch'>.
Then we can still access the properties of the object through dir(res[0]).

['__class__',
 '__delattr__',
 '__dir__',
 '__doc__',
 '__eq__',
 '__format__',
 '__ge__',
 '__getattribute__',
 '__gt__',
 '__hash__',
 '__init__',
 '__init_subclass__',
 '__le__',
 '__lt__',
 '__module__',
 '__ne__',
 '__new__',
 '__reduce__',
 '__reduce_ex__',
 '__repr__',
 '__setattr__',
 '__sizeof__',
 '__str__',
 '__subclasshook__',
 'distance',
 'imgIdx',
 'queryIdx',
 'trainIdx']

Then print out the fields you need — 'distance', 'imgIdx', 'queryIdx', 'trainIdx':

print(res[0].distance)
print(res[0].imgIdx)
print(res[0].queryIdx)
print(res[0].trainIdx)
0.0
0
0
13

But when I looked at the output, I was completely confused: only res[0].distance had an obvious meaning. So I had to look it up in the official OpenCV documentation.
Insert image description here
Due to my limited English proficiency and too simple description, I can only guess based on my feelings.
Since each match pairs one point with one point, trainIdx and queryIdx must be the indices of those two points, so let's test that assumption below.

Field | Meaning
queryIdx | Index of the matched keypoint in the query image — the first argument to match() (here the original image, kp1/des1)
trainIdx | Index of the matched keypoint in the train image — the second argument to match() (here the template image, kp2/des2)
imgIdx | Index of the train image (only meaningful when matching against a collection of images; 0 here)

After having this, I tried it myself. Since the matching results are sorted, that is, the front ones are more accurate, so I tried directly with the front ones.
The idea is this: use the first ten matches in res to look up the corresponding keypoints in the two pictures (original picture and template picture), obtain the minimum and maximum coordinates (that is, the top-left corner and bottom-right corner of the box), then draw both boxes and check whether they frame the same content in each image.
The code is as follows:

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the source image and the template image (BGR order, as OpenCV loads them)
img=cv2.imread('./newmm.png')
tem=cv2.imread('./templa.png')

# Create the ORB keypoint detector/descriptor extractor
orb=cv2.ORB_create()
# Create the brute-force (BF) descriptor matcher
bf=cv2.BFMatcher_create()

# Detect keypoints and compute descriptors for the source image
kp1,des1=orb.detectAndCompute(img,mask=None)
# Detect keypoints and compute descriptors for the template image
kp2,des2=orb.detectAndCompute(tem,mask=None)
# Match the two descriptor sets (des1 is the query set, des2 the train set)
res=bf.match(des1,des2)

# Sort matches by descriptor distance so the best matches come first
res=sorted(res,key=lambda x:x.distance)

def get_rect(res,kp,idx=0,top=10):
    """Axis-aligned bounding box of the keypoints behind the best matches.

    Parameters
    ----------
    res : sequence of cv2.DMatch
        Matches sorted by distance (best first).
    kp : sequence of cv2.KeyPoint
        Keypoints of the image the box is computed for.
    idx : int
        0 -> index keypoints with ``queryIdx`` (the first image passed to
        match(), i.e. the source image); 1 -> use ``trainIdx`` (the second
        image, i.e. the template).
    top : int
        How many of the best matches to use (default 10, as before).

    Returns
    -------
    (minpoint, maxpoint) : tuple of [x, y] int lists
        Top-left and bottom-right corners of the bounding box.

    Raises
    ------
    ValueError
        If ``idx`` is neither 0 nor 1 (the original silently hit a
        NameError in that case).
    """
    points = []
    for m in res[:top]:
        if idx == 0:
            kp_index = m.queryIdx
        elif idx == 1:
            kp_index = m.trainIdx
        else:
            raise ValueError("idx must be 0 (query/source) or 1 (train/template)")
        # KeyPoint.pt is the (x, y) float position; truncate to ints as before.
        x, y = kp[kp_index].pt
        points.append([int(x), int(y)])

    # Per-axis min/max directly gives the box corners — equivalent to the
    # original argmin/argmax index juggling, but in one step each.
    mins = np.min(points, axis=0)
    maxs = np.max(points, axis=0)
    return [int(mins[0]), int(mins[1])], [int(maxs[0]), int(maxs[1])]

# Bounding box of the 10 best matches in the source image (queryIdx -> kp1)
min1,max2=get_rect(res,kp1,0)
# Bounding box of the 10 best matches in the template image (trainIdx -> kp2)
min3,max4=get_rect(res,kp2,1)

cv2.rectangle(tem,min3,max4,[255,0,0],4,16)
cv2.rectangle(img,min1,max2,[255,0,0],4,16)

# Draw the 10 best matches side by side in a single image
newimg=cv2.drawMatches(img,kp1,tem,kp2,res[:10],None)
# Overlay the detected keypoints on each image (colors are BGR)
img=cv2.drawKeypoints(img,kp1,None,color=[0,0,255])
tem=cv2.drawKeypoints(tem,kp2,None,color=[0,255,0])
# Display the images
def imshow(img,axis,title=None):
    """Display a list of BGR images in a grid of matplotlib subplots.

    Parameters
    ----------
    img : list of ndarray
        Images in OpenCV BGR order; converted to RGB before display.
    axis : sequence of two ints
        Grid shape as (rows, cols).
    title : list of str, optional
        Per-subplot titles; must be at least as long as ``img`` if given.
    """
    rows, cols = axis[0], axis[1]
    for pos, frame in enumerate(img, start=1):
        # Three-argument subplot(rows, cols, index) works for any grid size;
        # the original int(str-concat) trick silently breaks once index > 9.
        plt.subplot(rows, cols, pos)
        if title:
            plt.title(title[pos - 1])
        # Matplotlib expects RGB, OpenCV stores BGR — convert for display.
        plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    plt.show()

# Show the boxed source image, the boxed template, and the match visualisation
all_img=[img,tem,newimg]
all_title=['img','tem','newimg']
imshow(all_img,[2,2],all_title)

The effect is as follows:
Insert image description here
Insert image description here
Insert image description here
You can find that it is a complete match!!
I only need to draw the frame of the original image to detect the target

Issues about feature point matching and positioning in Opencv (2) Single target and multi-target positioning

Guess you like

Origin blog.csdn.net/darlingqx/article/details/128263432