OpenCV - day 08 Template_Matching

template matching

  • The principle of template matching is very similar to that of convolution. The template slides over the original image starting from the origin, and at each position the difference between the template and the image patch it covers is computed. OpenCV offers 6 ways to calculate this difference, and each result is stored into an output matrix. If the original image is A x B and the template is a x b, the output matrix is (A - a + 1) x (B - b + 1).
import cv2
import matplotlib
import matplotlib.pyplot as plt #取别名(用于绘图展示)
import numpy as np #取别名,下面是notepad专用,立即显示图像
%matplotlib inline 
复制代码
def cv_show(name, img): # helper to display an image loaded via cv2.imread
    """Show *img* in a window titled *name* and block until any key is pressed.

    name: window title; img: image array as returned by cv2.imread.
    The window is destroyed after the key press.
    """
    cv2.imshow(name,img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
复制代码
def plt_show(images, titles):
    """Draw each image with its title into a 3x5 matplotlib subplot grid.

    images: list of image arrays; titles: parallel list of subplot titles.
    Supports at most 15 images (the 3x5 grid of subplot codes 351..).
    """
    for idx, image in enumerate(images):
        plt.subplot(351 + idx)
        # NOTE(review): cvtColor code 0 is COLOR_BGR2BGRA — presumably meant
        # to re-order channels for matplotlib display; confirm intended code.
        plt.imshow(cv2.cvtColor(image, 0))
        plt.title(titles[idx])
复制代码
def winds(images):
    """Show several images side by side in one comparison window.

    Each image is scaled to 80% so the combined strip fits on screen.
    The input list is left untouched — the previous version overwrote
    its elements in place, which surprises callers that reuse the list.

    images: sequence of image arrays; np.hstack requires equal heights
    (true here, since all inputs are copies of the same canvas).
    """
    # dsize=(0, 0) tells cv2.resize to derive the size from fx/fy factors
    scaled = [cv2.resize(img, (0, 0), fx=0.8, fy=0.8) for img in images]
    res = np.hstack(scaled)  # concatenate horizontally
    cv_show('Compare', res)
复制代码

Six parameters for template matching

  1. TM_SQDIFF: computes the squared difference; the smaller the value, the stronger the match between the template and the original image

  2. TM_CCORR: Calculate the correlation, the larger the calculated value, the greater the degree of correlation between the template and the original image

  3. TM_CCOEFF: Calculate the correlation coefficient. The larger the calculated value, the greater the correlation between the template and the original image.

  4. TM_SQDIFF_NORMED: computes the normalized squared difference. The closer the value is to 0, the stronger the match between the template and the original image.

  5. TM_CCORR_NORMED: Calculate the normalized correlation. The closer the calculated value is to 1, the greater the correlation between the template and the original image.

  6. TM_CCOEFF_NORMED: Calculate the normalized correlation coefficient. The closer the calculated value is to 1, the greater the degree of correlation between the template and the original image.

  • In practice, it is best to use the normalized method

  • Matching process: the template slides across the original image from left to right and top to bottom, comparing itself against each same-sized region to find where it matches best. The sliding step is usually one pixel.

  • Difference quantization: the difference in gray value between corresponding pixels

# Convert to grayscale to make matching simpler (single-channel comparison)
img = cv2.imread('lena.png', 0)
template = cv2.imread('face.png',0)

# NOTE(review): window titled 'lena' actually shows the template — likely a preview of the template
cv_show('lena', template)
h, w = template.shape[:2]# template height and width, used later to draw the match rectangle
复制代码
img.shape
复制代码
(512, 512)
复制代码
template.shape
复制代码
(157, 103)
复制代码
 methods = ['cv2.TM_CCOEFF','cv2.TM_CCOEFF_NORMED','cv2.TM_CCORR','cv2.TM_CCORR_NORMED',
           'cv2.TM_SQDIFF','cv2.TM_SQDIFF_NORMED']  # method names as strings; eval() below resolves them to cv2 constants
复制代码
# Squared-difference methods measure "difference", so smaller values are better matches
res = cv2.matchTemplate(img, template, 1) # third arg selects the method; 1 == cv2.TM_SQDIFF_NORMED (cv2.TM_SQDIFF itself is 0) — the [0,1] min/max values below confirm the normalized variant
res.shape # slides one pixel at a time and includes the first position, so the result is (A - a + 1, B - b + 1)
复制代码
(356, 410)
复制代码
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)  # smallest/largest response values and their (x, y) locations
复制代码
min_val
复制代码
0.0
复制代码
max_val
复制代码
0.4685695171356201
复制代码
min_loc
复制代码
(250, 233)
复制代码
max_loc
复制代码
(67, 355)
复制代码

Effect demonstration

The template image below is slightly larger than the face in the original image

for meth in methods:
    # Work on a copy so the rectangle from one method does not leak into the next
    img2 = img.copy()

    # Resolve the string name to the actual cv2 integer constant.
    # NOTE(review): eval on this fixed literal list is safe, but
    # getattr(cv2, name) would be the safer idiom for untrusted input.
    method = eval(meth)
    print(method)

    res = cv2.matchTemplate(img2, template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # Squared-difference methods: best match is the MINIMUM; all others: MAXIMUM
    if method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED):
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Grayscale image, so 255 draws a white 2px rectangle
    cv2.rectangle(img2, top_left, bottom_right, 255, 2)

    # Left: raw response map; right: original image with the best-match box
    for pos, shown in ((121, res), (122, img2)):
        plt.subplot(pos)
        plt.imshow(shown, cmap = 'gray')
        plt.xticks([]), plt.yticks([])  # hide the axes
    plt.suptitle(meth)
    plt.show()
复制代码

The bright or dark spots in the image on the left are the best matching points.

4
复制代码

output_18_1.png

5
复制代码

output_18_3.png

2
复制代码

output_18_5.png

3
复制代码

output_18_7.png

0
复制代码

output_18_9.png

1
复制代码

output_18_11.png

Fix template image size

for meth in methods:
    img2 = img.copy()  # fresh canvas for each method

    method = eval(meth)  # map the string, e.g. 'cv2.TM_SQDIFF', to its integer constant
    print(method)
    res = cv2.matchTemplate(img2, template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # For squared-difference variants the best score is the minimum;
    # every other method peaks at the maximum.
    is_sq_diff = method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]
    top_left = min_loc if is_sq_diff else max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    cv2.rectangle(img2, top_left, bottom_right, 255, 2)  # white 2px match box

    plt.subplot(121)
    plt.imshow(res, cmap = 'gray')
    plt.xticks([]); plt.yticks([])  # hide axis ticks
    plt.subplot(122)
    plt.imshow(img2, cmap = 'gray')
    plt.xticks([]); plt.yticks([])
    plt.suptitle(meth)  # label the pair with the method name
    plt.show()
复制代码
4
复制代码

output_20_1.png

5
复制代码

output_20_3.png

2
复制代码

output_20_5.png

3
复制代码

output_20_7.png

0    
复制代码

output_20_9.png

1
复制代码

output_20_11.png

  • It can be seen from the above that the matching effect with normalization is good

match multiple objects

SM = cv2.imread('SM.png', -1 ) # flag -1: load unchanged (color) — used as the drawing canvas
sm = cv2.imread('SM.png', 0)   # grayscale copy used for matching
mary = cv2.imread('Mary.png',0)   # template 1, grayscale
brick = cv2.imread('brick.png',0) # template 2, grayscale

h_m, w_m = mary.shape[:2]   # template sizes, needed later to draw the boxes
h_b, w_b = brick.shape[:2]
cv_show('SM', sm)
cv_show('mary', mary)
cv_show('brick', brick)
复制代码
res_M = cv2.matchTemplate(sm, mary, cv2.TM_SQDIFF_NORMED)   # difference map: lower == better match
res_B = cv2.matchTemplate(sm, brick, cv2.TM_CCOEFF_NORMED)  # correlation map: higher == better match
复制代码

detect Mary

# Keep every location whose normalized squared difference is below 0.05
threshold = 0.05
# Multiple targets are expected, so filter by threshold instead of taking the single extremum
loc = np.where(res_M <= threshold)

# Copy ONCE, outside the loop — copying inside would keep only the last rectangle
res1 = SM.copy()
# np.where returns (rows, cols); reverse to (cols, rows) so zip yields (x, y) points
for pt in zip(*loc[::-1]):
    corner = (pt[0] + w_m, pt[1] + h_m)
    # Colored rectangles need the color canvas (same idea as contour drawing)
    res1 = cv2.rectangle(res1, pt, corner, (0 ,255,0), 2)

cv_show('Mary_Dect', res1)
复制代码

Detect bricks

# Keep every location whose normalized correlation coefficient exceeds 0.8
threshold = 0.8
# Filtering by threshold (rather than the maximum) detects several bricks at once
loc = np.where(res_B >= threshold)

res2 = SM.copy()  # copy once, outside the loop, or only the last box would survive
for pt in zip(*loc[::-1]):  # loc is (rows, cols); reversed + zipped gives (x, y) top-left corners
    # Green boxes require the color canvas (same idea as contour drawing)
    res2 = cv2.rectangle(res2, pt, (pt[0] + w_b, pt[1] + h_b), (0 ,255,0), 2)

cv_show('Brick_Dect', res2)
复制代码
winds([SM, res1, res2])  # side-by-side comparison: original vs Mary detections vs brick detections
复制代码
复制代码

Final detection renderings:

DM.PNG

Guess you like

Origin juejin.im/post/7087480825289113636