[Analysis of a SURF Algorithm Example]

# -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 17:05:53 2019

@author: zzx
"""

from matplotlib import pyplot as plt
from imagedt.decorator import time_cost
import cv2
print('cv version: ', cv2.__version__)
# print the OpenCV version

def bgr_rgb(img):
    # OpenCV loads images in BGR channel order, while matplotlib expects RGB.
    (r, g, b) = cv2.split(img)   # split the image into its three channels (really B, G, R despite the variable names)
    return cv2.merge([b, g, r])  # merge them back with the first and last channels swapped, i.e. BGR -> RGB
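
# An alternative minimal sketch (bgr_rgb_alt is just an illustrative name, not used
# below): the same BGR -> RGB swap can be done in a single call with cv2.cvtColor.
def bgr_rgb_alt(img):
    # COLOR_BGR2RGB reorders the channels without changing any pixel values
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)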

# orb_detect below finds matches between the two images, draws the 100 best ones,
# and passes the result through bgr_rgb so the colors display correctly in matplotlib.

# Note: orb_detect is never actually used -- its call in __main__ is commented out,
# and what runs instead is sift_detect's detectAndCompute.

def orb_detect(image_a, image_b):
    # feature match
    orb = cv2.ORB_create()  # create the ORB detector; it builds an image pyramid to extract multi-scale features

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(image_a, None)
    kp2, des2 = orb.detectAndCompute(image_b, None)
    # extract the keypoints and their descriptors from each image

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Brute-force matcher: the first argument selects the distance measure (Hamming distance
    # suits ORB's binary descriptors); with crossCheck=True the matcher only returns pairs (i, j)
    # where the i-th descriptor in set A and the j-th descriptor in set B are each other's best match.

    # Match descriptors.
    matches = bf.match(des1, des2)
    # returns one DMatch per descriptor in des1, namely its best match in des2
    # (see the small inspection sketch after this function)

    # Sort them in ascending order of distance, so the best matches (smallest distance) come first.
    matches = sorted(matches, key=lambda x: x.distance)
 
    # Draw the first 100 matches.
    img3 = cv2.drawMatches(image_a, kp1, image_b, kp2, matches[:100], None, flags=2)
   
    return bgr_rgb(img3)
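
# A small inspection sketch (illustrative only, not called anywhere in this script):
# bf.match returns DMatch objects, and the sort above orders them by their distance
# attribute; queryIdx / trainIdx are the keypoint indices in the first / second image.
def print_top_matches(matches, n=5):
    for m in matches[:n]:
        print(m.queryIdx, m.trainIdx, m.distance)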

@time_cost
def sift_detect(img1, img2, detector='surf'):
    if detector.startswith('si'):
        print("sift detector......")
        sift = cv2.xfeatures2d.SIFT_create()
    else:
        print("surf detector......")
        sift = cv2.xfeatures2d.SURF_create()

    # find the keypoints and descriptors with the chosen detector
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # BFMatcher with default params (L2 norm, no cross-check)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # for each descriptor in des1, return its k=2 nearest neighbours in des2

    # Apply Lowe's ratio test: keep a match only if it is clearly better than the runner-up
    good = [[m] for m, n in matches if m.distance < 0.5 * n.distance]

    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)

    return bgr_rgb(img3)
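
# Availability note (a sketch, assuming OpenCV 4.4+ with the opencv-contrib package):
# SURF is patented and only works in contrib builds compiled with the non-free
# algorithms enabled, while SIFT is also exposed in the main module as cv2.SIFT_create().
# A fallback along these lines could be used (create_detector is an illustrative name):
def create_detector(name='surf'):
    if name == 'surf':
        try:
            return cv2.xfeatures2d.SURF_create()
        except (AttributeError, cv2.error):
            pass  # contrib module missing or built without the non-free algorithms
    return cv2.SIFT_create()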


if __name__ == "__main__":
    # load image
    image_a = cv2.imread('./img1.jpg')
    image_b = cv2.imread('./img2.png')

    # ORB
    # img = orb_detect(image_a, image_b)

    # SIFT or SURF
    img = sift_detect(image_a, image_b)

    plt.imshow(img)
    plt.show()
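
    # A possible extension (sketch; 'matches.jpg' is an illustrative filename): pass
    # detector='sift' to compare the two detectors, and convert back to BGR before
    # saving with OpenCV, since img is already RGB at this point.
    # img_sift = sift_detect(image_a, image_b, detector='sift')
    # cv2.imwrite('matches.jpg', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))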


Reposted from blog.csdn.net/weixin_44190201/article/details/88549521