OpenCV Miscellany (Part 4): Feature Detection Algorithms

This section summarizes common feature detection algorithms:

1. Harris corner detection

import cv2
import numpy as np

img = cv2.imread('../../car.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)                    # cornerHarris expects a float32 image
dst = cv2.cornerHarris(gray, 2, 23, 0.04)  # blockSize=2, Sobel aperture=23, k=0.04
img[dst > 0.01 * dst.max()] = [0, 0, 255]  # mark strong corner responses in red
cv2.imshow('corners', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

Result image:
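
The 0.01 * dst.max() threshold controls how many pixels are kept as corners. As a small follow-up sketch (my own addition, not part of the original post), the surviving corner coordinates can also be collected as an array for further processing:

import cv2
import numpy as np

img = cv2.imread('../../car.jpg')
gray = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
dst = cv2.cornerHarris(gray, 2, 23, 0.04)

# (row, col) indices of every pixel whose Harris response passes the threshold
corners = np.argwhere(dst > 0.01 * dst.max())
print('corner pixels found:', len(corners))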

2. Feature extraction and description with DoG and SIFT

   Note: this requires the opencv-contrib-python extension package. Because SIFT and SURF were both patent-protected, they are kept in OpenCV's xfeatures2d module.

   Harris corner detection is quite sensitive to changes in image scale, so SIFT's scale-invariant features are used to solve this problem: images of different sizes yield consistent results. SIFT does not itself detect keypoints (those can be detected with Difference of Gaussians); instead, SIFT describes the region around each keypoint with a feature vector.

   Difference of Gaussians (DoG) is the result of filtering the same image with Gaussians of different strengths and taking the difference; the end result highlights regions of interest (interest points).
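
As a minimal sketch of that idea (the sigma values below are arbitrary choices of mine, not from the original post): blur the same image at two scales and subtract:

import cv2

gray = cv2.imread('../../car.jpg', cv2.IMREAD_GRAYSCALE)

# blur the same image at two different scales (kernel size derived from sigma)
blur_fine = cv2.GaussianBlur(gray, (0, 0), sigmaX=1.0)
blur_coarse = cv2.GaussianBlur(gray, (0, 0), sigmaX=2.0)

# the (saturating) difference acts as a band-pass filter over the image
dog = cv2.subtract(blur_fine, blur_coarse)
cv2.imshow('DoG', dog)
cv2.waitKey(0)
cv2.destroyAllWindows()

The SIFT example below detects keypoints and computes their descriptors in one call: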

import cv2
import numpy as np

img = cv2.imread('../../car.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create()
keypoints, descriptor = sift.detectAndCompute(gray, None)  # detect keypoints and compute descriptors in one pass

img = cv2.drawKeypoints(image=img, outImage=img, keypoints=keypoints,
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                        color=(51, 163, 236))
cv2.imshow('sift_keypoints', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

Result image:
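
A quick check, appended to the script above (my own addition), confirms what detectAndCompute returns: a list of cv2.KeyPoint objects and an N x 128 float array:

print('keypoints detected:', len(keypoints))
print('descriptor array shape:', descriptor.shape)  # (N, 128) for SIFT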

3. Feature extraction and detection with the Fast Hessian algorithm and SURF

   The SURF feature detection algorithm was published by Herbert Bay in 2006. It is several times faster than SIFT and builds on SIFT's ideas. Because SIFT and SURF were both patent-protected, they are kept in OpenCV's xfeatures2d module.

import cv2
import sys
import numpy as np

img = cv2.imread('../../raw1.jpg')
alg = "SURF"

def fd(algorithm):
  # look up a detector by name; SURF takes a Hessian threshold
  # (read from argv if given, otherwise 4000)
  algorithms = {
    "SIFT": cv2.xfeatures2d.SIFT_create(),
    "SURF": cv2.xfeatures2d.SURF_create(float(sys.argv[3]) if len(sys.argv) == 4 else 4000),
    "ORB": cv2.ORB_create()
  }
  return algorithms[algorithm]

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

fd_alg = fd(alg)
keypoints, descriptor = fd_alg.detectAndCompute(gray, None)

img = cv2.drawKeypoints(image=img, outImage=img, keypoints=keypoints,
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,  # same as flags=4
                        color=(51, 163, 236))

cv2.imshow('keypoints', img)
while True:
  # press q to quit
  if cv2.waitKey(int(1000 / 12)) & 0xff == ord("q"):
    break
cv2.destroyAllWindows()
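
The argument to SURF_create is the Hessian threshold: higher values keep only the strongest responses, so fewer keypoints survive. A small sketch of the effect, appended to the script above (the threshold values are arbitrary examples; note that recent OpenCV builds only ship SURF when compiled with the nonfree option):

# compare keypoint counts at several Hessian thresholds
for threshold in (1000, 4000, 8000):
    surf = cv2.xfeatures2d.SURF_create(threshold)
    kp = surf.detect(gray, None)
    print('threshold %d -> %d keypoints' % (threshold, len(kp)))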

4. FAST (Features from Accelerated Segment Test) draws a circle of 16 pixels around a candidate pixel, which turns out to work well. FAST compares each pixel on the circle against the center pixel's value offset by a threshold: if a contiguous run of circle pixels is all brighter than the center plus the threshold, or all darker than the center minus it, the center is considered a corner.

# -*- coding: utf-8 -*-
'''
FAST algorithm for corner detection
'''
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('../../raw1.jpg', 0)  # 0 loads the image as grayscale

# detect with nonmaxSuppression enabled (the default)
fast = cv2.FastFeatureDetector_create()
kp = fast.detect(img, None)
img2 = cv2.drawKeypoints(img, kp, None, color=(255, 0, 0))
cv2.imwrite('fast_true.jpg', img2)

# print the default parameters
print("Threshold: ", fast.getThreshold())
print("nonmaxSuppression", fast.getNonmaxSuppression())

# disable nonmaxSuppression and detect again
fast.setNonmaxSuppression(0)
kp = fast.detect(img, None)
img3 = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0))
cv2.imwrite('fast_false.jpg', img3)

# AND the two results together and show all three side by side
res = cv2.bitwise_and(img2, img3)
res = np.hstack([img2, img3, res])
cv2.imshow('res', res)

cv2.waitKey(0)
cv2.destroyAllWindows()

Result images: the first shows FAST with nonmaxSuppression enabled, the second with it disabled, and the third is the bitwise AND of the first two.
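
The threshold used in the brighter/darker test is adjustable too. As a brief follow-up sketch (my own addition; the value 32 is an arbitrary example), raising it makes the test stricter, so fewer pixels qualify as corners:

fast = cv2.FastFeatureDetector_create()
fast.setThreshold(32)        # the default is 10; higher means fewer corners
kp = fast.detect(img, None)
print('corners at threshold 32:', len(kp))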

5. ORB feature matching

   ORB (Oriented FAST and Rotated BRIEF) aims to optimize and speed up feature matching; a crucial step is using BRIEF in a rotation-aware way.

import numpy as np
import cv2
from matplotlib import pyplot as plt


# the original post left these image paths blank; fill in your own two images
img1 = cv2.imread('', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('', cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # Hamming distance suits binary descriptors
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)    # best (smallest-distance) matches first
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:40], None, flags=2)
plt.imshow(img3), plt.show()
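
Because ORB descriptors are binary strings, cv2.NORM_HAMMING counts differing bits, so a smaller distance means a better match. A quick way to inspect match quality, appended to the script above (my own addition):

# each DMatch records which descriptors matched and their Hamming distance
for m in matches[:5]:
    print('queryIdx=%d trainIdx=%d distance=%.0f' % (m.queryIdx, m.trainIdx, m.distance))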

6. FLANN matching

import numpy as np
import cv2
from matplotlib import pyplot as plt

queryImage = cv2.imread('images/bathory_album.jpg',0)
trainingImage = cv2.imread('images/vinyls.jpg',0)

# create SIFT and detect/compute
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(queryImage,None)
kp2, des2 = sift.detectAndCompute(trainingImage,None)

# FLANN matcher parameters
FLANN_INDEX_KDTREE = 1
indexParams = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
searchParams = dict(checks=50)   # or pass empty dictionary

flann = cv2.FlannBasedMatcher(indexParams,searchParams)

matches = flann.knnMatch(des1,des2,k=2)

# prepare an empty mask to draw good matches
matchesMask = [[0,0] for i in range(len(matches))]

# David G. Lowe's ratio test, populate the mask
for i,(m,n) in enumerate(matches):
    if m.distance < 0.7*n.distance:
        matchesMask[i]=[1,0]

drawParams = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = 0)

resultImage = cv2.drawMatchesKnn(queryImage,kp1,trainingImage,kp2,matches,None,**drawParams)

plt.imshow(resultImage), plt.show()
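
The kd-tree index above suits float descriptors such as SIFT's. For binary descriptors such as ORB's, FLANN is normally used with an LSH index instead; a sketch of the usual parameters (the values follow common OpenCV examples):

FLANN_INDEX_LSH = 6
indexParams = dict(algorithm=FLANN_INDEX_LSH,
                   table_number=6,        # number of hash tables
                   key_size=12,           # bits per hash key
                   multi_probe_level=1)   # neighbouring buckets to probe
flann = cv2.FlannBasedMatcher(indexParams, dict(checks=50))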


Reposted from blog.csdn.net/mago2015/article/details/81395451