Project 2: Implementing image stitching for two images (Image stitching)

The code is as follows (example):
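The script below detects SURF keypoints in both images, matches their descriptors with a FLANN KD-tree matcher, filters the matches with Lowe's ratio test, estimates a homography with RANSAC, warps the second image into the first image's frame, and finally blends the two images across their overlap.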

# Image stitching
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
import time


MIN = 10  # minimum number of good matches required before estimating the homography
start_time = time.time()
m1 = cv.imread('Pic1.png')
img2 = cv.imread('Pic2.png')
# cv.imshow("m1",m1)
# cv.imshow("img2",img2)
height2 = int(img2.shape[0])
width2 = int(img2.shape[1])
dim = (width2, height2)

img1 = cv.resize(m1, dim, interpolation=cv.INTER_AREA)

gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)  # cv.imread returns BGR, so convert from BGR
gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

print('img1 Dimensions : ', img1.shape)
print('img2 Dimensions : ', img2.shape)
plt.imshow(cv.cvtColor(img1, cv.COLOR_BGR2RGB)), plt.show()
plt.imshow(cv.cvtColor(img2, cv.COLOR_BGR2RGB)), plt.show()

# SURF
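# SURF requires an opencv-contrib build with the nonfree modules enabled.
# hessianThreshold=10000 keeps only strong keypoints, extended=False produces
# 64-dimensional descriptors, and upright=True skips orientation estimation.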
surf = cv.xfeatures2d.SURF_create(10000, nOctaves=4, extended=False, upright=True)

kp1, describe1 = surf.detectAndCompute(gray1, None)
kp2, describe2 = surf.detectAndCompute(gray2, None)

# FLANN
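# FLANN approximate matcher: a KD-tree index (5 trees) over the SURF descriptors;
# 'checks' bounds how many tree leaves are visited per query (higher = more accurate but slower).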
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)

flann = cv.FlannBasedMatcher(indexParams, searchParams)
match = flann.knnMatch(describe1, describe2, k=2)

good = []
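# Lowe's ratio test: keep a match only when the best distance is clearly
# smaller than the second-best, discarding ambiguous correspondences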
for m, n in match:
    if m.distance < 0.75 * n.distance:
        good.append(m)
##################################
# RANSAC: findHomography
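# With at least MIN good matches, estimate the homography M (img1 -> img2) with RANSAC
# (reprojection threshold 5 px); warping img2 by the inverse of M brings it into img1's
# frame on a canvas wide enough to hold both images side by side.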
if len(good) > MIN:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    ano_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv.findHomography(src_pts, ano_pts, cv.RANSAC, 5.0)
    warpImg = cv.warpPerspective(img2, np.linalg.inv(M), (img1.shape[1] + img2.shape[1], img2.shape[0]))
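    # 'direct' is the simple stitch: img1 pasted over the left part of the warped canvas, no blending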
    direct = warpImg.copy()
    direct[0:img1.shape[0], 0:img1.shape[1]] = img1
    simple = time.time()
###################################

    # cv.namedWindow("Result", cv.WINDOW_NORMAL)
    # cv.imshow("Result",warpImg)
    rows, cols = img1.shape[:2]

    left = 0
    right = cols

    # Find the overlap band: the leftmost and rightmost columns where both img1 and warpImg have content
    for col in range(0, cols):
        if img1[:, col].any() and warpImg[:, col].any():  # leftmost overlapping column
            left = col
            break

    for col in range(cols - 1, 0, -1):
        if img1[:, col].any() and warpImg[:, col].any():  # rightmost overlapping column
            right = col
            break

    res = np.zeros([rows, cols, 3], np.uint8)
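    # Feathered blend across the overlap: alpha ramps from 0 at 'left' to 1 at 'right',
    # so pixels near the left edge favor img1 and pixels near the right edge favor warpImg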

    for row in range(0, rows):
        for col in range(0, cols):
            if not img1[row, col].any():
                res[row, col] = warpImg[row, col]
            elif not warpImg[row, col].any():
                res[row, col] = img1[row, col]
            else:
                srcImgLen = float(abs(col - left))
                testImgLen = float(abs(col - right))
                alpha = srcImgLen / (srcImgLen + testImgLen)
                res[row, col] = np.clip(img1[row, col] * (1 - alpha) + warpImg[row, col] * alpha, 0, 255)

    warpImg[0:img1.shape[0], 0:img1.shape[1]] = res
    final = time.time()
    img3 = cv.cvtColor(direct, cv.COLOR_BGR2RGB)
    plt.imshow(img3, ), plt.show()
    img4 = cv.cvtColor(warpImg, cv.COLOR_BGR2RGB)
    plt.imshow(img4, ), plt.show()
    print("simple stitch cost %f" % (simple - start_time))
    print("\n total cost %f" % (final - start_time))
    # cv.imwrite("simpletons.png", direct)
    # cv.imwrite("bestowal.png", warpImg)
    cv.imshow("pictures", img4)
    cv.waitKey()
    cv.destroyAllWindows()

else:
    print("not enough matches!")


Reposted from blog.csdn.net/qq_45825952/article/details/124986848