python opencv 已知图片和mask,抠出mask区域

目录

前言

目标

思路

完整代码

Reference

交流


前言

        在ICDAR2021竞赛中,需要取出图片中需要分割的样本,对样本进行仿射变换,背景替换,生成更丰富的样本。

目标

        结合mask取出input中的两张卡片

         

                 input                                        mask

 

                                            cards

思路

1.利用cv2.findContours函数检测物体的轮廓;

2.利用cv2.contourArea函数筛除不合格的轮廓点集;

3.利用cv2.approxPolyDP函数求检测到的轮廓点集中的顶点坐标;

4.有了card的四个顶点,就可以求出card近似的长和宽,利用cv2.getPerspectiveTransform函数求出仿射变换矩阵M;

5.利用cv2.warpPerspective函数将input图片中的card提出;

完整代码

#-*- coding:utf-8 -*-
import cv2
import numpy as np
import os
from tqdm import tqdm


def order_points(pts):
    """Order the four vertices of a quadrilateral clockwise.

    Returns the points as [bottom-left, top-left, top-right, bottom-right],
    matching the destination rectangle [[0, h], [0, 0], [w, 0], [w, h]].
    """
    by_x = pts[np.argsort(pts[:, 0]), :]
    left_pair = by_x[:2, :]
    right_pair = by_x[2:, :]
    # Left side: larger y first (bottom-left before top-left).
    left_pair = left_pair[np.argsort(left_pair[:, 1])[::-1], :]
    # Right side: smaller y first (top-right before bottom-right).
    right_pair = right_pair[np.argsort(right_pair[:, 1]), :]
    return np.concatenate((left_pair, right_pair), axis=0)


def get_doc_area(imgs_root, masks_root, dst_path, dst_sub_dirs):
    """Extract quadrilateral document (card) regions from images via masks.

    For each (image, mask) pair: binarize the mask, find contours,
    approximate each contour with a quadrilateral, compute a perspective
    transform mapping the quad to an axis-aligned rectangle, and save the
    warped image and mask crops.

    Args:
        imgs_root: directory containing the input images.
        masks_root: directory containing the corresponding masks
            (paired with images by sorted filename order).
        dst_path: output root directory.
        dst_sub_dirs: two subdirectory names [docs_dir, masks_dir] for the
            warped images and warped masks respectively.
    """
    # Sort both listings so images and masks pair up deterministically;
    # os.listdir order is arbitrary and filesystem-dependent.
    imgs_nm = sorted(os.listdir(imgs_root))
    masks_nm = sorted(os.listdir(masks_root))
    for sub_dir in dst_sub_dirs:
        sub_path = os.path.join(dst_path, sub_dir)
        if not os.path.exists(sub_path):
            os.makedirs(sub_path)
            print('make dir:{}'.format(sub_path))

    for i in tqdm(range(len(imgs_nm))):
        nm_head = imgs_nm[i].split('_')[0]
        img_path = os.path.join(imgs_root, imgs_nm[i])
        mask_path = os.path.join(masks_root, masks_nm[i])
        img = cv2.imread(img_path)
        mask = cv2.imread(mask_path)

        gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

        # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3
        # but only (contours, hierarchy) in OpenCV 4; taking the last two
        # elements works with either version.
        contours, hierarchy = cv2.findContours(
            binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        for j, cnt in enumerate(contours):
            # cv2.contourArea is never negative, so the original `area < 0`
            # check filtered nothing; skip degenerate (zero-area) contours.
            area = cv2.contourArea(cnt)
            if area <= 0:
                continue
            epsilon = 0.005 * cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)
            approx = np.squeeze(approx)
            # Only quadrilaterals can be rectified with a single homography.
            if len(approx) != 4:
                continue
            arbitrary_points = np.float32(order_points(approx))
            # order_points yields [bottom-left, top-left, top-right,
            # bottom-right]; estimate the card's height and width from them.
            # Round to int: cv2.warpPerspective requires an integer dsize.
            h = int(round(arbitrary_points[0][1] - arbitrary_points[1][1]))
            w = int(round(arbitrary_points[3][0] - arbitrary_points[0][0]))
            if h <= 2 or w <= 2:
                # Too small to survive the 1-pixel border trim below.
                continue
            rectangle = np.float32([[0, h], [0, 0], [w, 0], [w, h]])

            M = cv2.getPerspectiveTransform(arbitrary_points, rectangle)
            doc_dst = cv2.warpPerspective(img, M, (w, h))
            mask_dst = cv2.warpPerspective(mask, M, (w, h))

            # Trim a 1-pixel border to drop interpolation artifacts at the
            # crop edges (equivalent to the original padding = -1 slicing).
            padding = 1
            doc_dst = doc_dst[padding:h - padding, padding:w - padding]
            mask_dst = mask_dst[padding:h - padding, padding:w - padding]
            cv2.imwrite(os.path.join(dst_path, dst_sub_dirs[0], nm_head + '_doc_{}.jpg'.format(j)), doc_dst)
            cv2.imwrite(os.path.join(dst_path, dst_sub_dirs[1], nm_head + '_mask_{}.png'.format(j)), mask_dst)

if __name__=='__main__':
    # Hard-coded dataset locations for the ICDAR 2021 SBR-Doc dataset.
    imgs_root = r'G:\ICDAR\TrainVal\SBR-Doc-Dataset-V2\input'          # input photos
    masks_root = r'G:\ICDAR\TrainVal\SBR-Doc-Dataset-V2\edge_segmentation_gt'  # ground-truth masks
    dst_path = r'G:\ICDAR\Gen\csdn'                                    # output root
    # Output subdirectories: warped card crops and their warped masks.
    dst_sub_dirs = ['docs', 'edge']
    get_doc_area(imgs_root, masks_root, dst_path, dst_sub_dirs)

Reference

1.矩形框四个顶点顺时针排序,https://blog.csdn.net/qq_36560894/article/details/113092614

2.cv2.findContours,https://blog.csdn.net/hjxu2016/article/details/77833336/

3.cv2.approxPolyDP,https://vimsky.com/examples/detail/python-method-cv2.approxPolyDP.html

4.仿射变换,https://www.cnblogs.com/pheasant/p/13226182.html

交流

        欢迎大家进群交流学习

猜你喜欢

转载自blog.csdn.net/qq_36076233/article/details/118380676
今日推荐