Python opencv knows the picture and mask, and cuts out the mask area

Table of contents

foreword

Target

train of thought

full code

References

Communicate


foreword

        In the ICDAR2021 competition, it is necessary to take out the samples that need to be segmented in the picture, perform affine transformation on the samples, and replace the background to generate more abundant samples.

Target

        Using the mask, extract the two cards from the input image

         

                 input                                        mask

 

                                            cards

train of thought

1. Use the cv2.findContours function to detect the contours of the objects;

2. Use the cv2.contourArea function to screen out unqualified contour point sets;

3. Use the cv2.approxPolyDP function to find the vertex coordinates in the detected contour point set;

4. With the four vertices of the card, the approximate height and width of the card can be obtained, and the perspective transformation matrix M can be computed with the cv2.getPerspectiveTransform function;

5. Use the cv2.warpPerspective function to extract the card from the input image;

full code

#-*- coding:utf-8 -*-
import cv2
import numpy as np
import os
from tqdm import tqdm


def order_points(pts):
    """Order four rectangle corners clockwise (in image coordinates).

    Returns the points as [bottom-left, top-left, top-right, bottom-right].
    Note that y grows downward in image coordinates, so "bottom" means the
    larger y value.
    """
    by_x = pts[np.argsort(pts[:, 0])]
    left_pair = by_x[:2]
    right_pair = by_x[2:]
    # Left side: bottom (larger y) first, then top.
    left_pair = left_pair[np.argsort(left_pair[:, 1])[::-1]]
    # Right side: top (smaller y) first, then bottom.
    right_pair = right_pair[np.argsort(right_pair[:, 1])]
    return np.vstack((left_pair, right_pair))


def get_doc_area(imgs_root, masks_root, dst_path, dst_sub_dirs, min_area=0):
    """Extract quadrilateral card regions from images using their masks.

    For each image/mask pair, finds contours in the binarized mask, keeps
    contours whose polygon approximation has exactly 4 vertices, rectifies
    each region with a perspective warp, and writes the cropped card and
    its warped mask to ``dst_path/dst_sub_dirs[0]`` and
    ``dst_path/dst_sub_dirs[1]`` respectively.

    Parameters:
        imgs_root: directory of input images.
        masks_root: directory of segmentation masks; filenames are paired
            with the images by sorted order (assumed to correspond 1:1).
        dst_path: root output directory.
        dst_sub_dirs: two subdirectory names, [cards_dir, masks_dir].
        min_area: skip contours smaller than this (default 0 keeps all,
            matching the original behavior where the area check was a no-op).
    """
    # os.listdir order is arbitrary; sort so that images and masks pair
    # up deterministically by index.
    imgs_nm = sorted(os.listdir(imgs_root))
    masks_nm = sorted(os.listdir(masks_root))
    for sub_dir in dst_sub_dirs:
        out_dir = os.path.join(dst_path, sub_dir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
            print('make dir:{}'.format(out_dir))

    for i in tqdm(range(len(imgs_nm))):
        nm_head = imgs_nm[i].split('_')[0]
        img = cv2.imread(os.path.join(imgs_root, imgs_nm[i]))
        mask = cv2.imread(os.path.join(masks_root, masks_nm[i]))

        gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

        # cv2.findContours returns (image, contours, hierarchy) on
        # OpenCV 3.x but (contours, hierarchy) on 4.x; taking the last
        # two elements works on both.
        contours, hierarchy = cv2.findContours(
            binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        for j, cnt in enumerate(contours):
            # The original `area < 0` test could never fire because
            # contourArea is non-negative; min_area makes the noise
            # filter usable while defaulting to the old behavior.
            if cv2.contourArea(cnt) < min_area:
                continue
            # Approximate the contour with a polygon; only 4-vertex
            # shapes are treated as cards.
            epsilon = 0.005 * cv2.arcLength(cnt, True)
            approx = np.squeeze(cv2.approxPolyDP(cnt, epsilon, True))
            if len(approx) != 4:
                continue
            # Corners ordered [bottom-left, top-left, top-right,
            # bottom-right]; derive approximate card height and width.
            arbitrary_points = np.float32(order_points(approx))
            h = int(arbitrary_points[0][1] - arbitrary_points[1][1])
            w = int(arbitrary_points[3][0] - arbitrary_points[0][0])
            rectangle = np.float32([[0, h], [0, 0], [w, 0], [w, h]])

            M = cv2.getPerspectiveTransform(arbitrary_points, rectangle)
            # dsize must be a tuple of ints (the original passed float32
            # differences, which newer OpenCV rejects).
            doc_dst = cv2.warpPerspective(img, M, (w, h))
            mask_dst = cv2.warpPerspective(mask, M, (w, h))

            # Trim a 1-pixel border to drop interpolation artifacts at
            # the warped edge.
            doc_dst = doc_dst[1:h - 1, 1:w - 1]
            mask_dst = mask_dst[1:h - 1, 1:w - 1]
            cv2.imwrite(os.path.join(dst_path, dst_sub_dirs[0], nm_head + '_doc_{}.jpg'.format(j)), doc_dst)
            cv2.imwrite(os.path.join(dst_path, dst_sub_dirs[1], nm_head + '_mask_{}.png'.format(j)), mask_dst)

if __name__ == '__main__':
    # Paths for the ICDAR2021 SBR-Doc dataset: input photos, their edge
    # segmentation ground-truth masks, and where the crops are written.
    input_images = r'G:\ICDAR\TrainVal\SBR-Doc-Dataset-V2\input'
    input_masks = r'G:\ICDAR\TrainVal\SBR-Doc-Dataset-V2\edge_segmentation_gt'
    output_root = r'G:\ICDAR\Gen\csdn'
    # Subdirectories for the extracted cards and their warped masks.
    output_subdirs = ['docs', 'edge']
    get_doc_area(input_images, input_masks, output_root, output_subdirs)

References

1. The four vertices of the rectangular frame are sorted clockwise, https://blog.csdn.net/qq_36560894/article/details/113092614

2.cv2.findContours,https://blog.csdn.net/hjxu2016/article/details/77833336/

3.cv2.approxPolyDP,https://vimsky.com/examples/detail/python-method-cv2.approxPolyDP.html

4. Affine transformation, https://www.cnblogs.com/pheasant/p/13226182.html

Communicate

        Welcome everyone to join the group exchange and study

Guess you like

Origin blog.csdn.net/qq_36076233/article/details/118380676