[CV] A license plate recognition method, plus one plate rectification method


0. License plate recognition

https://blog.csdn.net/lilai619/article/details/79695109
1. git clone the project

cd /opt/deploy
git clone  https://github.com/sergiomsilva/alpr-unconstrained.git

2. Download the pre-training model

 cd alpr-unconstrained/
 bash get-networks.sh 

3. Delete the darknet bundled with the project and replace it with the official darknet

rm -rf darknet
git clone https://github.com/pjreddie/darknet

4. Modify the official darknet Makefile to enable GPU support, then build. Since my CUDA/cuDNN are installed in the default location, only three places need to change:

cd darknet/
vim Makefile

Around line 1-2, enable GPU and cuDNN support — change:
GPU=0
CUDNN=0
to:
GPU=1
CUDNN=1
Around line 24, point NVCC at the CUDA install — change:
NVCC=nvcc
to:
NVCC=/usr/local/cuda/bin/nvcc

:wq

5. Compile

make all -j6   # adjust -j to your CPU core count

6. Re-enter the main directory

cd /opt/alpr-unconstrained
cp -R data/* darknet/data/

7. Change file names

vim data/ocr/ocr-net.names 

0
1
2
3
4
5
6
7
8
9
A
B
C
D
E
F
G
H
I
J
K
L
M
N
P
Q
R
S
T
U
V
W
X
Y
Z
京
沪
津
渝
冀
晋
蒙
辽
吉
黑
苏
浙
皖
闽
赣
鲁
豫
鄂
湘
粤
桂
琼
川
贵
云
藏
陕
甘
青
宁
新

Edit the data file so the class count and file paths point to your training data:

vim data/ocr/ocr-net.data

classes=66
names=data/ocr/ocr-net.names
train=data/ocr/train.txt
backup=data/ocr/output

Create output directory

mkdir -p data/ocr/output

CFG layers and modify training parameters of the network layer

cp /opt/deploy/darknet/cfg/yolov3.cfg data/ocr/ocr-net.cfg

vim data/ocr/ocr-net.cfg 

Set the cfg parameters according to your GPU and available memory.

For training, set batch and subdivisions near the top of the cfg:
batch=64
subdivisions=4

[net]
# Testing
# batch=1
# subdivisions=1
# Training
 batch=64
 subdivisions=8

......

[convolutional]
size=1
stride=1
pad=1
filters=33###75

activation=linear

[yolo]
mask = 6,7,8
anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
classes=6###20
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=0###1

......

[convolutional]
size=1
stride=1
pad=1
filters=33###75
activation=linear

[yolo]
mask = 3,4,5
anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
classes=6###20
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=0###1

......

[convolutional]
size=1
stride=1
pad=1
filters=33###75
activation=linear

[yolo]
mask = 0,1,2
anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
classes=6###20
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=0###1
————————————
How the number of filters is computed: filters = 3 x (classes + 5); the factor 3 comes from the number of anchors per scale (see the YOLOv3 paper).
For example, with 66 classes: 3 * (66 + 5) = 213.

1. A license plate rectification method

https://github.com/zeusees/HyperLPR
Here Insert Picture Description
Here Insert Picture Description
Here Insert Picture Description
Here Insert Picture Description
part of the source code (I added a point comment)

def findContoursAndDrawBoundingBox(image_rgb):
    """Locate character-like contours on a license-plate crop and warp the
    plate into a canonical 136x36 view.

    Sweeps an adaptive-threshold offset to find character bounding boxes
    under varying illumination, RANSAC-fits one line through the box
    top-left corners and one through the bottom-right corners, perspective
    warps the band between the two lines to 136x36, then deskews it.

    Args:
        image_rgb: BGR plate crop (numpy array, as produced by cv2).
            # NOTE(review): despite the name, the code treats it as BGR
            # (cv2.COLOR_BGR2GRAY) — confirm against callers.

    Returns:
        The rectified 136x36 plate image (numpy array).
    """
    line_upper = []       # top-left corners of candidate character boxes
    line_lower = []       # bottom-right corners of candidate character boxes
    line_experiment = []  # both corners interleaved (kept for parity; unused downstream here)

    gray_image = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)

    # Sweep the threshold offset: each pass may reveal character boxes that
    # a single fixed threshold would miss under uneven lighting.
    for k in np.linspace(-50, 0, 15):
        # Adaptive threshold: the threshold is computed per 17x17
        # neighbourhood (local mean minus k), so differently-lit regions of
        # the same image each get a suitable threshold.
        binary_niblack = cv2.adaptiveThreshold(
            gray_image, 255,
            cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 17, k)
        # OpenCV >= 4 returns (contours, hierarchy).
        contours, _ = cv2.findContours(
            binary_niblack.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            # Smallest upright rectangle enclosing the contour.
            x, y, w, h = cv2.boundingRect(contour)
            # Keep boxes shaped like characters: tallish with moderate
            # area, or very thin slivers with tiny area (e.g. a "1").
            looks_like_char = h / float(w) > 0.7 and 100 < h * w < 1200
            looks_like_sliver = h / float(w) > 3 and h * w < 100
            if looks_like_char or looks_like_sliver:
                line_upper.append([x, y])
                line_lower.append([x + w, y + h])
                line_experiment.append([x, y])
                line_experiment.append([x + w, y + h])

    # Pad 30 px top and bottom by replicating edge pixels so the fitted
    # lines can extend past the crop without sampling outside the image.
    rgb = cv2.copyMakeBorder(image_rgb, 30, 30, 0, 0, cv2.BORDER_REPLICATE)

    # RANSAC line fits: lower edge through the bottom-right corners,
    # upper edge through the top-left corners. The +/-3 offset widens the
    # band slightly. Call order preserved (A then B) in case the fitter
    # draws from a shared random state.
    leftyA, rightyA = fitLine_ransac(np.array(line_lower), 3)
    leftyB, rightyB = fitLine_ransac(np.array(line_upper), -3)

    cols = rgb.shape[1]

    # Map the quadrilateral bounded by the two fitted lines onto a fixed
    # 136x36 canvas (standard plate aspect used by the OCR stage).
    pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA],
                           [cols - 1, rightyB], [0, leftyB]])
    pts_map2 = np.float32([[136, 36], [0, 36], [136, 0], [0, 0]])
    mat = cv2.getPerspectiveTransform(pts_map1, pts_map2)
    image = cv2.warpPerspective(rgb, mat, (136, 36), flags=cv2.INTER_CUBIC)

    # Final skew/rotation correction.
    image, M = deskew.fastDeskew(image)
    return image
Published 89 original articles · won praise 108 · views 20000 +

Guess you like

Origin blog.csdn.net/weixin_43435675/article/details/99625613