Keras notes: ID-card four-corner regression

# -*- coding: utf-8 -*-

import keras
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('Keras version:', keras.__version__)

# Optional: fix the random seeds for reproducibility.
#from numpy.random import seed
#seed(1)
#from tensorflow import set_random_seed
#set_random_seed(2)

import os
from os.path import join
import json
import random
import logging
import codecs

import numpy as np
import cv2

from keras import backend as K
from keras.layers import Input, Dense, Flatten, Lambda
from keras.models import Model, load_model
from keras.optimizers import SGD, Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import multi_gpu_model
#from keras.applications.vgg16 import VGG16
from keras.applications.xception import Xception

# -------------------------config section-------------------------
# Four-corner regression for ID cards.
imagepath = "/workdir/data/cardbounds/"
imagenamelist = "/workdir/data/filelist"
imgfilename = {"front": "front.jpg", "back": "back.jpg"}
jsonfilename = {"front": "cardbound_front.json", "back": "cardbound_back.json"}
train_range_idx = [1, 700]
val_range_idx = [701, 850]
test_range_idx = [851, 900]
gpu_count = 1
gpu_list = "0,1,2,3"    # GPU ids to make visible to TensorFlow
image_size = 299        # square input: image_size = image_h = image_w (299 is Xception's default input size)
output_model_path = "/workdir/models/"
load_weight_filename = "weights.24-245.81.hdf5"
load_weight_flag = False
model_filename = "idcard_corner.h5"
load_model_flag = False
# -------------------------config section-------------------------
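
# For reference, each cardbound_*.json annotation is expected to contain the
# four corner points that the generator below reads via ann['lt']['x'] etc.
# A minimal example (the coordinate values here are illustrative, not from
# the dataset):
#
# {
#     "lt": {"x": 120, "y": 85},
#     "rt": {"x": 980, "y": 90},
#     "rb": {"x": 975, "y": 640},
#     "lb": {"x": 118, "y": 635}
# }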

logging.basicConfig(filename=join(output_model_path, "result.log"), level=logging.INFO)  # basic logging setup

config = tf.ConfigProto()
config.allow_soft_placement = True                   # let TF pick another device if the requested one is unavailable
config.gpu_options.allow_growth = True               # grow GPU memory on demand instead of grabbing it all up front
config.gpu_options.visible_device_list = gpu_list    # GPU ids visible to TensorFlow
sess = tf.Session(config=config)
K.set_session(sess)                                  # register the session with the Keras backend

# ----------------------------------------------------------------------------
imagenames = []
with open(imagenamelist, 'r') as f:                 # read the per-sample directory names from the list file
    for line in f:
        imagenames.append(line.strip())
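
# The filelist is expected to hold one sample directory name per line
# (inferred from how each entry is joined onto dirpath in ImageGenerator);
# the names below are purely illustrative:
#
#   sample_0001
#   sample_0002
#   ...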

# ---- data generator ----
class ImageGenerator:
    def __init__(self,
                 dirpath,
                 range_idx,
                 img_size,
                 batch_size):

        self.img_size = img_size
        self.batch_size = batch_size
        beg_idx, end_idx = range_idx[0], range_idx[1]     # first and last sample index (1-based, inclusive)
        self.samples = []

        for i in range(beg_idx, end_idx + 1):
            for side in ["front", "back"]:
                json_filepath = join(dirpath, imagenames[i - 1], jsonfilename[side])  # annotation file
                if not os.path.exists(json_filepath):
                    continue
                img_filepath = join(dirpath, imagenames[i - 1], imgfilename[side])    # image file
                if not os.path.exists(img_filepath):
                    continue
                ann = json.load(codecs.open(json_filepath, 'r', encoding='utf-8'))    # codecs.open decodes the JSON on read
                idcardcorners = np.float32([ann['lt']['x'], ann['lt']['y'],
                                            ann['rt']['x'], ann['rt']['y'],
                                            ann['rb']['x'], ann['rb']['y'],
                                            ann['lb']['x'], ann['lb']['y']])
                self.samples.append([img_filepath, idcardcorners])   # (image path, corner coordinates)

        self.n = len(self.samples)
        print("sample size======= ", self.n)
        random.shuffle(self.samples)         # shuffle once up front
        if self.batch_size < 0:
            self.batch_size = self.n
        self.indexes = list(range(self.n))
        self.cur_index = 0
        
    def build_data(self):  # preprocess: resize every image to img_size x img_size and rescale the corner labels to match
        self.imgs = np.zeros((self.n, self.img_size, self.img_size, 3))  # preallocate the image buffer
        self.corners = []
        for i, (img_filepath, corner) in enumerate(self.samples):
            img = cv2.imread(img_filepath)
            h, w, channel = img.shape
            img = cv2.resize(img, (self.img_size, self.img_size), interpolation=cv2.INTER_CUBIC)
            img = img.astype(np.float32)   # uint8 -> float32
            img /= 255                     # normalize every pixel to [0, 1]
            self.imgs[i, :, :, :] = img
            # corner layout: [lt_x, lt_y, rt_x, rt_y, rb_x, rb_y, lb_x, lb_y]
            corner[0::2] *= np.float32(self.img_size) / np.float32(w)  # x coordinates
            corner[1::2] *= np.float32(self.img_size) / np.float32(h)  # y coordinates
            self.corners.append(corner)
    
    def next_sample(self):       # return the next sample; reshuffle the order at the end of each pass
        sample_idx = self.indexes[self.cur_index]
        self.cur_index += 1
        if self.cur_index >= self.n:
            self.cur_index = 0
            random.shuffle(self.indexes)
        return self.samples[sample_idx][0], self.imgs[sample_idx], self.corners[sample_idx]
                # (image path, image data, corner data)

    def next_batch(self):
        while True:
            X_data = np.ones([self.batch_size, self.img_size, self.img_size, 3])
            Y_data = np.ones([self.batch_size, 8])      # label buffer: 8 corner coordinates per sample
            filenames = []
            loss_out = np.zeros((self.batch_size, 1))   # dummy target for the in-graph loss output

            for i in range(self.batch_size):
                filename, img, corner = self.next_sample()
                X_data[i] = img
                Y_data[i] = corner
                filenames.append(filename)

            inputs = {
                'the_input': X_data,
                'y_true': Y_data,
                'filenames': filenames
            }
            outputs = {'loss_out': loss_out}
            yield (inputs, outputs)   # generator: execution pauses at yield and resumes here on the next request


# --------------- iterate one batch and print some basic info ---------------
tiger = ImageGenerator(imagepath, train_range_idx, image_size, 1)
tiger.build_data()

for inp, out in tiger.next_batch():         # print the shape and label of one training sample
    print('Data generator output (data which will be fed into the neural network):')
    print('1) the_input (image)', inp['the_input'][0].shape)
    print('2) the_labels is {0}'.format(inp['y_true'][0]))
    break
#--------------------------------------------------------------------------
def lambda_loss_func(args):              # loss computed inside the graph: mean squared error over the corner coordinates
    y_pred, y_true = args
    return K.mean(K.square(y_pred - y_true))
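
# The Lambda layer used below bakes this loss into the graph, so compile() is
# later given a pass-through loss (lambda y_true, y_pred: y_pred). For
# comparison, here is a minimal sketch of training the same regression without
# that trick, using Keras's built-in MSE; `build_plain_model` is a hypothetical
# helper added for illustration, never called by this script (it assumes the
# channels_last data format):
def build_plain_model(img_size=image_size):
    inp = Input(shape=(img_size, img_size, 3), dtype='float32')
    base = Xception(input_tensor=inp, weights='imagenet', include_top=False)
    x = Flatten()(base.output)
    x = Dense(512)(x)
    x = Dense(64)(x)
    out = Dense(8)(x)                    # 8 corner coordinates
    plain = Model(inputs=inp, outputs=out)
    plain.compile(optimizer=Adam(), loss='mse')  # same objective as lambda_loss_func
    return plain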

def train(resume=False):
    saved_checkpoint_path = join(output_model_path, "weights.{epoch:02d}-{val_loss:.2f}.hdf5")
    load_checkpoint_path = join(output_model_path, load_weight_filename)

    if K.image_data_format() == 'channels_first':           # order the input dimensions to match the backend
        input_shape = (3, image_size, image_size)
    else:
        input_shape = (image_size, image_size, 3)

    batch_size = 8

    tiger_train = ImageGenerator(imagepath, train_range_idx, image_size, batch_size)
    tiger_train.build_data()
    tiger_val = ImageGenerator(imagepath, val_range_idx, image_size, batch_size)
    tiger_val.build_data()

    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    
    #vgg16 = VGG16(input_tensor=input_data, weights='imagenet', include_top=False)
    xception = Xception(input_tensor=input_data, weights='imagenet', include_top=False)

    inner = Flatten()(xception.output)
    inner = Dense(512)(inner)
    inner = Dense(64)(inner)

    y_pred = Dense(8, name='y_pred')(inner)     # regress the 8 corner coordinates

    Model(inputs=input_data, outputs=y_pred).summary()

    y_true = Input(name='y_true', shape=[8], dtype='float32')
    loss_out = Lambda(lambda_loss_func, output_shape=(1,), name='loss_out')([y_pred, y_true])

    # clipnorm seems to speed up convergence
    #sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
    adam = Adam()

    model = Model(inputs=[input_data, y_true], outputs=loss_out)

    if gpu_count > 1:
        parallel_model = multi_gpu_model(model, gpus=gpu_count)
    else:
        parallel_model = model

    if resume:
        parallel_model.load_weights(load_checkpoint_path)

    parallel_model.compile(loss={'loss_out': lambda y_true, y_pred: y_pred}, optimizer=adam)  # the real loss is computed in-graph; just pass it through

    early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1, mode='min')
    checkpoint = ModelCheckpoint(saved_checkpoint_path, monitor='val_loss', save_best_only=False, mode='min', save_weights_only=True)
    # save the weights after every epoch
    parallel_model.fit_generator(generator=tiger_train.next_batch(),        # next_batch yields (inputs, outputs)
                        steps_per_epoch=tiger_train.n // batch_size,        # one pass over the training samples per epoch
                        epochs=60,
                        callbacks=[early_stopping, checkpoint],
                        validation_data=tiger_val.next_batch(),
                        validation_steps=tiger_val.n // batch_size,
                        verbose=1)
    # trains the model on batches produced by the Python generator

    return model

if not load_model_flag:     # load_model_flag=False: training mode; True: test with a saved model
    model = train(resume=load_weight_flag)   # load_weight_flag=True resumes from the pretrained checkpoint
    model.save(join(output_model_path, model_filename))
else:
    # -------- test --------
    # load from a pre-trained model
    model = load_model(join(output_model_path, model_filename), compile=False)

tiger_test = ImageGenerator(imagepath, test_range_idx, image_size, 1)
tiger_test.build_data()

valid_samples_count = tiger_test.n
sample_count = 0
print("valid samples: ", valid_samples_count)

net_inp = model.get_layer(name='the_input').input  # look up layers by name
net_out = model.get_layer(name='y_pred').output

for inp_value, _ in tiger_test.next_batch():
    bs = inp_value['the_input'].shape[0]
    X_data = inp_value['the_input']
    pred_corners = sess.run(net_out, feed_dict={net_inp: X_data})  # predicted corner coordinates
    labels = inp_value['y_true']        # ground-truth corner coordinates
    filenames = inp_value['filenames']

    for i in range(bs):
        sample_count += 1
        print("filename:", filenames[i])
        print("predict :", pred_corners[i])
        print("true    :", labels[i])
        img = cv2.imread(filenames[i])
        h, w, channel = img.shape
        # map the coordinates back from the network input size to the original image size
        pred_corners[i][0::2] *= np.float32(w) / np.float32(image_size)  # x coordinates
        pred_corners[i][1::2] *= np.float32(h) / np.float32(image_size)  # y coordinates
        labels[i][0::2] *= np.float32(w) / np.float32(image_size)
        labels[i][1::2] *= np.float32(h) / np.float32(image_size)
        for idx in range(4):   # draw both sets of corners (BGR: ground truth in cyan, prediction in blue)
            cv2.circle(img, (int(labels[i][2*idx]), int(labels[i][2*idx+1])), 3, (255, 255, 0), -1)
            cv2.circle(img, (int(pred_corners[i][2*idx]), int(pred_corners[i][2*idx+1])), 3, (255, 0, 0), -1)
        cv2.imwrite(os.path.join(os.path.split(filenames[i])[0], "predict_" + os.path.split(filenames[i])[1]), img)
    if sample_count == valid_samples_count:
        break
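
# A simple quality metric one could compute inside the loop above (an addition
# for illustration, not part of the original script): the mean Euclidean
# distance between predicted and true corners, in original-image pixels.
def mean_corner_error(pred, true):
    pred = np.asarray(pred, dtype=np.float32).reshape(4, 2)  # [[x, y], ...] for the 4 corners
    true = np.asarray(true, dtype=np.float32).reshape(4, 2)
    return float(np.mean(np.linalg.norm(pred - true, axis=1)))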

Reposted from blog.csdn.net/weixin_38740463/article/details/89421605