# CNN-to-LSTM encoder-decoder model for CAPTCHA recognition

import tensorflow as tf
import os
import matplotlib.image as implt
import numpy as np

batch_size = 100  # images per training batch (also baked into the placeholder shapes)
H = 60  # input image height in pixels
W = 120  # input image width in pixels


class Sample:
    """Loads CAPTCHA images from disk and serves random mini-batches.

    Every file in *data_dir* is expected to be an image whose filename stem
    is the 4-digit label, e.g. ``1234.jpg``.
    """

    def __init__(self, data_dir=r'D:\code'):
        # NOTE: the path is a raw string — the original 'D:\code' relied on the
        # invalid escape '\c' being passed through, which is a SyntaxWarning and
        # a future SyntaxError in CPython. The directory is now a parameter
        # (default unchanged) so the loader can be pointed elsewhere.
        self.datasets = []  # list of [image_array, onehot_label] pairs
        for filename in os.listdir(data_dir):
            # Read the pixels; the filename stem is the ground-truth label.
            x = implt.imread(os.path.join(data_dir, filename))
            y = filename.split('.')[0]  # strip the extension to get the digits
            y = self.to_onehot(y)
            self.datasets.append([x, y])

    def to_onehot(self, y):
        """Convert a 4-character digit string into a (4, 10) one-hot array."""
        z = np.zeros([4, 10])
        for i in range(4):
            index = int(y[i])  # digit character -> class index
            z[i][index] = 1
        return z

    def get_batch(self, n):
        """Return *n* samples drawn uniformly at random, with replacement.

        Returns (xs, ys): parallel lists of images and one-hot labels.
        """
        xs = []
        ys = []
        for _ in range(n):
            index = np.random.randint(0, len(self.datasets))
            xs.append(self.datasets[index][0])
            ys.append(self.datasets[index][1])
        return xs, ys


class EncoderNet:
    """CNN encoder: maps a (batch, 60, 120, 3) image to a 100-dim feature vector."""

    def __init__(self):
        # Two 3x3 conv layers, small-stddev truncated-normal initialization.
        self.conv1_w = tf.Variable(tf.truncated_normal(shape=[3, 3, 3, 16], stddev=0.1))
        self.conv1_b = tf.Variable(tf.zeros(dtype=tf.float32, shape=[16]))

        self.conv2_w = tf.Variable(tf.truncated_normal(shape=[3, 3, 16, 32], stddev=0.1))
        self.conv2_b = tf.Variable(tf.zeros(dtype=tf.float32, shape=[32]))

        # Fully-connected projection to the 100-dim code. stddev=0.1 added for
        # consistency with the conv weights — truncated_normal's default
        # stddev of 1.0 is far too large for a 14400-input layer.
        self.w1 = tf.Variable(tf.truncated_normal(shape=[15 * 30 * 32, 100], stddev=0.1))

    def forward(self, x):
        """Encode images *x* (N, 60, 120, 3) into (N, 100) feature vectors."""
        self.conv1 = tf.nn.relu(tf.nn.conv2d(x, self.conv1_w, [1, 1, 1, 1], padding="SAME") + self.conv1_b)
        self.pool1 = tf.nn.max_pool(self.conv1, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")
        # spatial size after pool1: 30 x 60
        self.conv2 = tf.nn.relu(tf.nn.conv2d(self.pool1, self.conv2_w, [1, 1, 1, 1], padding="SAME") + self.conv2_b)
        self.pool2 = tf.nn.max_pool(self.conv2, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")
        # spatial size after pool2: 15 x 30
        self.flat = tf.reshape(self.pool2, shape=[-1, 15 * 30 * 32])
        self.output = tf.matmul(self.flat, self.w1)

        return self.output

class DecoderNet:
    """LSTM decoder: unrolls the encoder feature over 4 steps, one per digit."""

    def __init__(self):
        self.cell = tf.contrib.rnn.BasicLSTMCell(128)
        # Use the shared batch_size constant; the original hard-coded the
        # literal 100 here, which would silently break if batch_size changed.
        self.init_state = self.cell.zero_state(batch_size, dtype=tf.float32)

        # 128 LSTM units -> 10 digit classes; stddev=0.1 for consistency
        # with the encoder's initializers.
        self.w1 = tf.Variable(tf.truncated_normal(shape=[128, 10], stddev=0.1))

    def forward(self, x):
        """Decode (N, 100) encoder features into (N, 4, 10) digit probabilities."""
        # Repeat the encoder vector across 4 time steps: (N, 100) -> (N, 4, 100).
        y = tf.expand_dims(x, axis=1)
        y = tf.tile(y, [1, 4, 1])
        y, _ = tf.nn.dynamic_rnn(self.cell, y, initial_state=self.init_state, time_major=False)

        # Per-step softmax over the 10 digit classes.
        y = tf.reshape(y, (-1, 128))
        y = tf.nn.softmax(tf.matmul(y, self.w1))
        y = tf.reshape(y, (-1, 4, 10))
        return y

class Net:
    """End-to-end CAPTCHA model: CNN encoder feeding an LSTM decoder."""

    def __init__(self):
        # Input images and one-hot labels (4 digit positions, 10 classes each).
        self.x = tf.placeholder(dtype=tf.float32, shape=[batch_size, H, W, 3])
        self.y = tf.placeholder(dtype=tf.float32, shape=[batch_size, 4, 10])

        self.encoderNet = EncoderNet()
        self.decoderNet = DecoderNet()

    def forward(self):
        """Build the graph: encode the images, then decode into self.output."""
        y = self.encoderNet.forward(self.x)
        self.output = self.decoderNet.forward(y)

    def backward(self):
        """Attach the loss and the Adam training op."""
        # Cross-entropy loss: the decoder already emits softmax probabilities,
        # and MSE on softmax outputs (the original loss) yields tiny, vanishing
        # gradients for classification. Epsilon guards log(0).
        self.loss = tf.reduce_mean(
            -tf.reduce_sum(self.y * tf.log(self.output + 1e-10), axis=-1))
        self.opt = tf.train.AdamOptimizer().minimize(self.loss)


if __name__ == '__main__':
    # Build the dataset and the full graph, then train for 120 steps.
    sample = Sample()
    net = Net()
    net.forward()
    net.backward()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(120):
            batch_x, batch_y = sample.get_batch(batch_size)
            feed = {net.x: batch_x, net.y: batch_y}
            loss, _ = sess.run([net.loss, net.opt], feed_dict=feed)
            print(loss)

# Source: https://blog.csdn.net/weixin_38241876/article/details/85465736