TensorFlow: Building an RNN Classification Model on the MNIST Handwritten Digit Dataset

RNN network architecture

Loading the data

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
print ("Packages imported")

mnist = input_data.read_data_sets("data/", one_hot=True)
trainimgs, trainlabels, testimgs, testlabels = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels 
ntrain, ntest, dim, nclasses  = trainimgs.shape[0], testimgs.shape[0], trainimgs.shape[1], trainlabels.shape[1]
print ("MNIST loaded")

Initializing the parameters

diminput  = 28         # each time step consumes one 28-pixel image row
dimhidden = 128        # LSTM hidden state size
dimoutput = nclasses   # 10 digit classes
nsteps    = 28         # 28 rows per image = 28 time steps
weights = {
    'hidden': tf.Variable(tf.random_normal([diminput, dimhidden])),  # 28 x 128
    'out': tf.Variable(tf.random_normal([dimhidden, dimoutput]))     # 128 x 10
}
biases = {
    'hidden': tf.Variable(tf.random_normal([dimhidden])),
    'out': tf.Variable(tf.random_normal([dimoutput]))
}
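
The shapes chain together as 28 -> 128 -> 10: each 28-pixel row is projected into a 128-dimensional hidden space, and the last LSTM output is projected onto the 10 classes. A quick, purely illustrative shape check:

print(weights['hidden'].shape)  # (28, 128): image row -> hidden state
print(weights['out'].shape)     # (128, 10): hidden state -> class scores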

Building the RNN model

def _RNN(_X, _W, _b, _nsteps, _name):
    # Data transformation
    # 1. Permute the input from [batchsize, nsteps, diminput]
    #    => [nsteps, batchsize, diminput]
    _X = tf.transpose(_X, [1, 0, 2])
    # 2. Reshape the input to [nsteps*batchsize, diminput]
    _X = tf.reshape(_X, [-1, diminput])
    # 3. Input layer => hidden layer
    _H = tf.matmul(_X, _W['hidden']) + _b['hidden']
    # 4. Split the data into 'nsteps' chunks; the i-th chunk holds the i-th
    #    time step for the whole batch. Step 3 computed hidden values for
    #    every time step at once; the RNN needs them sliced back into a sequence.
    _Hsplit = tf.split(_H, _nsteps, 0)
    # 5. Get the LSTM's per-step outputs (_LSTM_O) and final state (_LSTM_S).
    #    Only _LSTM_O is used to predict the output.
    #    reuse=tf.AUTO_REUSE shares variables across calls; the older
    #    scope.reuse_variables() idiom is deprecated, as is tf.nn.rnn,
    #    which became tf.contrib.rnn.static_rnn.
    with tf.variable_scope(_name, reuse=tf.AUTO_REUSE):
        # Build one LSTM cell; forget_bias=1.0 biases the forget gate toward
        # remembering at the start of training.
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(dimhidden, forget_bias=1.0)
        # Wire the cell into the simplest network here, a statically unrolled
        # RNN; other variants are left for later.
        _LSTM_O, _LSTM_S = tf.contrib.rnn.static_rnn(lstm_cell, _Hsplit, dtype=tf.float32)
        # 6. Output: use the last time step's output for classification.
        _O = tf.matmul(_LSTM_O[-1], _W['out']) + _b['out']
    # Return!
    return {
        'X': _X, 'H': _H, 'Hsplit': _Hsplit,
        'LSTM_O': _LSTM_O, 'LSTM_S': _LSTM_S, 'O': _O
    }
print ("Network ready")
learning_rate = 0.001
x      = tf.placeholder("float", [None, nsteps, diminput])
y      = tf.placeholder("float", [None, dimoutput])
myrnn  = _RNN(x, weights, biases, nsteps, 'basic')
pred   = myrnn['O']
cost   = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optm   = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # plain gradient descent
accr   = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), tf.float32))
init   = tf.global_variables_initializer()
print ("Network Ready!")

Training the model

training_epochs = 5
batch_size      = 16
display_step    = 1
sess = tf.Session()
sess.run(init)
print ("Start optimization")
for epoch in range(training_epochs):
    avg_cost = 0.
    # total_batch = int(mnist.train.num_examples/batch_size)
    total_batch = 100  # cap at 100 batches per epoch to keep the demo fast
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        batch_xs = batch_xs.reshape((batch_size, nsteps, diminput))
        # Fit training using batch data
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        # Compute average loss
        avg_cost += sess.run(cost, feed_dict=feeds)/total_batch
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds)
        print(" Training accuracy: %.3f" % (train_acc))
        # testimgs is [ntest, 784]; reshape it to match the x placeholder.
        feeds = {x: testimgs.reshape((ntest, nsteps, diminput)), y: testlabels}
        test_acc = sess.run(accr, feed_dict=feeds)
        print(" Test accuracy: %.3f" % (test_acc))
print ("Optimization Finished.")

Reposted from blog.csdn.net/qq_41686130/article/details/96140624