TensorFlow Quick Review - RNN/LSTM

# TensorFlow -> RNN/LSTM
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# 1. Load the labeled data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)  # load the dataset
n_inputs = 28    # each time step consumes one image row of 28 pixels
max_time = 28    # 28 time steps in total (the input images are 28x28)
lstm_size = 100  # number of hidden units in the LSTM cell
n_classes = 10   # ten digit classes
batch_size = 64  # 64 samples per batch
n_batch = mnist.train.num_examples // batch_size  # number of batches per epoch
x = tf.placeholder(tf.float32, [None, 784])  # the first dimension is the (variable) batch size
y = tf.placeholder(tf.float32, [None, 10])   # ground-truth one-hot labels
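In other words, each flattened 784-pixel image is treated as a sequence of 28 rows and fed to the LSTM one row per time step. A minimal numpy sketch of that reshape (illustration only, not part of the original script):

import numpy as np
img = np.arange(784, dtype=np.float32)  # stand-in for one flattened MNIST image
seq = img.reshape(28, 28)               # 28 time steps (rows), 28 inputs per step
print(seq.shape)                        # (28, 28)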

# 2. Initialize the parameters
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))  # output-layer weights
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))  # output-layer biases
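The shapes follow from the output projection: the LSTM's last hidden state carries lstm_size features per sample, and the projection maps them to n_classes scores. A numpy stand-in to check the shapes (illustrative only):

import numpy as np
h = np.zeros((64, 100), dtype=np.float32)  # last hidden state: [batch_size, lstm_size]
W = np.zeros((100, 10), dtype=np.float32)  # weights:           [lstm_size, n_classes]
b = np.zeros(10, dtype=np.float32)         # biases:            [n_classes]
print((h @ W + b).shape)                   # (64, 10) -- one score per class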

# 3. Define the RNN network
def RNN(X, weights, biases):
    # reshape the flat 784-pixel images into sequences: [batch, max_time, n_inputs]
    inputs = tf.reshape(X, [-1, max_time, n_inputs])
    lstm_cell = tf.nn.rnn_cell.LSTMCell(lstm_size)
    # final_state is an LSTMStateTuple (c, h); final_state[1] is the last hidden output h
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    # return raw logits; tf.losses.softmax_cross_entropy below applies the softmax itself
    return tf.matmul(final_state[1], weights) + biases
prediction = RNN(x, weights, biases)  # class logits for every sample
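A note on final_state (my reading of the TF 1.x API, not spelled out in the original post): tf.nn.dynamic_rnn returns outputs of shape [batch_size, max_time, lstm_size] plus an LSTMStateTuple (c, h), where final_state[1] is the last hidden output h, equal here to outputs[:, -1, :]. A standalone shape check (run it as its own script, since building a second cell in the graph above would clash with the existing rnn variable scope):

import tensorflow as tf
seq = tf.placeholder(tf.float32, [None, 28, 28])  # [batch, max_time, n_inputs]
cell = tf.nn.rnn_cell.LSTMCell(100)
outputs, final_state = tf.nn.dynamic_rnn(cell, seq, dtype=tf.float32)
print(outputs.shape)        # (?, 28, 100) -- hidden output at every time step
print(final_state.h.shape)  # (?, 100)     -- same value as outputs[:, -1, :]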
    
# 4. Loss and evaluation
loss = tf.losses.softmax_cross_entropy(onehot_labels=y, logits=prediction)  # softmax cross-entropy loss
train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)  # minimize the loss with Adam, learning rate 1e-3
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # boolean vector: one entry per sample
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # fraction of correct predictions
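The accuracy op just compares argmax indices and averages the resulting booleans. The same computation on a toy batch of three samples, in numpy (illustration only):

import numpy as np
labels = np.array([[0, 1], [1, 0], [0, 1]])              # one-hot ground truth
logits = np.array([[0.2, 0.8], [0.9, 0.1], [0.7, 0.3]])  # model scores
hits = np.argmax(labels, 1) == np.argmax(logits, 1)      # [True, True, False]
print(hits.astype(np.float32).mean())                    # 0.6666667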

# 5. Train
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all variables
    for epoch in range(2):  # 2 epochs
        for batch in range(n_batch):  # loop over every batch in the epoch
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)  # fetch one batch
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})  # one training step
        # evaluate on the test set once per epoch
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy= " + str(acc))

Reposted from blog.csdn.net/cj1064789374/article/details/88223131