Implementing a Language Model with a Recurrent Neural Network

#coding:utf-8
from tensorflow.models.rnn.ptb import reader
import tensorflow as tf

import numpy as np

DATA_PATH = "./simple-examples/data"


#Read the raw data (inspection snippet left commented out)
# print (len(train_data))

# result = reader.ptb_iterator(train_data, 4, 5)
# x, y = result.__next__()
# print("X:", x)

HIDDEN_SIZE = 200
NUM_LAYERS = 2
VOCAB_SIZE = 10000

LEARNING_RATE = 1.0
TRAIN_BATCH_SIZE = 20
TRAIN_NUM_STEP = 35

EVAL_BATCH_SIZE = 1
EVAL_NUM_STEP = 1
NUM_EPOCH = 2
KEEP_PROB = 0.5
MAX_GRAD_NORM = 5

#Wrap the model in a PTBModel class so that the network's state is easier to maintain.
class PTBModel(object):
    def __init__(self, is_training, batch_size, num_steps):
        self.batch_size = batch_size
        self.num_steps = num_steps

        #Define the input layer. Its shape is batch_size * num_steps, which matches the training-data batches produced by ptb_iterator.
        self.input_data = tf.placeholder(tf.int32, [batch_size, num_steps])

        #Define the expected output. Its shape also matches that of the targets returned by ptb_iterator.
        self.targets = tf.placeholder(tf.int32, [batch_size, num_steps])

        #Define a deep RNN that uses LSTM cells as the recurrent unit and applies dropout.
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
        if is_training:
            lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=KEEP_PROB)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * NUM_LAYERS)

        #Initialize the initial state to all zeros.
        self.initial_state = cell.zero_state(batch_size, tf.float32)

        #Define the word-embedding matrix.
        embedding = tf.get_variable("embedding", [VOCAB_SIZE, HIDDEN_SIZE])

        #Convert the batch_size * num_steps word IDs into word vectors; the resulting input tensor has shape batch_size * num_steps * HIDDEN_SIZE.
        inputs = tf.nn.embedding_lookup(embedding, self.input_data)

        #Apply dropout only during training.
        if is_training:
            inputs = tf.nn.dropout(inputs, KEEP_PROB)

        #Define the list of outputs. First collect the LSTM outputs at each time step, then pass them through a fully connected layer to get the final output.
        outputs = []
        #state stores the LSTM state across batches; initialize it to the zero state.
        state = self.initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0:
                    tf.get_variable_scope().reuse_variables()
                #Take the input for the current time step and feed it into the LSTM cell.
                cell_output, state = cell(inputs[:, time_step, :], state)
                #Append the current output to the output list.
                outputs.append(cell_output)

        #Concatenate the output list into shape [batch, hidden_size*num_steps], then reshape it to [batch*num_steps, hidden_size].
        output = tf.reshape(tf.concat(outputs, 1), [-1, HIDDEN_SIZE])


        #Fully connected layer that maps the LSTM outputs to logits over the vocabulary.
        weight = tf.get_variable("weight", [HIDDEN_SIZE, VOCAB_SIZE])
        bias = tf.get_variable("bias", [VOCAB_SIZE])
        logits = tf.matmul(output, weight) + bias

        #Define the cross-entropy loss. TensorFlow provides sequence_loss_by_example to compute the summed cross-entropy over a sequence.
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([logits], [tf.reshape(self.targets, [-1])],
                                                                  [tf.ones([batch_size * num_steps], dtype=tf.float32)])
        self.cost = tf.reduce_sum(loss) / batch_size
        self.final_state = state

        #Define the backpropagation operations only when training.
        if not is_training:
            return
        trainable_variables = tf.trainable_variables()
        #Use clip_by_global_norm to cap the gradient magnitude and avoid exploding gradients.
        grads, _ = tf.clip_by_global_norm(
            tf.gradients(self.cost, trainable_variables), MAX_GRAD_NORM
        )

        #Define the optimizer.
        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)

        #Define the training step.
        self.train_op = optimizer.apply_gradients(
            zip(grads, trainable_variables))
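
The concat-then-reshape step above is easy to get wrong, so here is a small standalone NumPy sketch (not part of the model; the shapes and values are made up purely for illustration) showing that the flattened logits rows line up with tf.reshape(self.targets, [-1]): both come out in batch-major, time-minor order.

import numpy as np

batch, steps, hidden = 2, 3, 4
# Fake per-time-step LSTM outputs, each of shape [batch, hidden]; every element
# stores its own time-step index so the ordering is easy to read off.
outputs = [np.full((batch, hidden), t, dtype=np.float32) for t in range(steps)]
output = np.reshape(np.concatenate(outputs, axis=1), [-1, hidden])
print(output[:, 0])         # [0. 1. 2. 0. 1. 2.]  -> batch 0's steps, then batch 1's
targets = np.arange(batch * steps).reshape(batch, steps)
print(targets.reshape(-1))  # [0 1 2 3 4 5]        -> the same batch-major, time-minor order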

#Run train_op on data with the given model and return the perplexity over the entire dataset.
def run_epoch(session, model, data, train_op, output_log):
    #Auxiliary variables for computing perplexity.
    total_costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    #Train or evaluate the model on the current data.
    for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
        cost, state, _ = session.run(
            [model.cost, model.final_state, train_op],
            {model.input_data: x, model.targets: y, model.initial_state: state})

        total_costs += cost
        iters += model.num_steps

        if output_log and step % 100 == 0:
            print("After %d steps, perplexity is %.3f" %(step, np.exp(total_costs / iters)))
    return np.exp(total_costs / iters )

def main():
    train_data, valid_data, test_data, _ = reader.ptb_raw_data(DATA_PATH)

    #Initialize all variables uniformly in [-0.05, 0.05].
    initializer = tf.random_uniform_initializer(-0.05, 0.05)

    with tf.variable_scope("language_model", reuse=None, initializer=initializer):
        train_model = PTBModel(True, TRAIN_BATCH_SIZE, TRAIN_NUM_STEP)

    #Define the network used for evaluation.
    with tf.variable_scope("language_model", reuse=True, initializer=initializer):
        eval_model = PTBModel(False, EVAL_BATCH_SIZE, EVAL_NUM_STEP)

    with tf.Session() as session:
        tf.initialize_all_variables().run()

        for i in range(NUM_EPOCH):
            print("In iteration: %d" % (i+1))

            run_epoch(session, train_model, train_data, train_model.train_op, True)

            #Evaluate the model on the validation set.
            valid_perplexity = run_epoch(
                session, eval_model, valid_data, tf.no_op(), False)
            print("Epoch: %d Validation Perplexity: %.3f" % (i + 1, valid_perplexity))

        test_perplexity = run_epoch(
            session, eval_model, test_data, tf.no_op(), False)
        print("Test Perplexity: %.3f" % test_perplexity)

if __name__ == "__main__":
    main()
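
A detail worth calling out in main() is that the training and evaluation models share all of their weights: both are built inside a variable scope named "language_model", and the second scope sets reuse=True so tf.get_variable returns the existing variables instead of creating new ones. Below is a minimal standalone sketch of this TF 1.x mechanism (the scope name "demo" and variable name "w" are invented for illustration):

import tensorflow as tf

with tf.variable_scope("demo", reuse=None):
    v1 = tf.get_variable("w", [1])   # created here
with tf.variable_scope("demo", reuse=True):
    v2 = tf.get_variable("w", [1])   # looked up, not re-created
print(v1.name, v2.name)              # both print demo/w:0

Running the full script on the PTB data produces output like the following: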
In iteration: 1
After 0 steps, perplexity is 9983.594
After 100 steps, perplexity is 1381.611
After 200 steps, perplexity is 1015.572
After 300 steps, perplexity is 855.255
After 400 steps, perplexity is 750.932
After 500 steps, perplexity is 678.441
After 600 steps, perplexity is 625.717
After 700 steps, perplexity is 581.618
After 800 steps, perplexity is 542.609
After 900 steps, perplexity is 512.290
After 1000 steps, perplexity is 488.754
After 1100 steps, perplexity is 466.114
After 1200 steps, perplexity is 447.491
After 1300 steps, perplexity is 430.685
Epoch: 1 Validation Perplexity: 256.287
In iteration: 2
After 0 steps, perplexity is 378.406
After 100 steps, perplexity is 261.935
After 200 steps, perplexity is 267.274
After 300 steps, perplexity is 268.162
After 400 steps, perplexity is 265.566
After 500 steps, perplexity is 263.393
After 600 steps, perplexity is 262.826
After 700 steps, perplexity is 260.241
After 800 steps, perplexity is 255.630
After 900 steps, perplexity is 252.962
After 1000 steps, perplexity is 251.316
After 1100 steps, perplexity is 247.905
After 1200 steps, perplexity is 245.479
After 1300 steps, perplexity is 242.793
Epoch: 2 Validation Perplexity: 201.876
Test Perplexity: 196.534

As the output shows, the perplexity at the start of training is 9983.594, roughly equivalent to picking the next word uniformly at random from the 10000-word vocabulary; by the end of training it has dropped to about 196, i.e. the model is effectively choosing among only about 196 words.
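
For reference, the perplexity reported by run_epoch is simply np.exp(total_costs / iters), i.e. the exponential of the average per-word cross-entropy. A tiny sanity check of that "random guessing" baseline (the number below is chosen only to match the 10000-word vocabulary used above):

import numpy as np

vocab_size = 10000.0
uniform_cross_entropy = np.log(vocab_size)   # per-word cost of a uniform guess over the vocabulary
print(np.exp(uniform_cross_entropy))         # ~10000.0, the same scale as the initial 9983.594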

Reposted from blog.csdn.net/qq_34000894/article/details/81244399