【高级RNN】预训练词嵌入导入文本分类任务——GLOVE模型

预训练词嵌入导入文本分类任务——GLOVE模型

       在深度学习中,词嵌入是文本模型中强大的组成部分。在应用中十分常见的做法是,先使用例如word2vec模型在大量无标记文本中训练出的词向量,然后在后续的任务(例如有监督的文本分类任务)中使用这些词向量。

       使用无监督方法从头训练词向量通常需要极大的语料库。在实践中,我们使用预训练的词嵌入,这些词嵌入是从巨大语料库上训练的,可以通过网络获取。

       在本文中,我们将展示如何将TensorFlow预训练的词嵌入应用在一个简单的文本分类任务中。并借此机会使用在深度学习应用中强大的组件,这些组件在自然语言理解中更加强大且不可或缺:双向RNN层和门限递归单元。

       本项目需要下载Common Crawl向量(840B个符号),进而推进本次示例。预训练向量的信息请参考项目网页(http://nlp.stanford.edu/projects/glove/)。

具体解释见代码注释

代码如下:

import zipfile
import numpy as np
import tensorflow as tf

# --- Hyperparameters and configuration ---
path_to_glove = "glove/glove.840B.300d.zip"  # pre-trained GloVe archive (Common Crawl, 840B tokens)
PRE_TRAINED = True           # True: initialize embeddings from GloVe; False: learn them from scratch
GLOVE_SIZE = 300             # dimensionality of the GloVe vectors (glove.840B.300d)
batch_size = 128             # sentences per training batch
embedding_dimension = 64     # embedding size used only when PRE_TRAINED is False
num_classes = 2              # binary classification: odd vs. even sentences
hidden_layer_size = 32       # GRU hidden-state size (per direction)
times_steps = 6              # fixed sentence length after padding

# --- Build a synthetic dataset ---
# Each sentence is a sequence of number-words: "even" sentences contain only
# even digit words, "odd" sentences only odd ones.  Sentences shorter than 6
# tokens are padded with digit 0 ("PAD_TOKEN") up to the fixed length of 6.
digit_to_word_map = {0: "PAD_TOKEN", 1: "One", 2: "Two", 3: "Three",
                     4: "Four", 5: "Five", 6: "Six", 7: "Seven",
                     8: "Eight", 9: "Nine"}


def _digits_to_sentence(digits):
    """Render a digit sequence as a space-separated word sentence."""
    return " ".join(digit_to_word_map[d] for d in digits)


even_sentences = []
odd_sentences = []
seqlens = []

for _ in range(10000):
    # Draw a random true sentence length in [3, 6].
    seq_len = np.random.choice(range(3, 7))
    seqlens.append(seq_len)
    # Random odd digits in 1..9 and even digits in 2..8, one per time step.
    # (Digit 0 is reserved for padding, hence the even range starts at 2.)
    odd_digits = np.random.choice(range(1, 10, 2), seq_len)
    even_digits = np.random.choice(range(2, 10, 2), seq_len)
    if seq_len < 6:
        # Pad both sequences to the fixed length of 6 with the PAD digit.
        padding = [0] * (6 - seq_len)
        odd_digits = np.append(odd_digits, padding)
        even_digits = np.append(even_digits, padding)
    even_sentences.append(_digits_to_sentence(even_digits))
    odd_sentences.append(_digits_to_sentence(odd_digits))

# Full dataset: the 10000 even sentences followed by the 10000 odd ones.
data = even_sentences + odd_sentences

# Sentence i and sentence i+10000 were generated with the same length, so the
# length list is simply duplicated to line up with `data`.
seqlens = seqlens + seqlens

# One-hot encode the labels: the first half of `data` (even sentences) is
# class 1 -> [0, 1]; the second half (odd sentences) is class 0 -> [1, 0].
labels = [[0, 1] if i < 10000 else [1, 0] for i in range(20000)]

# --- Vocabulary: word <-> integer index mappings ---
# Assign each distinct word the next free index, in first-seen order.
word2index_map = {}
for sentence in data:
    for token in sentence.split():
        word2index_map.setdefault(token, len(word2index_map))

# Inverse mapping, index -> word.
index2word_map = {idx: token for token, idx in word2index_map.items()}
vocabulary_size = len(index2word_map)


def get_glove(path_to_glove, word2index_map):
    """Load GloVe vectors for the words in *word2index_map*.

    Streams the embedding text file directly out of the zip archive and
    keeps only vectors for words present in the vocabulary; each kept
    vector is L2-normalized.

    Args:
        path_to_glove: path to a zip archive containing
            "glove.840B.300d.txt" (one "word v1 v2 ..." line per word).
        word2index_map: dict mapping vocabulary words to integer indices.
            It is expected to include the "PAD_TOKEN" entry, which has no
            GloVe vector.

    Returns:
        dict mapping each found word to its normalized float32 vector.
    """
    embedding_weights = {}
    count_all_words = 0
    # Stop once every real word (all entries except PAD_TOKEN) has been
    # found, so we don't scan the rest of the huge embedding file.
    # NOTE: derived from the argument instead of the global
    # `vocabulary_size` the original relied on.
    target_words = len(word2index_map) - 1
    with zipfile.ZipFile(path_to_glove) as z:
        with z.open("glove.840B.300d.txt") as f:    # read without extracting
            for line in f:                          # lines are bytes
                vals = line.split()
                word = vals[0].decode("utf-8")
                if word in word2index_map:
                    count_all_words += 1
                    # asarray avoids an extra copy when possible.
                    coefs = np.asarray(vals[1:], dtype='float32')
                    coefs /= np.linalg.norm(coefs)  # L2-normalize the vector
                    embedding_weights[word] = coefs
                if count_all_words == target_words:
                    break
    return embedding_weights


# Fetch the GloVe vectors for the words in our small vocabulary.
word2embedding_dict = get_glove(path_to_glove, word2index_map)

# Dense embedding matrix with one row per vocabulary index.  Row 0
# (PAD_TOKEN) is left all-zero: padding has no pre-trained vector.
embedding_matrix = np.zeros((vocabulary_size, GLOVE_SIZE))
for token, idx in word2index_map.items():
    if token != "PAD_TOKEN":
        embedding_matrix[idx, :] = word2embedding_dict[token]


# --- Shuffle the dataset and split it into train / test halves ---
order = list(range(len(data)))
np.random.shuffle(order)  # in-place shuffle of the index list (returns None)
data = np.array(data)[order]        # reorder sentences
labels = np.array(labels)[order]    # reorder labels in lockstep
seqlens = np.array(seqlens)[order]  # reorder lengths in lockstep

# First 10000 shuffled examples form the training set...
train_x = data[:10000]
train_y = labels[:10000]
train_seqlens = seqlens[:10000]

# ...and the remaining 10000 form the test set.
test_x = data[10000:]
test_y = labels[10000:]
test_seqlens = seqlens[10000:]


def get_sentence_batch(batch_size, data_x, data_y, data_seqlens):
    """Sample a random batch of *batch_size* examples.

    Returns a tuple (x, y, seqlens): sentences encoded as lists of word
    indices, their one-hot labels, and their true (unpadded) lengths.
    """
    candidates = list(range(len(data_x)))
    np.random.shuffle(candidates)       # randomize, then take a prefix
    chosen = candidates[:batch_size]
    x = [[word2index_map[token] for token in data_x[i].split()]
         for i in chosen]               # words -> integer indices
    y = [data_y[i] for i in chosen]
    lens = [data_seqlens[i] for i in chosen]
    return x, y, lens


# --- Input placeholders (TF1 graph mode) ---
_inputs = tf.placeholder(tf.int32, shape=[batch_size, times_steps])      # sentences as word indices
embedding_placeholder = tf.placeholder(tf.float32, [vocabulary_size, GLOVE_SIZE])  # feeds the GloVe matrix
_labels = tf.placeholder(tf.float32, shape=[batch_size, num_classes])    # one-hot labels
_seqlens = tf.placeholder(tf.int32, shape=[batch_size])                  # true (unpadded) lengths

if PRE_TRAINED:
    # Zero-initialized embedding variable; the pre-trained vectors are
    # assigned into it via embedding_init.  trainable=True allows fine-tuning.
    embeddings = tf.Variable(tf.constant(0.0, shape=[vocabulary_size, GLOVE_SIZE]), trainable=True)
    # Assign the pre-trained embedding matrix to the variable (run once per session).
    embedding_init = embeddings.assign(embedding_placeholder)
    # Look up the embedding vector for each input word index.
    embed = tf.nn.embedding_lookup(embeddings, _inputs)
else:
    # No pre-training: learn embeddings from scratch, uniformly initialized.
    embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_dimension], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, _inputs)

with tf.name_scope("biGRU"):
    with tf.variable_scope('forward'):
        gru_fw_cell = tf.contrib.rnn.GRUCell(hidden_layer_size)     # forward-direction GRU cell
        gru_fw_cell = tf.contrib.rnn.DropoutWrapper(gru_fw_cell)    # wrap with dropout

    with tf.variable_scope('backward'):
        gru_bw_cell = tf.contrib.rnn.GRUCell(hidden_layer_size)     # backward-direction GRU cell
        gru_bw_cell = tf.contrib.rnn.DropoutWrapper(gru_bw_cell)    # wrap with dropout

    # bidirectional_dynamic_rnn is the bidirectional extension of dynamic_rnn;
    # sequence_length lets it stop at each sentence's true length.
    outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=gru_fw_cell, cell_bw=gru_bw_cell,
                                                      inputs=embed, sequence_length=_seqlens,
                                                      dtype=tf.float32, scope="BiGRU")
# Concatenate the forward and backward final states along the feature axis,
# producing a [batch_size, 2 * hidden_layer_size] tensor.
states = tf.concat(values=states, axis=1)

# Final linear (classification) layer parameters, initialized from a
# truncated normal distribution (shape / mean / stddev as given).  The input
# width is 2*hidden_layer_size because forward and backward states are concatenated.
weights = {
    
    'linear_layer': tf.Variable(
    tf.truncated_normal([2*hidden_layer_size, num_classes],
                        mean=0, stddev=.01))}
biases = {
    
    'linear_layer': tf.Variable(
    tf.truncated_normal([num_classes],
                        mean=0, stddev=.01))}

# Project the concatenated final states to per-class logits.
final_output = tf.matmul(states, weights['linear_layer']) + biases['linear_layer']

softmax = tf.nn.softmax_cross_entropy_with_logits(logits=final_output, labels=_labels)  # per-example cross-entropy
cross_entropy = tf.reduce_mean(softmax)                                                 # mean loss over the batch
# RMSProp replaces AdaGrad's gradient accumulation with an exponentially
# weighted moving average, which behaves better in non-convex settings.
train_step = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cross_entropy)
# A prediction is correct when the argmax of the logits matches the label.
correct_prediction = tf.equal(tf.argmax(_labels, 1),
                              tf.argmax(final_output, 1))
# Batch accuracy expressed as a percentage.
accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32))) * 100

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Load the pre-trained GloVe matrix into the embedding variable.
    sess.run(embedding_init, feed_dict={
    
    embedding_placeholder: embedding_matrix})

    # Training loop.
    for step in range(1000):
        x_batch, y_batch, seqlen_batch = get_sentence_batch(batch_size, train_x, train_y, train_seqlens)
        sess.run(train_step, feed_dict={
    
    _inputs: x_batch, _labels: y_batch, _seqlens: seqlen_batch})

        # Report training accuracy on the current batch every 10 steps.
        if step % 10 == 0:
            acc = sess.run(accuracy, feed_dict={
    
    _inputs: x_batch, _labels: y_batch, _seqlens: seqlen_batch})
            print("Accuracy at %d: %.5f" % (step, acc))

    # Evaluate accuracy on a few random test batches.
    for test_batch in range(5):
        x_test, y_test, seqlen_test = get_sentence_batch(batch_size, test_x, test_y, test_seqlens)
        batch_pred, batch_acc = sess.run([tf.argmax(final_output, 1), accuracy],
                                         feed_dict={
    
    _inputs: x_test, _labels: y_test, _seqlens: seqlen_test})
        print("Test batch accuracy %d: %.5f" % (test_batch, batch_acc))

       本文示例参考《TensorFlow学习指南——深度学习系统构建详解》第六章第三节。

       欢迎各位大佬交流讨论!

猜你喜欢

转载自blog.csdn.net/weixin_42721167/article/details/112425806
今日推荐