Training a Word Vector Model with RNN


Notes on word vectors:

A word vector model captures the distances and relationships between words; word vectors are also called word embeddings.
CBOW model: given the context around a position, it computes the probability of the word in the middle, i.e. it predicts the center word.
Skip-Gram model: given the center word, it computes the probabilities of the surrounding context words; it is the reverse of CBOW and predicts the context.

比如 "我喜欢你" 在Skip-Gram 中,取出其中的一个字当做输入,将其前面和后面的子分别当做标签,拆分如下:
"喜 我"
"喜 欢"
"欢 喜"
"欢 你"
每一行代表一个样本,第一列代表输入,第二列代表标签。即中间词取它的上下文为标签

CBOW is exactly the opposite: each context word takes the center word as its label:
"我 喜"
"欢 喜"
"喜 欢"
"你 欢"
The short sketch after this list reproduces both pairings.

tf.nn.nce_loss computes the NCE loss. Its main points are:
def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes,
             num_true=1,
             sampled_values=None,
             remove_accidental_hits=False,
             partition_strategy="mod",
             name="nce_loss")
'''
Suppose the input to nce_loss is K-dimensional (the word-vector dimension) and there are N classes (i.e. N words). Then:
weights.shape = (N, K)
biases.shape = (N,)
inputs.shape = (batch_size, K)
labels.shape = (batch_size, num_true)   # the sample labels, i.e. word ids
num_true: the number of positive labels per sample
num_sampled: how many negative samples to draw
num_classes = N
sampled_values: the sampled negatives; if None, a candidate sampler is used to draw them.
remove_accidental_hits: whether to discard negative samples that happen to coincide with a positive label
'''
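To make the parameter layout concrete, here is a shape-only sketch; the sizes N, K, batch_size and num_sampled below are made up for illustration:

import tensorflow as tf

N, K, batch_size, num_sampled = 5000, 128, 64, 32          # hypothetical sizes
weights = tf.Variable(tf.truncated_normal([N, K]))          # (N, K)
biases = tf.Variable(tf.zeros([N]))                         # (N,)
inputs = tf.placeholder(tf.float32, shape=[None, K])        # (batch_size, K) word vectors
labels = tf.placeholder(tf.int64, shape=[None, 1])          # (batch_size, num_true) word ids

loss = tf.reduce_mean(tf.nn.nce_loss(weights=weights, biases=biases,
                                     labels=labels, inputs=inputs,
                                     num_sampled=num_sampled, num_classes=N))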

The implementation logic of nce_loss is as follows:
_compute_sampled_logits: computes the logits and labels for the positive class and the sampled negative classes.
sigmoid_cross_entropy_with_logits: computes the loss between those logits and labels with sigmoid cross entropy, which is what gets backpropagated.
In other words, the function turns the problem into num_sampled + num_true binary classification problems, each scored with the cross-entropy loss familiar from logistic regression; a simplified sketch of this reduction follows below.
TF also provides a softmax_cross_entropy_with_logits function, which is different from this one.
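As a rough illustration only (a simplified sketch, not the actual TensorFlow source: the real _compute_sampled_logits also subtracts log-expected sample counts and can strip accidental hits), the reduction to binary classification looks roughly like this:

import tensorflow as tf

def nce_loss_sketch(weights, biases, labels, inputs, sampled_ids):
    # logits for each example's true class: shape (batch_size, 1)
    true_ids = tf.squeeze(labels, axis=1)
    true_w = tf.nn.embedding_lookup(weights, true_ids)
    true_b = tf.nn.embedding_lookup(biases, true_ids)
    true_logits = tf.reduce_sum(inputs * true_w, axis=1, keep_dims=True) + tf.expand_dims(true_b, 1)

    # logits for the shared negative samples: shape (batch_size, num_sampled)
    sampled_w = tf.nn.embedding_lookup(weights, sampled_ids)
    sampled_b = tf.nn.embedding_lookup(biases, sampled_ids)
    sampled_logits = tf.matmul(inputs, sampled_w, transpose_b=True) + sampled_b

    # num_true + num_sampled binary classification problems per example:
    # target 1 for the true class, 0 for every sampled negative
    logits = tf.concat([true_logits, sampled_logits], axis=1)
    targets = tf.concat([tf.ones_like(true_logits), tf.zeros_like(sampled_logits)], axis=1)
    per_class = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
    return tf.reduce_sum(per_class, axis=1)   # per-example loss; the caller averages it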

By default, negatives are drawn with log_uniform_candidate_sampler. How does that sampler work?
1. It samples an integer k from [0, range_max).
2. P(k) = (log(k + 2) - log(k + 1)) / log(range_max + 1)
So the larger k is, the smaller its probability of being sampled.
In TF's word2vec implementation, the more frequent a word is, the smaller its class id. Negative sampling in TF's word2vec therefore preferentially picks high-frequency words as negatives; the short check below makes this concrete.
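A quick numeric check of the formula (range_max = 350 here only because that is the vocabulary size used later in build_dataset):

import math

def log_uniform_prob(k, range_max):
    # P(k) = (log(k + 2) - log(k + 1)) / log(range_max + 1)
    return (math.log(k + 2) - math.log(k + 1)) / math.log(range_max + 1)

range_max = 350
for k in (0, 1, 10, 100, 349):
    print("P(%d) = %.5f" % (k, log_uniform_prob(k, range_max)))
# P(0) is roughly 0.118 while P(349) is about 0.0005:
# small ids (frequent words) are sampled far more often.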

The batch-generation file datas.py is as follows:

# -*- coding:utf-8 -*-
import numpy as np
import tensorflow as tf
import random
import collections
from collections import Counter
import jieba

from sklearn.manifold import TSNE
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['font.family'] = 'STSong'
mpl.rcParams['font.size'] = 20

training_file = "人体阴阳与电能.txt"

# read the Chinese text
def get_ch_label(text_file):
    labels = ""
    with open(text_file,"rb") as f:
        for label in f:
            labels += label.decode("gb2312")
    return labels

# word segmentation with jieba
def fenci(training_data):
    seg_list = jieba.cut(training_data)
    training_ci = " ".join(seg_list)
    # split the string on spaces
    training_ci = training_ci.split()
    training_ci = np.array(training_ci)
    training_ci = np.reshape(training_ci,[-1,])
    return training_ci
    
def build_dataset(words,n_words):
    count = [['UNK',-1]]
    # Counter counts word frequencies; keep the n_words - 1 most frequent words (plus 'UNK')
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    # build the word -> id dictionary
    for word,_ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(),dictionary.keys()))
    return data,count,dictionary,reversed_dictionary

data_index = 0
def generate_batch(data,batch_size,num_skips,skip_window):
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    
    batch = np.ndarray(shape = (batch_size),dtype = np.int32)
    labels = np.ndarray(shape = (batch_size,1),dtype = np.int32)
    # each sample spans the skip_window words before the target, the target itself, and the skip_window words after it
    span = 2 * skip_window + 1
    buffer = collections.deque(maxlen = span)
    if data_index + span > len(data):
        data_index = 0
    
    buffer.extend(data[data_index:data_index + span])
    data_index +=  span
    
    for i in range(batch_size // num_skips ):
        target = skip_window # the target's index inside buffer is skip_window
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            while target in targets_to_avoid:
                target = random.randint(0,span - 1)
            targets_to_avoid.append(target)
            batch[i*num_skips + j] = buffer[skip_window]
            labels[i*num_skips + j,0] = buffer[target]
            
        if data_index == len(data):
            buffer.extend(data[:span]) # refill the deque from the start of the data (keeps buffer a deque)
            data_index = span
        else:
            buffer.append(data[data_index])
            data_index += 1
    data_index = (data_index + len(data) - span) % len(data)
    return batch,labels
        
def get_batch(batch_size,num_skips = 2,skip_window = 1):
    # print (collections.Counter(['a','a','b','b','b','c']).most_common(1))
    training_data = get_ch_label(training_file)
    print "总字数",len(training_data)
    # 分词后的一维词表
    training_ci = fenci(training_data)
    training_label,count,dictionary,words = build_dataset(training_ci,350)
    words_size = len(dictionary)
    print "字典词数",words_size
    # print('Sample data',training_label[:10],[words[i] for i in training_label[:10]])
    
    # 获取batch,labels
    batch,labels = generate_batch(training_label,batch_size = batch_size,num_skips = num_skips,skip_window = skip_window)
    return batch,labels,words,words_size
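Before moving on, here is a small usage sketch (assumed, not part of the original files) that feeds generate_batch a toy id sequence and prints the (center, context) pairs it produces:

# -*- coding:utf-8 -*-
from datas import generate_batch

toy_data = list(range(20))   # pretend word ids 0..19
batch,labels = generate_batch(toy_data,batch_size = 8,num_skips = 2,skip_window = 1)
for center,context in zip(batch,labels[:,0]):
    print "%d -> %d" % (center,context)
# each center id appears num_skips (= 2) times, once per sampled context word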

Word-vector training and visualization:

# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import sys
reload(sys)
sys.setdefaultencoding("utf-8")

from datas import get_batch,np,tf,plt,TSNE
batch_inputs,batch_labels,words,words_size =  get_batch(batch_size = 200)

batch_size = 128
embedding_size = 128
skip_window = 1
num_skips =2

valid_size = 16
valid_window = words_size / 2
valid_examples = np.random.choice(valid_window,valid_size,replace = False) # draw 16 ids from [0, valid_window) without replacement
num_sampled = 64 # number of negative samples

tf.reset_default_graph()
train_inputs = tf.placeholder(tf.int32,shape = [None])
train_labels = tf.placeholder(tf.int32,shape = [None,1])
valid_dataset = tf.constant(valid_examples,dtype = tf.int32)

with tf.device('/cpu:0'):
    embeddings = tf.Variable(tf.random_uniform([words_size,embedding_size],-1.0,1.0))
    embed = tf.nn.embedding_lookup(embeddings,train_inputs)
    # compute the NCE loss
    nce_weights = tf.Variable(tf.truncated_normal([words_size,embedding_size],stddev = 1.0 /tf.sqrt(np.float32(embedding_size))))
    nce_biases = tf.Variable(tf.zeros([words_size]))

    loss = tf.reduce_mean(tf.nn.nce_loss(weights = nce_weights,biases = nce_biases,
                            labels = train_labels,inputs = embed,num_sampled = num_sampled,num_classes = words_size))
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # compute the cosine similarity between the minibatch examples and all embeddings
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings),axis = 1,keep_dims = True)) # L2 norm of each row
    normalized_embeddings = embeddings / norm # normalize the embeddings
    validate_embeddings = tf.nn.embedding_lookup(normalized_embeddings,valid_dataset)
    # the result is a matrix: each row holds the similarity of one valid example to every word in the vocabulary
    similarity = tf.matmul(validate_embeddings,normalized_embeddings,transpose_b = True) # cosine similarity matrix

if __name__ == "__main__":
    num_steps = 100001
    with tf.Session(graph = tf.get_default_graph()) as sess:
        tf.global_variables_initializer().run()
        print ('Initialized')
        average_loss = 0
        for step in range(num_steps):
            feed_dict = {train_inputs:batch_inputs,train_labels:batch_labels}
            _,loss_val = sess.run([optimizer,loss],feed_dict = feed_dict)
            average_loss += loss_val
            emv = sess.run(embed,feed_dict = {train_inputs:[37,18]})
            #print "emv----------------------------------",emv[0]
            if step % 1000 ==0:
                average_loss /= 1000
                print 'Average loss at step ',step,':',average_loss
                average_loss = 0
            
                sim = similarity.eval(session = sess)
                for i in range(valid_size):
                    valid_word = words[valid_examples[i]]
                    top_k = 8
                    nearest = (-sim[i,:]).argsort()[1:top_k + 1] # argsort returns the indices that sort values ascending; index 0 is the word itself, so skip it
                    log_str = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = words[nearest[k]]
                        log_str = '%s,%s' %(log_str,close_word)
                    print log_str
        
        final_embeddings = sess.run(normalized_embeddings)
    
    # visualize the word vectors with t-SNE
    def plot_with_labels(low_dim_embs,labels,filename = 'tsne.png'):
        assert low_dim_embs.shape[0] >= len(labels),'More labels than embeddings'
        plt.figure(figsize = (18,18))
        for i,label in enumerate(labels):
            x,y = low_dim_embs[i,:]
            plt.scatter(x,y)
            plt.annotate(label.decode("utf-8"),xy = (x,y),xytext = (5,2),textcoords ='offset points',ha = 'right',va = 'bottom')
        plt.savefig(filename)

    try:
        tsne = TSNE(perplexity = 30,n_components = 2,init = 'pca',n_iter = 5000)
        plot_only = 80 # plot only the first 80 words
        low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
        labels = [unicode(words[i]) for i in range(plot_only)]
        plot_with_labels(low_dim_embs,labels)
    except:
        print "Save png Error"

The run results: since the corpus is a single article of only about 1,700 characters, the trained vectors are not particularly good.

Visualization of the resulting word-vector model: the closer two words are in the plot, the more related they are.
