TensorFlow Case Series (2): Natural Language Processing with TensorFlow + Word2Vec

TensorFlow Convolutional Neural Network Case Series (1): Cat vs. Dog Recognition https://blog.csdn.net/duan_zhihua/article/details/81156693


     Steps for natural language processing with TensorFlow + Word2Vec:

  1. Load the text of the novel 倚天屠龙记 (Heaven Sword and Dragon Sabre).
  2. Convert the text into Word2vec training data: create a TensorFlow Graph, compute the NCE loss with tf.nn.nce_loss (predicting the target word from each word's embedding vector via noise-contrastive training), and initialize the TensorFlow variables.
  3. Train on the sentences read in line by line and save the model (a small sketch of how a sentence becomes training pairs follows this list).
  4. Load the trained model and, for the test words ['郭靖', '黄蓉'], find the words most closely associated with them and compute the word similarities.
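As a minimal illustration of steps 2 and 3, the sketch below shows how one segmented sentence is turned into (center word, context word) training pairs inside a sliding window; the tiny word2id mapping and the sentence are made-up values, and the real logic lives in Word2vec.train_by_sentence further below.

# A toy word2id mapping and a segmented sentence (hypothetical values).
word2id = {'金庸': 0, '倚天': 1, '屠龙记': 2}
sent = ['金庸', '倚天', '屠龙记']
win_len = 2  # context window on each side of the center word

batch_inputs, batch_labels = [], []
for i in range(len(sent)):
    start = max(0, i - win_len)
    end = min(len(sent), i + win_len + 1)
    for j in range(start, end):
        if j == i:
            continue
        # (center word id, context word id) is one skip-gram training pair
        batch_inputs.append(word2id[sent[i]])
        batch_labels.append(word2id[sent[j]])

print(batch_inputs)  # [0, 0, 1, 1, 2, 2]
print(batch_labels)  # [1, 2, 0, 2, 0, 1]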

      The theory behind Word2vec, its mathematical derivation, and word-frequency processing are not covered here; this article focuses on how to use the TensorFlow framework. The example code comes from material found online and was tested and slightly modified while learning TensorFlow. Thanks to the AI experts who shared their work!
 

 The code directory structure used in this article is as follows:
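Judging from the imports and file paths in the two listings below, the layout is roughly the following (directory names such as entity/, tensorboard/ and model/ are inferred from the code, not from an actual listing of the project):

tensorflow_word2vec_example/
├── word2vec_full_code.py       # main script: corpus loading, word cloud, word frequency, training
├── entity/
│   └── Word2vec.py             # the Word2vec model class
├── 倚天屠龙记2.txt              # novel corpus (GBK encoded)
├── stop_words_zh_UTF-8.txt     # stop-word list
├── simsun.ttf                  # font used by WordCloud
├── tensorboard/                # TensorBoard summaries written during training
└── model/                      # parameters and checkpoint written by save_model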

  •     word2vec_full_code.py:
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import math
import pickle as pkl
from pprint import pprint
import collections
import re
import jieba
import os.path as path
import os
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from scipy.misc import imread
import string
import pandas as pd

from  entity.Word2vec import Word2vec

def draw_wordcloud(word_list=[]):
    str_convert = ' '.join(word_list)
    word_cloud = WordCloud(font_path="simsun.ttf").generate(str_convert)
    # Save the word cloud to an image file
    word_cloud.to_file('word_cloud.jpg')
    # Display the word cloud
    plt.imshow(word_cloud, interpolation='bilinear')
    plt.show()

# Count word frequencies and write them to cipin.csv
def word_freq(word_list=[]):
    wordDF = pd.DataFrame({'word': word_list})
    wordStat = wordDF.groupby(by=["word"])["word"].agg({
        "计数": np.size
    }).reset_index().sort_values(
        by=["计数"],
        ascending=False
    )
    wordStat.to_csv('cipin.csv', sep=',', header=True, index=True, encoding='utf-8')


def get_wordList():
    global sentence_list, word_list
    # Step 1: read the stop-word list
    stop_words = []
    with open('stop_words_zh_UTF-8.txt', encoding='utf-8') as f:
        line = f.readline()
        while line:
            stop_words.append(line[:-1])
            line = f.readline()
    stop_words = set(stop_words)
    print('停用词读取完毕,共{n}个单词'.format(n=len(stop_words)))
    # Step 2: read the text, preprocess it, segment with jieba, and build the vocabulary
    raw_word_list = []
    sentence_list = []
    with open('倚天屠龙记2.txt', encoding='gbk') as f:
        line = f.readline()
        while line:
            while '\n' in line:
                line = line.replace('\n', '')
            while ' ' in line:
                line = line.replace(' ', '')
            if len(line) > 0:  # if the line is non-empty
                raw_words = list(jieba.cut(line, cut_all=False))
                dealed_words = []
                for word in raw_words:
                    if word not in stop_words and word not in ['qingkan520', 'www', 'com', 'http']:
                        raw_word_list.append(word)
                        dealed_words.append(word)
                sentence_list.append(dealed_words)
            line = f.readline()
    word_freq(raw_word_list)
    draw_wordcloud(raw_word_list)
    word_count = collections.Counter(raw_word_list)
    print('文本中总共有{n1}个单词,不重复单词数{n2},选取前40000个单词进入词典'
          .format(n1=len(raw_word_list), n2=len(word_count)))
    word_count = word_count.most_common(40000)
    word_list = [x[0] for x in word_count]
    return word_list


if __name__ == '__main__':
    word_list=get_wordList()
    # Create the model and train it
    w2v = Word2vec(vocab_list=word_list,  # vocabulary list
                   embedding_size=200,
                   win_len=2,
                   learning_rate=1,
                   num_sampled=100,  # number of negative samples
                   logdir='./tensorboard/',
                   )

    num_steps = 10000
    for i in range(num_steps):
        # One entry of sentence_list looks like <class 'list'>: ['\u3000', '\u3000', '金庸', '倚天', '屠龙记']
        sent = sentence_list[i % len(sentence_list)]
        w2v.train_by_sentence([sent])
    w2v.save_model('model')

    w2v.load_model('model')
    test_word = ['郭靖', '黄蓉']
    test_id = [word_list.index(x) for x in test_word] #<class 'list'>: [2264, 1980]
    test_words, near_words, sim_mean, sim_var = w2v.cal_similarity(test_id)
    print(test_words, near_words, sim_mean, sim_var)
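A side note on word_freq above: the dict-style .agg({...}) call is what produces the pandas FutureWarning visible in the run log later in this article. A sketch of an equivalent frequency count that avoids the deprecated form (an alternative assuming a reasonably recent pandas, not the code that produced the log below):

import pandas as pd

def word_freq(word_list=[]):
    # value_counts already sorts by count in descending order
    word_stat = pd.Series(word_list).value_counts().rename_axis('word').reset_index(name='计数')
    word_stat.to_csv('cipin.csv', sep=',', header=True, index=True, encoding='utf-8')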
  • Word2vec.py:
# -*- coding: utf-8 -*-
import collections
import tensorflow as tf
import numpy as np
import os
import math
import pickle as pkl
class Word2vec():
    def __init__(self,
                 vocab_list=None,
                 #embedding_size=200,
                 embedding_size=100,
                 win_len=3,  # context window size on each side of the center word
                 num_sampled=1000,
                 learning_rate=1.0,
                 logdir='./tensorboard/',
                 model_path=None
                 ):

        # Basic model parameters
        self.batch_size = None  # batch size (None: determined at run time)
        if model_path != None:
            self.load_model(model_path)
        else:
            # model parameters
            assert type(vocab_list) == list
            self.vocab_list = vocab_list
            self.vocab_size = vocab_list.__len__()
            self.embedding_size = embedding_size
            self.win_len = win_len
            self.num_sampled = num_sampled
            self.learning_rate = learning_rate
            self.logdir = logdir

            self.word2id = {}  # word => id mapping, e.g. {'道': 0, '\u3000': 1, '张翠山': 2, '便': 3, '不': 4}
            for i in range(self.vocab_size):
                self.word2id[self.vocab_list[i]] = i # see word2id.txt for the full mapping

            
            self.train_words_num = 0   
            self.train_sents_num = 0   
            self.train_times_num = 0   

       
            self.train_loss_records = collections.deque(maxlen=10)            
            self.train_loss_k10 = 0
        # Build the computation graph
        self.build_graph()
        #self.init_op()



        if model_path != None:
            tf_model_path = os.path.join(model_path, 'tf_vars')
            self.saver.restore(self.sess, tf_model_path)

    def init_op(self):
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(self.init)
        self.summary_writer = tf.summary.FileWriter(self.logdir, self.sess.graph)




    def build_graph(self):
        self.graph = tf.Graph()  # create a new computation graph
        self.sess = tf.Session(graph=self.graph)

        with self.graph.as_default():  # use this graph as the default
            # Input placeholders:
            # train_inputs holds the indices of the input (center) words, as integers.
            # train_labels holds the indices of the target context words.
            self.train_inputs = tf.placeholder(tf.int32, shape=[self.batch_size])  # placeholders are fed concrete values at run time
            self.train_labels = tf.placeholder(tf.int32, shape=[self.batch_size, 1])

            # Embedding matrix: word vectors with shape (vocab_size, embedding_size), i.e. (20035, 200) here
            # tf.random_uniform returns a [self.vocab_size, self.embedding_size] matrix with uniformly distributed values
            self.embedding_dict = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0)
            )

            # Define a weight vector and a bias for every word in the vocabulary. tf.truncated_normal draws values from a truncated normal distribution.
            # shape: (20035, 200)
            self.nce_weight = tf.Variable(tf.truncated_normal([self.vocab_size, self.embedding_size],
                                                              stddev=1.0 / math.sqrt(self.embedding_size)))

            self.nce_biases = tf.Variable(tf.zeros([self.vocab_size]))#shape:(20035)

            # Look up the embedding vectors for the words in the batch (rows of embedding_dict selected by index)
            embed = tf.nn.embedding_lookup(self.embedding_dict, self.train_inputs)

            # Predict the target word from each word's embedding vector using noise-contrastive training.
            # Compute the NCE loss.
            self.loss = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights=self.nce_weight,
                    biases=self.nce_biases,
                    labels=self.train_labels,
                    inputs=embed,
                    num_sampled=self.num_sampled,
                    num_classes=self.vocab_size
                )
            )

            # For TensorBoard
            tf.summary.scalar('loss', self.loss)

            # Stochastic gradient descent: compute the gradients and update the parameters.
            # Note: a fixed learning rate of 0.1 is used here, so the learning_rate passed to the constructor is not applied.
            self.train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(self.loss)

            # Compute the similarity between a given set of test words and all words
            self.test_word_id = tf.placeholder(tf.int32, shape=[None])
            vec_l2_model = tf.sqrt(
                tf.reduce_sum(tf.square(self.embedding_dict), 1, keep_dims=True)
            )

            avg_l2_model = tf.reduce_mean(vec_l2_model)
            # For TensorBoard
            tf.summary.scalar('avg_vec_model', avg_l2_model)
            # L2-normalize the embeddings
            self.normed_embedding = self.embedding_dict / vec_l2_model

            test_embed = tf.nn.embedding_lookup(self.normed_embedding, self.test_word_id)
            # Cosine similarity between the test words and every word in the vocabulary
            self.similarity = tf.matmul(test_embed, self.normed_embedding, transpose_b=True)

            print("tf.print", tf.Print(self.embedding_dict, [self.embedding_dict], summarize=200))
            embedding_dict_test = tf.Print(self.embedding_dict, ["embedding_dict:",self.embedding_dict.shape,self.embedding_dict], first_n=2, summarize=200)



            # Initialize the variables
            self.init = tf.global_variables_initializer()
            self.sess.run(self.init)
            self.sess.run(embedding_dict_test)


            self.merged_summary_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(self.logdir, self.sess.graph)
            self.saver = tf.train.Saver()

    def train_by_sentence(self, input_sentence=[]):
        #  input_sentence: [sub_sent1, sub_sent2, ...]
        # each sub_sent is a list of words
        sent_num = input_sentence.__len__()
        batch_inputs = []
        batch_labels = []
        # sent is e.g. <class 'list'>: [['\u3000', '\u3000', '金庸', '倚天', '屠龙记']]
        for sent in input_sentence:
            for i in range(sent.__len__()):
                # start and end positions of the context window within the sentence
                start = max(0, i - self.win_len) # e.g. start = 2
                end = min(sent.__len__(), i + self.win_len + 1) # e.g. end = 5
                for index in range(start, end):
                    if index == i:
                        continue
                    else:
                        # center word and context label
                        # e.g. sent: <class 'list'>: ['\u3000', '\u3000', '金庸', '倚天', '屠龙记'], i=4
                        input_id = self.word2id.get(sent[i]) # e.g. for i=4 the word is 屠龙记, '屠龙记': 1251 in word2id, so input_id = 1251
                        label_id = self.word2id.get(sent[index]) # e.g. for index=2 the word is 金庸, '金庸': 1250 in word2id, so label_id = 1250
                        # Compare against None explicitly: id 0 (the most frequent word '道') is falsy,
                        # so a truthiness test like `if not (input_id and label_id)` would wrongly skip it.
                        if input_id is None or label_id is None:
                            continue
                        batch_inputs.append(input_id) #<class 'list'>: [1, 1, 1, 1, 1, 1250, 1250, 1250, 1250, 668, 668, 668, 1251, 1251]
                        batch_labels.append(label_id) #<class 'list'>: [1, 1250, 1, 1250, 668, 1, 1, 668, 1251, 1, 1250, 1251, 1250, 668]
        if len(batch_inputs) == 0:
            return
        # Convert to numpy arrays so they can be reshaped
        batch_inputs = np.array(batch_inputs, dtype=np.int32)
        batch_labels = np.array(batch_labels, dtype=np.int32)
        # Reshape the labels into a column vector
        batch_labels = np.reshape(batch_labels, [batch_labels.__len__(), 1])

        feed_dict = {
            self.train_inputs: batch_inputs,
            self.train_labels: batch_labels
        }
        # Train the model: feed the placeholders via feed_dict and run the session.
        _, loss_val, summary_str = self.sess.run([self.train_op, self.loss, self.merged_summary_op],
                                                 feed_dict=feed_dict)

        # train loss
        self.train_loss_records.append(loss_val)
        # self.train_loss_k10 = sum(self.train_loss_records)/self.train_loss_records.__len__()
        self.train_loss_k10 = np.mean(self.train_loss_records)

        if self.train_sents_num % 1000 == 0:
            self.summary_writer.add_summary(summary_str, self.train_sents_num)
            print("{a} sentences dealed, loss: {b}"
                  .format(a=self.train_sents_num, b=self.train_loss_k10))

        # train times
        self.train_words_num += batch_inputs.__len__()
        self.train_sents_num += input_sentence.__len__()
        self.train_times_num += 1

    def cal_similarity(self, test_word_id_list, top_k=10):
        sim_matrix = self.sess.run(self.similarity, feed_dict={self.test_word_id: test_word_id_list})# test_word_id_list:<class 'list'>: [2264, 1980]
        sim_mean = np.mean(sim_matrix)
        sim_var = np.mean(np.square(sim_matrix - sim_mean))
        test_words = []
        near_words = []
        for i in range(test_word_id_list.__len__()):
            test_words.append(self.vocab_list[test_word_id_list[i]])
            nearst_id = (-sim_matrix[i, :]).argsort()[1:top_k + 1] #[ 644 1055   53 1422 6122  637 3742  134  244 3349]
            nearst_word = [self.vocab_list[x] for x in nearst_id] #<class 'list'>: ['踢', '大海', '瞧', '出现', '知音', '远', '地上', '三个', '后来', '冒险']
            near_words.append(nearst_word)
        return test_words, near_words, sim_mean, sim_var

    def save_model(self, save_path):

        if os.path.isfile(save_path):
            raise RuntimeError('the save path should be a dir')
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        # Record the model parameters
        model = {}
        var_names = ['vocab_size',  # int       model parameters
                     'vocab_list',  # list
                     'learning_rate',  # int
                     'word2id',  # dict
                     'embedding_size',  # int
                     'logdir',  # str
                     'win_len',  # int
                     'num_sampled',  # int
                     'train_words_num',  # int       train info
                     'train_sents_num',  # int
                     'train_times_num',  # int
                     'train_loss_records',  # deque   train loss
                     'train_loss_k10',  # float
                     ]
        for var in var_names:
            model[var] = getattr(self, var)



        param_path = os.path.join(save_path, 'params.pkl')
        if os.path.exists(param_path):
            os.remove(param_path)
        with open(param_path, 'wb') as f:
            pkl.dump(model, f)

        # Save the TensorFlow variables
        tf_path = os.path.join(save_path, 'tf_vars')
        if os.path.exists(tf_path):
            os.remove(tf_path)
        self.saver.save(self.sess, tf_path)

    def load_model(self, model_path):
        if not os.path.exists(model_path):
            raise RuntimeError('file not exists')
        param_path = os.path.join(model_path, 'params.pkl')
        with open(param_path, 'rb') as f:
            model = pkl.load(f)
            self.vocab_list = model['vocab_list']
            self.vocab_size = model['vocab_size']
            self.logdir = model['logdir']
            self.word2id = model['word2id']
            self.embedding_size = model['embedding_size']
            self.learning_rate = model['learning_rate']
            self.win_len = model['win_len']
            self.num_sampled = model['num_sampled']
            self.train_words_num = model['train_words_num']
            self.train_sents_num = model['train_sents_num']
            self.train_times_num = model['train_times_num']
            self.train_loss_records = model['train_loss_records']
            self.train_loss_k10 = model['train_loss_k10']
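To make the similarity computation in cal_similarity concrete: build_graph L2-normalizes every row of the embedding matrix, so multiplying a test word's normalized vector by the transposed normalized matrix yields its cosine similarity to every word. A small NumPy sketch with made-up numbers (a toy 4-word vocabulary; the real graph does the same thing with tf ops):

import numpy as np

# Hypothetical 4-word vocabulary with 3-dimensional embeddings.
embedding = np.array([[1.0, 0.0, 0.0],
                      [0.9, 0.1, 0.0],
                      [0.5, 0.5, 0.0],
                      [0.0, 0.0, 1.0]])

# Row-wise L2 normalization, mirroring vec_l2_model / normed_embedding in build_graph.
norms = np.sqrt(np.sum(np.square(embedding), axis=1, keepdims=True))
normed = embedding / norms

test_ids = [0]                      # plays the role of test_word_id
sim = normed[test_ids] @ normed.T   # mirrors tf.matmul(..., transpose_b=True)
print(np.round(sim, 3))             # [[1.    0.994 0.707 0.   ]]

# The top-k neighbours are then read off with argsort, as cal_similarity does,
# skipping index 0 because a word is always most similar to itself.
nearest = (-sim[0]).argsort()[1:3]
print(nearest)                      # [1 2]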
  • A sample of the novel 倚天屠龙记 used as the data source for this article:
   金庸《倚天屠龙记》
  一   天涯思君不可忘
  “春游浩荡,是年年寒食,梨花时节。白锦无纹香烂漫,玉树琼苞堆雪。静夜沉沉,浮光霭霭,冷浸溶溶月。人间天上,烂银霞照通彻。浑似姑射真人,天姿灵秀,意气殊高洁。万蕊参差谁信道,不与群芳同列。浩气清英,仙才卓荦,下土难分别。瑶台归去,洞天方看清绝。”
  作这一首《无俗念》词的,乃南宋末年一位武学名家,有道之士。此人姓丘,名处机,道号长春子,名列全真七子之一,是全真教中出类拔萃的人物。《词品》评论此词道:“长春,世之所谓仙人也,而词之清拔如此”。这首词诵的似是梨花,其实词中真意却是赞誉一位身穿白衣的美貌少女,说她“浑似姑射真人,天姿灵秀,意气殊高洁”,又说她“浩气清英,仙才卓荦”,“不与群芳同列”。词中所颂这美女,乃古墓派传人小龙女。她一生爱穿白衣,当真如风拂玉树,雪裹琼苞,兼之生性清冷,实当得起“冷浸溶溶月”的形容,以“无俗念”三字赠之,可说十分贴切。长春子丘处机和她在终南山上比邻而居,当年一见,便写下这首词来。
  这时丘处机逝世已久,小龙女也已嫁与神雕大侠杨过为妻。在河南少室山山道之上,却另有一个少女,正在低低念诵此词。这少女十八九岁年纪,身穿淡黄衣衫,骑着一头青驴,正沿山道缓缓而上,心中默想:“也只有龙姊姊这样的人物,才配得上他。”这一个“他”字,指的自然是神雕大侠杨过了。她也不拉缰绳,任由那青驴信步而行,一路上山。过了良久,她又低声吟道:“欢乐趣,离别苦,就中更有痴儿女。君应有语,渺万里层云,千山暮雪,只影向谁去?”
  她腰悬短剑,脸上颇有风尘之色,显是远游已久;韶华如花,正当喜乐无忧之年,可是容色间却隐隐有懊闷意,似是愁思袭人,眉间心上,无计回避。
  这少女姓郭,单名一个襄字,乃大侠郭靖和女侠黄蓉的次女,有个外号叫做“小东邪”。她一驴一剑,只身漫游,原想排遣心中愁闷,岂知酒入愁肠固然愁上加愁,而名山独游,一般的也是愁闷徒增。河南少室山山势颇陡,山道却是一长列宽大的石级,规模宏伟,工程着实不小,那是唐朝高宗为临幸少林寺而开凿,共长八里。郭襄骑着青驴委折而上,只见对面山上五道瀑布飞珠溅玉,奔泻而下,再俯视群山,已如蚁蛭。顺着山道转过一个弯,遥见黄墙碧瓦,好大一座寺院。
  她望着连绵屋宇出了一会神,心想:“少林寺向为天下武学之源,但华山两次论剑,怎地五绝之中并无少林寺高僧?难道寺中和尚自忖没有把握,生怕堕了威名,索性便不去与会?又难道众僧侣修为精湛,名心尽去,武功虽高,却不去和旁人争强赌胜?”她下了青驴,缓步走向寺前,只见树木森森,荫着一片碑林。石碑大半已经毁破,字迹模糊,不知写着些甚么。心想:“便是刻凿在石碑上的字,年深月久之后也须磨灭,如何刻在我心上的,却是时日越久反而越加清晰?”瞥眼只见一块大碑上刻着唐太宗赐少林寺寺僧的御札,嘉许少林寺僧立功平乱。碑文中说唐太宗为秦王时,带兵讨伐王世充,少林寺和尚投军立功,最著者共一十三人。其中只昙宗一僧受封为大将军,其余十二僧不愿为官,唐太宗各赐紫罗袈裟一袭。她神驰想象:“当隋唐之际,少林寺武功便已名驰天下,数百年来精益求精,这寺中卧虎藏龙,不知有多少好手。”郭襄自和杨过、小龙女夫妇在华山绝顶分手后,三年来没得到他二人半点音讯。她心中长自记挂,于是禀明父母,说要出来游山玩水,实则是打听杨过的消息,她倒也不一定要和他夫妇会面,只须听到一些杨过如何在江湖上行侠的讯息,也便心满意足了。偏生一别之后,他夫妇从此便不在江湖上露面,不知到了何处隐居,郭襄自北而南,又从东至西,几乎踏遍了大半个中原,始终没听到有人说起神雕大侠杨过的近讯。这一日她到了河南,想起少林寺中有一位僧人无色禅师是杨过的好友,自己十六岁生日之时,无色瞧在杨过的面上,曾托人送来一件礼物,虽然从未和他见过面,但不妨去问他一问,说不定他会知道杨过的踪迹,这才上少林寺来。正出神间,忽听得碑林旁树丛后传出一阵铁链当啷之声,一人诵念佛经:“是时药叉共王立要,即于无量百千万亿大众之中,说胜妙伽他曰:由爱故生忧,由爱故生怖;若离于爱者,无忧亦无怖……”郭襄听了这四句偈言,不由得痴了,心中默默念道:“由爱故生忧,由爱故生怖;若离于爱者,无忧亦无怖。”只听得铁链拖地和念佛之声渐渐远去。郭襄低声道:“我要问他,如何才能离于爱,如何能无忧无怖?”随手将驴缰在树上一绕,拨开树丛,追了过去。只见树后是一条上山的小径,一个僧人挑了一对大桶,正缓缓往山上走去。郭襄快步跟上,奔到距那僧人七八丈处,不由得吃了一惊,只见那僧人挑的是一对大铁桶,比之寻常水桶大了两倍有余,那僧人颈中、手上、脚上,更绕满了粗大的铁链,行走时铁链拖地,不停发出声响。这对大铁桶本身只怕便有二百来斤,桶中装满了水,重量更是惊人。郭襄叫道:“大和尚,请留步,小女子有句话请教。”
  • After loading the novel text and segmenting it, the word list vocab_list is obtained; its ordering matches the word2id mapping shown further below.

  • The word-cloud image generated from the word frequencies (saved by draw_wordcloud as word_cloud.jpg) is also worth a look.

  • Part of the word-frequency file cipin.csv:
    ,word,计数
    18405,道,1699
    12, ,1521
    8262,张翠山,1112
    2891,便,728
    883,不,648
    10,…,600
    17124,说,591
    12217,殷素素,543
    754,上,511
    1412,中,502
    4590,却,491
    17160,说道,490
    4683,去,463
    2175,人,445
    .......
  • The stop-word file stop_words_zh_UTF-8.txt was downloaded from the Internet; part of its content:
?
、
。
“
”
《
》
!
,
:
;
?
啊
阿
哎
哎呀
哎哟
唉
俺
.......
  • word2id.txt records the mapping between words and their indices; part of its content:
word2id:
{'道': 0, '\u3000': 1, '张翠山': 2, '便': 3, '不': 4, '…': 5, '说': 6, '殷素素': 7, '上': 8, '中': 9, '却': 10, '说道': 11, '去': 12, '人': 13, '听': 14, '谢逊': 15, '见': 16, '都': 17, '已': 18, '武功': 19, '一个': 20, '张三丰': 21, '派': 22, '只': 23, '‘': 24, '俞莲舟': 25, '’': 26, '少林': 27, '大': 28, '好': 29, '无忌': 30, '师父': 31, '下': 32, '弟子': 33, '俞岱岩': 34, '一声': 35, '便是': 36, '没': 37, '想': 38, '心中': 39, '只见': 40, '再': 41, '还': 42, '罢': 43, '事': 44, '不是': 45, '之中': 46, '不知': 47, '走': 48, '两人': 49, '天鹰': 50, '时': 51, '出': 52, '瞧': 53, '都大锦': 54, '镖局': 55, '少林寺': 56, '出来': 57, '武当派': 58, '请': 59, '教': 60, '心想': 61, '知道': 62, '今日': 63, '二人': 64, '不敢': 65, '郭襄': 66, '不能': 67, '死': 68, '宋远桥': 69, '突然': 70, '无': 71, '何足道': 72, '当下': 73, '武当': 74, '后': 75, '笑': 76, '原来': 77, '甚': 78, '起来': 79, '觉远': 80, '昆仑': 81, '常遇春': 82, '无色': 83, '身子': 84, '话': 85, '竟': 86, '站': 87, '三人': 88, '老者': 89, '张松溪': 90, '之下': 91, '使': 92, '难道': 93, '问': 94, '这般': 95, '喝道': 96, '跟着': 97, '左手': 98, '江湖': 99, '杀': 100, '之后': 101, '字': 102, '僧人': 103, '这位': 104, '孩子': 105, '两个': 106, '龙门': 107, '却是': 108, '脸上': 109, '伸手': 110, '才': 111, '功夫': 112, '出手': 113, '少女': 114, '张君宝': 115, '性命': 116, '宝刀': 117, '张五侠': 118, '不可': 119, '登时': 120, '大师': 121, '说话': 122, '一招': 123, '均': 124, '殷梨亭': 125, '屠龙刀': 126, '吃': 127, '仍': 128, '长剑': 129, '和尚': 130, '身上': 131, '右手': 132, '张无忌': 133, '三个': 134, '当真': 135, '一人': 136, '姑娘': 137, '兵刃': 138, '心下': 139, '问道': 140, '听到': 141, '过去': 142, '倒': 143, .........
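One detail worth noticing in this mapping: the most frequent word '道' gets id 0. Because 0 is falsy in Python, a truthiness check such as `if not (input_id and label_id)` in train_by_sentence would silently drop every training pair involving '道'; that is why the listing above tests against None explicitly. A small sketch of the difference, using values taken from the mapping above:

word2id = {'道': 0, '\u3000': 1, '张翠山': 2}

input_id = word2id.get('道')        # 0
label_id = word2id.get('张翠山')     # 2

print(not (input_id and label_id))           # True  -> the pair ('道', '张翠山') would be skipped
print(input_id is None or label_id is None)  # False -> the pair is kept, as intended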

The output of a run is shown below; from it one can see which words in the novel text are most closely associated with ['郭靖', '黄蓉']:

G:\ProgramData\Anaconda3\python.exe D:/PycharmProjects/Tensorflow_2018_test/dzh_word2vec/tensorflow_word2vec_example/word2vec_full_code.py
停用词读取完毕,共505个单词
Building prefix dict from the default dictionary ...
Loading model from cache C:\Users\lenovo\AppData\Local\Temp\jieba.cache
Loading model cost 1.306 seconds.
Prefix dict has been built succesfully.
D:/PycharmProjects/Tensorflow_2018_test/dzh_word2vec/tensorflow_word2vec_example/word2vec_full_code.py:34: FutureWarning: using a dict on a Series for aggregation
is deprecated and will be removed in a future version
  "计数": np.size
文本中总共有94582个单词,不重复单词数20035,选取前40000个单词进入词典
2018-07-28 09:11:44.245468: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE instructions, but these are available on your machine and could speed up CPU computations.
2018-07-28 09:11:44.245886: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE2 instructions, but these are available on your machine and could speed up CPU computations.
2018-07-28 09:11:44.246386: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE3 instructions, but these are available on your machine and could speed up CPU computations.
2018-07-28 09:11:44.247435: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
2018-07-28 09:11:44.247850: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2018-07-28 09:11:44.248283: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
2018-07-28 09:11:44.248753: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations.
2018-07-28 09:11:44.249188: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\platform\cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations.
tf.print Tensor("Print:0", shape=(20035, 200), dtype=float32)
2018-07-28 09:11:45.227779: I c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\kernels\logging_ops.cc:79] [embedding_dict:][20035 200][[-0.77914405 0.79151034 -0.010726213 -0.25953674 -0.18441415 0.91375113 0.53179407 -0.36695218 0.38310695 0.38405275 0.62971759 0.43836164 -0.081039906 0.9883554 -0.83298779 -0.62883639 0.9534657 0.60202217 -0.073337078 -0.34434557 0.67567182 -0.43930292 -0.14380455 0.46865034 0.44432259 0.47929859 0.032618046 0.099161386 0.414644 -0.41022897 0.25183225 0.99723482 -0.043951273 -0.44877195 -0.10115552 0.19715476 0.3863771 -0.35254979 0.88766885 0.16290379 0.44491267 0.13098645 0.70956707 -0.68307352 -0.92652154 -0.52051234 -0.24961495 -0.624187 0.53555775 0.075112343 -0.25340271 0.50158167 0.60081315 0.17040896 -0.62421918 -0.067055941 -0.87068582 -0.52720952 -0.2524085 0.33720016 -0.38160658 -0.92821479 -0.82829 -0.26891422 -0.982393 -0.34348178 -0.34593725 -0.73086143 -0.92031336 -0.83723879 0.20397139 0.63790631 -0.99155068 -0.38665795 -0.6032877 0.35202479 -0.012173414 -0.29592013 -0.64303589 0.67022085 0.42715836 -0.1804142 -0.37994146 -0.55125475 0.80090809 -0.95802307 -0.3348639 0.26535749 0.13348079 -0.060599327 0.55445123 -0.0638814 -0.012206316 -0.019565344 0.55873513 -0.073151588 -0.33200455 0.37471032 -0.45809937 -0.96168089 -0.016098976 -0.249233 -0.75378108 0.4429698 -0.5335927 0.038208246 -0.10896945 -0.40880966 -0.44138503 0.54107523 0.36596155 0.077687025 0.805146 -0.40189648 -0.75402045 -0.57840061 0.093622923 0.15633225 0.18419242 0.0830667 -0.71271563 -0.317163 0.39653039 0.74407983 -0.9308939 0.60485673 0.33028722 -0.500231 0.00043010712 0.11186934 0.19575167 -0.12869787 -0.25232434 -0.85759377 0.42944813 0.97474 0.82691336 -0.45329952 0.29421616 0.79416966 0.55594134 -0.38319588 -0.66161847 -0.49085307 0.6111753 -0.20636749 0.092241764 -0.99728036 -0.25331211 -0.40989685 0.14743829 -0.43501234 0.56172585 -0.91712523 0.8947978 -0.79998159 -0.017401218 -0.96281195 -0.37412858 -0.37546849 0.92726207 0.0024271011 0.59224939 0.70488381 -0.84982991 -0.91854858 -0.4590621 0.10472703 -0.48563147 -0.70031667 -0.16588879 0.80549407 0.91376305 -0.42619705 0.62471294 0.88853693 -0.88037324 -0.59386659 -0.39487314 -0.63856006 0.1262846 -0.38746119 0.75666595 0.15690565 -0.6714437 -0.039504051 0.03041029 0.55749846 0.65981865 -0.76707864 0.0351007 0.845726 -0.29609585 0.53562689 0.16918397 -0.32714272 -0.93701482 0.23515987 -0.51649261 -0.87756014]...]
0 sentences dealed, loss: 353.5973815917969
1000 sentences dealed, loss: 222.4059600830078
2000 sentences dealed, loss: 161.6058807373047
3000 sentences dealed, loss: 117.25419616699219
4000 sentences dealed, loss: 92.17009735107422
5000 sentences dealed, loss: 74.87251281738281
6000 sentences dealed, loss: 49.95429611206055
7000 sentences dealed, loss: 46.26612091064453
8000 sentences dealed, loss: 40.01551055908203
9000 sentences dealed, loss: 34.90557098388672
['郭靖', '黄蓉'] [['之际', '丁', '功夫', '身穿', '话', '居心', '取出', '当为', '睡', '海豹'], ['正要', '师父', '老', '到底', '说', '殷素素', '之后', '干', '都大锦', '千千万万']] 0.0398073 0.00620452

Process finished with exit code 0
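Since save_model writes both params.pkl and the TensorFlow checkpoint into the model directory, the trained model can also be reloaded later in a fresh process by passing model_path to the constructor, which rebuilds the graph from the saved vocabulary and restores the variables. A usage sketch (the test word '张无忌' is just an example; any word present in vocab_list works):

from entity.Word2vec import Word2vec

# Rebuild the graph from the saved parameters and restore the TensorFlow variables.
w2v = Word2vec(model_path='model')

test_word = ['张无忌']
test_id = [w2v.word2id[x] for x in test_word]
test_words, near_words, sim_mean, sim_var = w2v.cal_similarity(test_id)
print(test_words, near_words)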

The debug output of the embedding_dict Tensor printed by tf.Print (also visible in the run log above) is:

2018-07-28 09:11:45.227779: I c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\kernels\logging_ops.cc:79] [embedding_dict:][20035 200][[-0.77914405 0.79151034 -0.010726213 -0.25953674 -0.18441415 0.91375113 0.53179407 -0.36695218 0.38310695 0.38405275 0.62971759 0.43836164 -0.081039906 0.9883554 -0.83298779 -0.62883639 0.9534657 0.60202217 -0.073337078 -0.34434557 0.67567182 -0.43930292 -0.14380455 0.46865034 0.44432259 0.47929859 0.032618046 0.099161386 0.414644 -0.41022897 0.25183225 0.99723482 -0.043951273 -0.44877195 -0.10115552 0.19715476 0.3863771 -0.35254979 0.88766885 0.16290379 0.44491267 0.13098645 0.70956707 -0.68307352 -0.92652154 -0.52051234 -0.24961495 -0.624187 0.53555775 0.075112343 -0.25340271 0.50158167 0.60081315 0.17040896 -0.62421918 -0.067055941 -0.87068582 -0.52720952 -0.2524085 0.33720016 -0.38160658 -0.92821479 -0.82829 -0.26891422 -0.982393 -0.34348178 -0.34593725 -0.73086143 -0.92031336 -0.83723879 0.20397139 0.63790631 -0.99155068 -0.38665795 -0.6032877 0.35202479 -0.012173414 -0.29592013 -0.64303589 0.67022085 0.42715836 -0.1804142 -0.37994146 -0.55125475 0.80090809 -0.95802307 -0.3348639 0.26535749 0.13348079 -0.060599327 0.55445123 -0.0638814 -0.012206316 -0.019565344 0.55873513 -0.073151588 -0.33200455 0.37471032 -0.45809937 -0.96168089 -0.016098976 -0.249233 -0.75378108 0.4429698 -0.5335927 0.038208246 -0.10896945 -0.40880966 -0.44138503 0.54107523 0.36596155 0.077687025 0.805146 -0.40189648 -0.75402045 -0.57840061 0.093622923 0.15633225 0.18419242 0.0830667 -0.71271563 -0.317163 0.39653039 0.74407983 -0.9308939 0.60485673 0.33028722 -0.500231 0.00043010712 0.11186934 0.19575167 -0.12869787 -0.25232434 -0.85759377 0.42944813 0.97474 0.82691336 -0.45329952 0.29421616 0.79416966 0.55594134 -0.38319588 -0.66161847 -0.49085307 0.6111753 -0.20636749 0.092241764 -0.99728036 -0.25331211 -0.40989685 0.14743829 -0.43501234 0.56172585 -0.91712523 0.8947978 -0.79998159 -0.017401218 -0.96281195 -0.37412858 -0.37546849 0.92726207 0.0024271011 0.59224939 0.70488381 -0.84982991 -0.91854858 -0.4590621 0.10472703 -0.48563147 -0.70031667 -0.16588879 0.80549407 0.91376305 -0.42619705 0.62471294 0.88853693 -0.88037324 -0.59386659 -0.39487314 -0.63856006 0.1262846 -0.38746119 0.75666595 0.15690565 -0.6714437 -0.039504051 0.03041029 0.55749846 0.65981865 -0.76707864 0.0351007 0.845726 -0.29609585 0.53562689 0.16918397 -0.32714272 -0.93701482 0.23515987 -0.51649261 -0.87756014]...]

TensorBoard can also be used to inspect the training:


C:\Users\lenovo>tensorboard --logdir=D://PycharmProjects//Tensorflow_2018_test//dzh_word2vec//tensorflow_word2vec_example//tensorboard
Starting TensorBoard b'54' at http://duanzhihua:6006
(Press CTRL+C to quit)


Opening the TensorBoard page at http://duanzhihua:6006 shows the recorded graph and the scalar summaries ('loss' and 'avg_vec_model').

 

"The development of full artificial intelligence could spell the end of the human race... It would take off on its own and re-design itself at an ever-increasing rate. Humans, who are limited by slow biological evolution, couldn't compete and would be superseded." -- Stephen Hawking

Reposted from blog.csdn.net/duan_zhihua/article/details/81257323