Hands-on Chinese Word Vector Training with gensim

 

1. Environment
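The original post does not spell out the exact environment. Judging from the imports and the `size=` argument used in the training call below, it assumes Python 3 with a 3.x release of gensim, plus jieba, hanziconv, tqdm and numpy, all installable from PyPI.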

2. Code

The project directory is laid out as follows:
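A rough sketch of the layout, inferred from the paths used in the code below (the script name train_word2vec.py is just a placeholder):

data/
    stop_words.json
    zhwiki-latest-pages-articles.xml.bz2
    (the intermediate files wiki_plaint, wiki_T2S and zhwiki_segmented are also written here)
model/
    zhwiki_word2vec.model
train_word2vec.py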

stop_words.json under the data directory is the stop-word list.

zhwiki-latest-pages-articles.xml.bz2 is the Chinese Wikipedia corpus dump.

Corpus preprocessing: strip the wiki markup and tokenize.

# Temporarily silence warning messages
import warnings

warnings.filterwarnings('ignore')

# Utility libraries
import os,json
import numpy as np
from tqdm import tqdm
import jieba
from gensim.corpora import WikiCorpus
from gensim.models.keyedvectors import KeyedVectors
from gensim.models import word2vec
from hanziconv import HanziConv


# Project root directory
ROOT_DIR = os.getcwd()
DATA_PATH = os.path.join(ROOT_DIR, "data")
MODEL_PATH = os.path.join(ROOT_DIR, "model")


# Stop-word set
stop_words = set(json.load(open(os.path.join(DATA_PATH, "stop_words.json"), 'r', encoding='utf-8')))

# Extract the downloaded Wikipedia dump (available from https://dumps.wikimedia.org/zhwiki/) and convert the XML to plain text
wiki_articles_xml_file = os.path.join(DATA_PATH, "zhwiki-latest-pages-articles.xml.bz2")
wiki_plain = os.path.join(DATA_PATH, "wiki_plaint")

# Use gensim's WikiCorpus to read the corpus from the wiki XML
wiki_corpus = WikiCorpus(wiki_articles_xml_file, dictionary={})

# Iterate over the extracted articles
with open(wiki_plain, 'w', encoding='utf-8') as output:
	text_count = 0
	for text in wiki_corpus.get_texts():
		# Write each article's tokens to the file for later use
		output.write(' '.join(text) + '\n')
		text_count += 1
		if text_count % 10000 == 0:
			print("目前已处理 %d 篇文章" % text_count)


wiki_articles_T2S = os.path.join(DATA_PATH, "wiki_T2S")

wiki_articles_T2S_f = open(wiki_articles_T2S, "w", encoding="utf-8")

# Iterate over the plain-text wiki file and convert Traditional Chinese to Simplified with HanziConv
with open(wiki_plain, "r", encoding="utf-8") as wiki_articles_txt:
	lines = wiki_articles_txt.readlines()
	for line in tqdm(lines):
		wiki_articles_T2S_f.write(HanziConv.toSimplified(line))
print("成功T2S转换!")
wiki_articles_T2S_f.close()


# File to hold the segmented (tokenized) result
wiki_segmented_file = os.path.join(DATA_PATH, "zhwiki_segmented")
wiki_articles_segmented = open(wiki_segmented_file, "w", encoding="utf-8")

# Iterate over the Simplified-Chinese wiki file and tokenize it with jieba
with open(wiki_articles_T2S, "r", encoding="utf-8") as Corpus:
	lines = Corpus.readlines()
	for line in tqdm(lines):
		line = line.strip("\n")
		# Collect each line's terms in a list and join them once; building one huge
		# string via repeated `st = st + term + " "` copies the string again and again,
		# which is why the original version became extremely slow.
		terms = [term for term in jieba.cut(line, cut_all=False) if term not in stop_words]
		wiki_articles_segmented.write(" ".join(terms) + " ")

	wiki_articles_segmented.flush()

# See https://radimrehurek.com/gensim/models/word2vec.html for more usage
print("Training the word2vec model...")

# Load the segmented corpus
sentence = word2vec.Text8Corpus(wiki_segmented_file)
# Set the parameters and train the model; workers=8 because this was run on a university server
# sg=1 selects skip-gram; note that in gensim >= 4.0 the `size` argument was renamed to `vector_size`
model = word2vec.Word2Vec(sentence, size=300, window=10, min_count=5, workers=8, sg=1)
# Save the trained word vectors
word2vec_model_file = os.path.join(MODEL_PATH, "zhwiki_word2vec.model")
model.wv.save_word2vec_format(word2vec_model_file, binary=True)
# model.wv.save_word2vec_format("wiki300.model.bin", binary = True)
print("Word2vec模型已存储完毕")



# Test the model
word_vectors = KeyedVectors.load_word2vec_format(word2vec_model_file, binary=True)

print("Top 5 most similar words")
query_list = ['校长']
print(word_vectors.most_similar(query_list[0], topn=5))

print("Cosine similarity between two words")
query_list = ['爸爸', '妈妈']
print(word_vectors.similarity(query_list[0], query_list[1]))

The whole training run took roughly half an hour.

Reference: https://www.kesci.com/home/project/5b7a35c131902f000f551567


Reposted from blog.csdn.net/biubiubiu888/article/details/82898703