Python learning 103: training word vectors with word2vec

Foreword:

  Use word2vec from the gensim package to train word vectors on a corpus that has already been word-segmented.
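
LineSentence expects a plain-text file with one sentence per line and tokens separated by whitespace. The actual contents of data/data3.txt are not shown here; a segmented Chinese corpus in that format would look roughly like this (hypothetical sample):

自然 语言 处理 是 人工智能 的 一个 分支
词 向量 将 每个 词 映射 为 一个 低维 实数 向量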

#!/usr/bin/env python
# -*- coding: utf-8  -*-
# Train word vectors with gensim's word2vec

import warnings

warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')  # suppress gensim warnings

import logging
import os.path
import sys
import multiprocessing

from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence

def train_word2vec_save_model():
    # logging setup ------------------------------------------------------------------------------
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # --------------------------------------------------------------------------------------------

    # inp: input corpus; outp1: saved gensim model; outp2: word vectors in the
    # text format of the original C word2vec tool
    fdir = r'model/'
    # inp = fdir + '数据分词.txt'
    inp = r'data/data3.txt'
    outp1 = fdir + 'incivi.text.model'
    outp2 = fdir + 'incivi.text.vector'

    # Train a skip-gram model (sg=1; gensim's default, sg=0, is CBOW).
    # Note: gensim >= 4.0 renames `size` to `vector_size`.
    model = Word2Vec(LineSentence(inp), size=100, window=5, min_count=2, sg=1,
                     workers=multiprocessing.cpu_count())
    # Save the full model (can be reloaded and trained further)
    model.save(outp1)
    # Save only the word vectors, in the plain-text C word2vec format
    model.wv.save_word2vec_format(outp2, binary=False)
    print('Training finished')

if __name__ == '__main__':
    train_word2vec_save_model()
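
Once the script finishes, the saved model can be reloaded and queried. A minimal sketch (the query word '中国' is a hypothetical placeholder; use any token from your corpus that survives the min_count cutoff):

from gensim.models import Word2Vec

# Reload the full model saved with model.save()
model = Word2Vec.load('model/incivi.text.model')

# Fetch the 100-dimensional vector for a word (raises KeyError if unseen)
vec = model.wv['中国']

# Nearest neighbours by cosine similarity
for word, score in model.wv.most_similar('中国', topn=5):
    print(word, score)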

Core code:

1. model = Word2Vec(LineSentence(inp), size=100, window=5, min_count=2, sg=1,
                    workers=multiprocessing.cpu_count())
2. model.save(outp1)

Parameter notes: size is the dimensionality of the word vectors; window is the maximum distance between the current and the predicted word within a sentence; min_count discards words that occur fewer than this many times.
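
Because outp2 is written in the plain-text format of the original C word2vec tool, the vectors can also be loaded on their own, without the full model. A sketch using gensim's KeyedVectors, assuming the paths from the script above:

from gensim.models import KeyedVectors

# Load vectors only: smaller and faster than the full model,
# but cannot be trained further
wv = KeyedVectors.load_word2vec_format('model/incivi.text.vector', binary=False)
print(wv.vector_size)  # prints 100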

Source: blog.csdn.net/u013521274/article/details/84995740