Topic modeling with gensim

#coding:utf-8
import jieba
from gensim import corpora,models,similarities
from jieba import analyse

## Alias jieba's TF-IDF keyword-extraction interface
tfidf = analyse.extract_tags
## keywords = jieba.analyse.extract_tags(str1, topK=20, withWeight=True, allowPOS=('nz',))
## Topic analysis with gensim's LDA
# Reference: https://blog.csdn.net/whzhcahzxh/article/details/17528261
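
## A quick illustration of extract_tags (the sample sentence is invented for this sketch):
## it returns the topK keywords, as (word, weight) tuples when withWeight=True.
# for word, weight in tfidf(u'犯罪嫌疑人翻墙入室盗窃现金人民币2000元', topK=5, withWeight=True):
#     print(word, weight)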

## Load the stop-word list
stop_word = []
with open('stop_word.txt') as fp:
    for line in fp.readlines():
        line = line.strip()
        if line == '':
            continue
        stop_word.append(line)
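
## stop_word is probed once per token below, so a set (O(1) lookups) beats a list;
## a minimal equivalent sketch:
# with open('stop_word.txt') as fp:
#     stop_word = {line.strip() for line in fp if line.strip()}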

## Read the two corpora
sentence1 = []
with open('qisu_0.txt') as fp:
    for line in fp.readlines():
        line = line.strip()
        if line == '':
            continue
        sentence1.append(line)

sentence2 = []
with open('xunwen_0.txt') as fp:
    for line in fp.readlines():
        line = line.strip()
        if line == '':
            continue
        sentence2.append(line)

word1 = []
for doc in sentence1:
    ## Option 1: plain word segmentation
    word1.append(list(jieba.cut(doc)))

    ## Option 2: segmentation with stop-word removal
    # linshi = []
    # for each in ' '.join(jieba.cut(doc)).split(' '):
    #     if each in stop_word:
    #         continue
    #     linshi.append(each)
    # word1.append(linshi)

    ## Option 3: keep only TF-IDF keywords
    # word1.append(tfidf(doc))
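
    ## Option 2 can also be written as one comprehension (a sketch, assuming stop_word is loaded):
    # word1.append([w for w in jieba.cut(doc) if w not in stop_word])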

word2 = []
for doc in sentence2:
    ## Option 1: plain word segmentation
    # word2.append(list(jieba.cut(doc)))

    ## Option 2: segmentation with stop-word removal
    # linshi = []
    # for each in ' '.join(jieba.cut(doc)).split(' '):
    #     if each in stop_word:
    #         continue
    #     linshi.append(each)
    # word2.append(linshi)

    ## Option 3: keep only TF-IDF keywords (the branch used here)
    fenci = list(jieba.cut(doc))  ## segmented tokens
    # for each in fenci:
    #     print(each)
    print('*******************************')
    guanjian = tfidf(doc, topK=10, allowPOS=('n', 't', 'tg', 'm'))  ## top-10 keywords
    str1 = ''
    for each in fenci:
        if each in guanjian:
            if str1.find(each) != -1:  ## skip tokens already collected (substring test)
                continue
            if str1 == '':
                str1 = each
            else:
                str1 = str1 + ' ' + each
    print(str1)
    word2.append(str1.split(' '))

## Sanity check: print the tokens kept for the second document
for each in word2[1]:
    print(each)
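
## The keyword loop above keeps tokens in sentence order and skips repeats, but its substring
## test (str1.find) can also drop a distinct word contained inside an earlier one. An exact-match
## alternative for the loop body, sketched with the same fenci/guanjian names:
# seen = set()
# ordered = []
# for w in fenci:
#     if w in guanjian and w not in seen:
#         seen.add(w)
#         ordered.append(w)
# word2.append(ordered)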
## Build a dictionary over all segmented documents
dic1 = corpora.Dictionary(word1+word2)

# print(dic1.token2id)  ## the ID assigned to each token

# For readability, print them in a loop:
# for word, index in dic1.token2id.items():
#     print(word + u' has ID: ' + str(index))

#######################################################
## With the dictionary in place, build the bag-of-words corpora
corpus1 = [dic1.doc2bow(text) for text in word1]
corpus2 = [dic1.doc2bow(text) for text in word2]
# print(corpus1)
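## Each document becomes a sparse list of (token_id, count) pairs, e.g. [(0, 1), (3, 2)]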
## TF-IDF transformation
## Intuitively: turn the term-frequency vector of a sentence into a vector of term-importance weights.
## TF-IDF scores how important a term is to one document within a collection or corpus: the weight
## rises with the term's frequency in that document and falls with its frequency across the corpus.
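
## A sketch of the weighting, assuming gensim's TfidfModel defaults (log base 2, L2 normalization):
##     idf(t)       = log2(D / df(t))      # D = number of documents, df(t) = documents containing t
##     weight(t, d) = tf(t, d) * idf(t)    # each document vector is then L2-normalized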

tfidf1 = models.TfidfModel(corpus1)
corpus_tfidf1 = tfidf1[corpus1]

index = similarities.SparseMatrixSimilarity(tfidf1[corpus1], num_features=len(dic1))
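
## Querying the index, index[query_tfidf], returns the cosine similarity between the query and
## every document in corpus1; with non-negative tf-idf weights the scores lie in [0, 1].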

## For every sentence in corpus2, compute its similarity against all of corpus1
## (note: the output file name says LDA, but these scores are TF-IDF cosine similarities)
f1 = open('LDA_similarity.txt', 'w')
for i in range(len(corpus2)):
    sims = index[tfidf1[corpus2[i]]]
    f1.write(sentence2[i]+'\n')
    for j in range(len(sims)):
        f1.write(str(sims[j])+'\n'+sentence1[j]+'\n')
    f1.write('\n**************************************\n\n')
f1.close()

# ## Back to the tf-idf corpus: train an LSI model on it
# lsi = models.LsiModel(corpus_tfidf1, id2word=dic1, num_topics=2)
# lsiout = lsi.print_topics(2)
# ## These are the two topics recovered via SVD
# print(lsiout[0])
# print(lsiout[1])
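# ## print_topics returns (topic_id, topic_string) pairs, the string being a weighted mix of words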


## Likewise for LDA
# lda = models.LdaModel(corpus_tfidf1, id2word=dic1, num_topics=2)
# ldaOut = lda.print_topics(2)
# print(ldaOut[0])
# print(ldaOut[1])
# corpus_lda = lda[corpus_tfidf1]
# for doc in corpus_lda:
#     print(doc)
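# ## Note: LDA models word counts, so it is more usual to train on the raw bag-of-words
# ## corpus (corpus1) than on corpus_tfidf1; the tf-idf variant above still runs, but treat
# ## it as an experiment.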


#
# sentences = ["我喜欢吃土豆","土豆是个百搭的东西","我不喜欢今天雾霾的北京","我不喜欢今天","2、2018年3月13日,犯罪嫌疑人罗付东、付刚窜至江宁街道江宁社区高公2号,翻墙入室.盗窃郭志荣家中现金人民币2000元,黄金手链一条及黄南京香烟一条;"]
# words=[]
# for doc in sentences:
#     words.append(list(jieba.cut(doc)))
# # print (words)
# dic = corpora.Dictionary(words)
# # print (dic)
# # print (dic.token2id)
# for word, index in dic.token2id.items():
#     print(word + u' has ID: ' + str(index))
# print(len(dic))
# corpus = [dic.doc2bow(text) for text in words]
# tfidf = models.TfidfModel(corpus)
#
# vec = [(0, 1), (4, 1)]
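# ## vec is a hand-built bag-of-words query: token id 0 once and token id 4 once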
#
# corpus_tfidf = tfidf[corpus]
# index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=len(dic))
# sims = index[tfidf[vec]]
# print(list(enumerate(sims)))
#


Reposted from blog.csdn.net/yangfengling1023/article/details/82423235