Extracting text keywords with TF-IDF
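The script below segments Chinese IM consultation text with jieba, filters tokens by part of speech and a stop-word list, then ranks each document's words by tf-idf weight with scikit-learn and keeps the top K as its keywords.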

#!/usr/bin/python
# coding=utf-8

import codecs
import jieba.analyse
import jieba.posseg
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer

"""
       TF-IDF权重:
           1、CountVectorizer 构建词频矩阵
           2、TfidfTransformer 构建tfidf权值计算
           3、文本的关键字
           4、对应的tfidf矩阵
"""

# Load a custom user dictionary so domain terms are segmented as single words
jieba.load_userdict("/opt/pymodel/project_lyj_model/nlp_project/keyword_extraction/data/userdict.txt")


# Data preprocessing: word segmentation, stop-word removal, POS filtering
def dataPrepos(text, stopkey):
    words = []
    pos = ['n', 'nz', 'v', 'vd', 'vn', 'l', 'a', 'd']  # POS tags to keep
    seg = jieba.posseg.cut(text)  # segment into (word, POS flag) pairs
    for i in seg:
        if i.word not in stopkey and i.flag in pos:  # drop stop words, keep selected POS
            words.append(i.word)
    return words
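# Example (hypothetical sentence; the exact output depends on the user dictionary
# and the stop-word list):
#     dataPrepos(u"物流很快,客服态度也好", stopkey)  # might return ['物流', '快', '客服', '态度', '好']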


# Extract the top-K keywords of each text with tf-idf
def getKeywords_tfidf(data, stopkey, topK):
    from_account_list, msg_times_tamp_list, body_list, odate_list, omonth_list = \
        data['from_account'], data['msg_times_tamp'], data['body'], data['odate'], data['omonth']
    corpus = []  # collect every document into one list, one document per element
    kept = []    # original row index of each corpus entry, to keep results aligned with the input
    for index in range(len(body_list)):
        text = '%s' % (body_list[index])
        text = dataPrepos(text, stopkey)  # preprocess the text
        text = " ".join(text)  # join the tokens into a space-separated string
        if len(text) > 0:  # rows that are empty after preprocessing are skipped
            corpus.append(text)
            kept.append(index)
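    # At this point corpus holds one space-joined, preprocessed document per element,
    # e.g. ["物流 快 客服 态度", ...] (example tokens are hypothetical)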

    # 1. Build the term-frequency matrix: X[i][j] is the frequency of word j in text i
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # 2. Compute the tf-idf weight of every word
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(X)
    # 3. Get the vocabulary of the bag-of-words model
    word = vectorizer.get_feature_names_out()  # use get_feature_names() on scikit-learn < 1.0
    # 4. Get the tf-idf matrix: weight[i][j] is the tf-idf weight of word j in text i
    weight = tfidf.toarray()
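    # With scikit-learn's defaults (smooth_idf=True, norm='l2') each weight is
    # tf(t, d) * (ln((1 + n) / (1 + df(t))) + 1), L2-normalized per document,
    # where n is the number of documents and df(t) the document frequency of term t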
    # 5. Collect the keywords and weights of every text
    from_accounts, msg_times_tamps, bodys, odates, omonths, keys = [], [], [], [], [], []
    for i in range(len(weight)):
        # print(u"----- tf-idf weights of the words in text", i + 1, u"-----")
        row = kept[i]  # index of this corpus entry in the original data
        from_accounts.append(from_account_list[row])
        msg_times_tamps.append(msg_times_tamp_list[row])
        bodys.append(body_list[row])
        odates.append(odate_list[row])
        omonths.append(omonth_list[row])
        df_word, df_weight = [], []  # vocabulary of the current text and the matching weights
        for j in range(len(word)):
            # print(word[j], weight[i][j])
            df_word.append(word[j])
            df_weight.append(weight[i][j])
        df_word = pd.DataFrame(df_word, columns=['word'])
        df_weight = pd.DataFrame(df_weight, columns=['weight'])
        word_weight = pd.concat([df_word, df_weight], axis=1)  # join the word and weight columns
        word_weight = word_weight.sort_values(by="weight", ascending=False)  # sort by weight, descending
        keyword = np.array(word_weight['word'])  # take the word column as an array
        word_split = [keyword[x] for x in range(min(topK, len(keyword)))]  # take the top-K words as keywords
        word_split = " ".join(word_split)
        keys.append(word_split)


    result = pd.DataFrame({"from_account": from_accounts, "msg_times_tamp": msg_times_tamps,
                           "body": bodys, "odate": odates, "omonth": omonths, "key": keys},
                          columns=['from_account', 'msg_times_tamp', 'body', 'odate', 'omonth', 'key'])
    return result

def main():
    # Read the data set
    dataFile = '/data/pymodel/project_lyj_model/nlp_project/keyword_extraction/data/download/im用户咨询_all.csv'
    data = pd.read_csv(dataFile, names=['from_account', 'msg_times_tamp', 'body', 'odate', 'omonth'])
    # Load the stop-word list
    stopkey = [w.strip() for w in codecs.open('/opt/pymodel/project_lyj_model/nlp_project/keyword_extraction/data/stopWord.txt', 'r', encoding='utf-8').readlines()]
    # Extract the top-2 keywords of every text with tf-idf
    result = getKeywords_tfidf(data, stopkey, 2)
    result.to_csv("/data/pymodel/project_lyj_model/nlp_project/keyword_extraction/data/im_TFIDF.csv", index=False, encoding='utf-8')


if __name__ == '__main__':
    main()
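As an aside, steps 1 and 2 in getKeywords_tfidf can be collapsed into a single TfidfVectorizer, which scikit-learn documents as equivalent to CountVectorizer followed by TfidfTransformer. A minimal sketch (corpus as built above):

from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer()
tfidf = vectorizer.fit_transform(corpus)   # same weights as the two-step pipeline
word = vectorizer.get_feature_names_out()  # vocabulary, as in step 3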
