Sentiment Analysis of Weibo Comments

# Text processing: sentiment analysis, text similarity, text classification (TF-IDF, inverse document frequency)
# NLP pipeline: raw string -> vectorization -> Naive Bayes training -> testing
# Text similarity: term frequency
# Text classification: TF-IDF (term frequency - inverse document frequency)
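# Term-frequency similarity, a minimal sketch (not part of the original
# script): represent segmented texts as word-count vectors and compare
# them with cosine similarity.
'''
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = ['今天 天气 很 好', '今天 天气 不错']  # segmented, space-joined texts
counts = CountVectorizer().fit_transform(docs)
print(cosine_similarity(counts[0], counts[1]))  # cosine similarity in [0, 1]
'''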

#1. Raw text
#2. Word segmentation
#3. Word form normalization
#4. Stopword removal (steps 1-4 are sketched below; the full version is proc_text)
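# A minimal sketch of the pipeline (hedged; step 3 is largely a no-op for
# Chinese, which has no inflection to normalize):
'''
import re
import jieba.posseg as pseg

raw = '今天天气真好!http://t.cn/xxx'                   # 1. raw text
chinese_only = re.sub(r'[^\u4E00-\u9FD5]+', '', raw)   # keep Chinese characters only
words = [w for w, flag in pseg.cut(chinese_only)]      # 2. segmentation (+ POS tags)
stop = {'真'}                                          # tiny hypothetical stopword set
print([w for w in words if w not in stop])             # 4. stopword removal
'''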

import os, re
import numpy as np
import pandas as pd
import jieba.posseg as pseg
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer

#https://blog.csdn.net/mpk_no1/article/details/71698725

dataset_path = './dataset'
text_filenames = ['0_simplifyweibo.txt', '1_simplifyweibo.txt',
                  '2_simplifyweibo.txt', '3_simplifyweibo.txt']

# CSV file for the raw data
output_text_filename = 'raw_weibo_text.csv'
# File for the cleaned text data
output_cln_text_filename = 'clean_weibo_text.csv'
# Load the Chinese stopword list, one word per line
stopwords = [line.rstrip() for line in open('./中文停用词库.txt', 'r', encoding='utf-8')]

# Raw data processing:
'''
text_w_label_df_lst = []
for text_filename in text_filenames:
	text_file = os.path.join(dataset_path, text_filename)

	# Get the label from the filename, i.e. 0, 1, 2, 3
	label = int(text_filename[0])

	# Read the text file
	with open(text_file, 'r', encoding='utf-8') as f:
		lines = f.read().splitlines()

	labels = [label] * len(lines)
	#print(labels)

	text_series = pd.Series(lines)
	label_series = pd.Series(labels)

	# Build the DataFrame
	text_w_label_df = pd.concat([label_series, text_series], axis=1)
	text_w_label_df_lst.append(text_w_label_df)

result_df = pd.concat(text_w_label_df_lst, axis=0)
# Save as a CSV file
result_df.columns = ['label', 'text']
result_df.to_csv(os.path.join(dataset_path, output_text_filename),
                 index=None, encoding='utf-8')
'''

# 1. Data reading, processing, cleaning, preparation
'''
# Read the processed CSV file and build the dataset
text_df = pd.read_csv(os.path.join(dataset_path, output_text_filename),encoding='utf-8')
print(text_df)

def proc_text(raw_line):
    """
        Process one line of text.
        Returns the segmentation result.
    """
    # 1. Use a regex to remove non-Chinese characters
    filter_pattern = re.compile('[^\u4E00-\u9FD5]+')
    chinese_only = filter_pattern.sub('', raw_line)

    # 2. Jieba segmentation + POS tagging
    words_lst = pseg.cut(chinese_only)

    # 3. Remove stopwords
    meaningful_words = []
    for word, flag in words_lst:
        # if (word not in stopwords) and (flag == 'v'):
            # Non-verbs etc. can also be filtered out by POS tag
        if word not in stopwords:
            meaningful_words.append(word)

    return ' '.join(meaningful_words)

# Process the text data
text_df['text'] = text_df['text'].apply(proc_text)

# Filter out empty strings
text_df = text_df[text_df['text'] != '']

# Save the cleaned text data
text_df.to_csv(os.path.join(dataset_path, output_cln_text_filename), index=None, encoding='utf-8')
print('Done; results saved.')
'''

# 2. Split into training and test sets
# The labels correspond to different emotions:
# 0: joy
# 1: anger
# 2: disgust
# 3: depressed

clean_text_df = pd.read_csv(os.path.join(dataset_path, output_cln_text_filename),encoding='utf-8')
# Split into training and test sets (no random_state is set, so the split differs between runs)
x_train, x_test, y_train, y_test = train_test_split(
    clean_text_df['text'].values, clean_text_df['label'].values, test_size=0.25)

# 3. Feature extraction
# Compute TF-IDF features
tf = TfidfVectorizer()
# Score each document's words against the vocabulary learned from the training set
x_train = tf.fit_transform(x_train)
print(tf.get_feature_names_out())  # get_feature_names() was removed in scikit-learn 1.2

x_test = tf.transform(x_test)

# 4. Train a Naive Bayes model
mlt = MultinomialNB(alpha=1.0)  # alpha=1.0 is Laplace smoothing
# print(x_train.toarray())

mlt.fit(x_train, y_train)
y_predict = mlt.predict(x_test)
print("Predicted classes:", y_predict)
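# To display emotion names instead of numeric labels (label_map is a
# hypothetical helper based on the mapping in section 2, not in the
# original script):
label_map = {0: 'joy', 1: 'anger', 2: 'disgust', 3: 'depressed'}
print([label_map[y] for y in y_predict[:10]])  # first ten predictions, by name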

# 5. Prediction accuracy. Standard evaluation metrics for classification models are accuracy, precision and recall (higher is better).
print("Prediction accuracy:", mlt.score(x_test, y_test))
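# The comment above also mentions precision and recall; a sketch using
# sklearn's classification_report (assumes all four labels appear in y_test):
from sklearn.metrics import classification_report
print(classification_report(y_test, y_predict,
                            target_names=['joy', 'anger', 'disgust', 'depressed']))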

Word2vec maps words to vectors in a high-dimensional vector space and can capture contextual relationships between words. The idea: first train word vectors with word2vec and use them as feature weights, then select the valuable features with two feature-selection methods, one based on a sentiment lexicon and one on part of speech.
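A minimal gensim sketch of that idea (an assumption, not the original author's code; gensim >= 4.0 API, training on the segmented texts produced above, with an illustrative query word):

from gensim.models import Word2Vec

# Each training sentence is a list of tokens; clean_text_df['text'] holds the
# space-joined segmentation produced by proc_text (assumed non-empty strings).
sentences = [line.split() for line in clean_text_df['text'].values]
model = Word2Vec(sentences, vector_size=100, window=5, min_count=2)
print(model.wv.most_similar('开心', topn=5))  # nearest neighbours, if '开心' is in the vocabulary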


Reposted from blog.csdn.net/yitian1585531/article/details/84640871