Feature boosting via feature extraction: DictVectorizer, plus CountVectorizer/TfidfVectorizer on text data analyzed with and without stop-word removal

Copyright notice: this is an original article by the blogger. Please credit the source when reposting: https://blog.csdn.net/King_key/article/details/79283582

1 Feature extraction: convert each raw record, one by one, into a feature-vector representation; this involves quantifying the data's features.

Experiment data: dict-stored records (for DictVectorizer) and the 20 Newsgroups text corpus (for CountVectorizer/TfidfVectorizer).
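As a minimal sketch of feature extraction for dict-stored records (illustrative only, not part of the original experiment), DictVectorizer one-hot encodes string-valued fields and passes numeric fields through unchanged:

from sklearn.feature_extraction import DictVectorizer

# Toy dict-stored records (hypothetical data for illustration)
records = [
    {'city': 'Dubai', 'temperature': 33.0},
    {'city': 'London', 'temperature': 12.0},
    {'city': 'San Francisco', 'temperature': 18.0},
]
dict_vec = DictVectorizer(sparse=False)
print(dict_vec.fit_transform(records))
# [[ 1.  0.  0. 33.]
#  [ 0.  1.  0. 12.]
#  [ 0.  0.  1. 18.]]
print(dict_vec.feature_names_)
# ['city=Dubai', 'city=London', 'city=San Francisco', 'temperature']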

2 Experiment code and results

# coding:utf-8
# Import the multinomial naive Bayes model
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
# Import the 20 Newsgroups fetcher
from sklearn.datasets import fetch_20newsgroups
# Download the news data on the fly
news = fetch_20newsgroups(subset='all')
# Import the train/test splitting utility
from sklearn.model_selection import train_test_split
# Randomly hold out 25% as test data, 75% as training data
x_train, x_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.25, random_state=33)
print('Without stop-word filtering')
# Naive Bayes classification performance when the text features are
# vectorized with CountVectorizer, keeping stop words
from sklearn.feature_extraction.text import CountVectorizer
# Initialize the vectorizer
count_vec = CountVectorizer()
# Convert the raw training and test text into feature vectors using term counts only
x_count_train = count_vec.fit_transform(x_train)
x_count_test = count_vec.transform(x_test)

# Initialize the classifier
mnb_count = MultinomialNB()
# Fit the model parameters
mnb_count.fit(x_count_train, y_train)
# Report accuracy on the test set
print('CountVectorizer accuracy:', mnb_count.score(x_count_test, y_test))
# Store the predictions
y_count_predict = mnb_count.predict(x_count_test)
# Print a detailed per-class report
print(classification_report(y_test, y_count_predict, target_names=news.target_names))
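To make the count-based representation concrete, here is a small illustrative snippet (not part of the original experiment) showing the raw term-count matrix that CountVectorizer builds from a toy two-document corpus:

from sklearn.feature_extraction.text import CountVectorizer

toy_corpus = ['the cat sat on the mat', 'the dog sat on the log']
toy_vec = CountVectorizer()
X_toy = toy_vec.fit_transform(toy_corpus)   # sparse matrix of raw term counts
# Vocabulary in column order: ['cat', 'dog', 'log', 'mat', 'on', 'sat', 'the']
print(sorted(toy_vec.vocabulary_, key=toy_vec.vocabulary_.get))
print(X_toy.toarray())
# [[1 0 0 1 1 1 2]
#  [0 1 1 0 1 1 2]]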



# Naive Bayes classification performance when the text features are
# vectorized with TfidfVectorizer, keeping stop words
from sklearn.feature_extraction.text import TfidfVectorizer
# Initialize the vectorizer
tfidf_vec = TfidfVectorizer()
# Convert the raw training and test text into tf-idf feature vectors
x_tfidf_train = tfidf_vec.fit_transform(x_train)
x_tfidf_test = tfidf_vec.transform(x_test)

# Train and evaluate
mnb_tfidf = MultinomialNB()
mnb_tfidf.fit(x_tfidf_train, y_train)
print('TfidfVectorizer accuracy:', mnb_tfidf.score(x_tfidf_test, y_test))
y_tfidf_predict = mnb_tfidf.predict(x_tfidf_test)
print(classification_report(y_test, y_tfidf_predict, target_names=news.target_names))
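TfidfVectorizer starts from the same term counts but re-weights them: in scikit-learn's default smoothed scheme each count is multiplied by idf(t) = ln((1 + n) / (1 + df(t))) + 1, where n is the number of documents and df(t) the number of documents containing term t, and each row is then L2-normalized. A toy comparison (illustrative only, not from the original post):

from sklearn.feature_extraction.text import TfidfVectorizer

toy_corpus = ['the cat sat on the mat', 'the dog sat on the log']
toy_tfidf = TfidfVectorizer()
X_toy = toy_tfidf.fit_transform(toy_corpus)
# 'sat' and 'on' occur in both documents (lower idf), so with the same raw
# count they end up with smaller weights than the document-specific
# 'cat'/'mat' and 'dog'/'log'.
print(X_toy.toarray().round(2))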



# Repeat both experiments with English stop words removed
count_filter_vec = CountVectorizer(analyzer='word', stop_words='english')
tfidf_filter_vec = TfidfVectorizer(analyzer='word', stop_words='english')
# Vectorize with the filtered vocabularies
x_count_filter_train = count_filter_vec.fit_transform(x_train)
x_count_filter_test = count_filter_vec.transform(x_test)

x_tfidf_filter_train = tfidf_filter_vec.fit_transform(x_train)
x_tfidf_filter_test = tfidf_filter_vec.transform(x_test)

# Prediction and accuracy evaluation
print('With stop-word filtering')
mnb_count_filter = MultinomialNB()
mnb_count_filter.fit(x_count_filter_train, y_train)
print('CountVectorizer accuracy:', mnb_count_filter.score(x_count_filter_test, y_test))
y_count_filter_predict = mnb_count_filter.predict(x_count_filter_test)
print(classification_report(y_test, y_count_filter_predict, target_names=news.target_names))

mnb_tfidf_filter = MultinomialNB()
mnb_tfidf_filter.fit(x_tfidf_filter_train, y_train)
print('TfidfVectorizer accuracy:', mnb_tfidf_filter.score(x_tfidf_filter_test, y_test))
y_tfidf_filter_predict = mnb_tfidf_filter.predict(x_tfidf_filter_test)
print(classification_report(y_test, y_tfidf_filter_predict, target_names=news.target_names))
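Passing stop_words='english' drops a built-in list of a few hundred very common English words (the, and, of, ...) from the vocabulary before counting, so these uninformative terms no longer dilute the feature space. A quick illustrative check (not part of the original experiment):

from sklearn.feature_extraction.text import CountVectorizer

doc = ['the cat sat on the mat']
with_stop = CountVectorizer(analyzer='word')
without_stop = CountVectorizer(analyzer='word', stop_words='english')
with_stop.fit(doc)
without_stop.fit(doc)
print(sorted(with_stop.vocabulary_))     # ['cat', 'mat', 'on', 'sat', 'the']
print(sorted(without_stop.vocabulary_))  # ['cat', 'mat', 'sat'] -- 'the' and 'on' are filtered out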



