贝叶斯方法及电影评价实例

相关视频地址:

数据地址:链接:https://pan.baidu.com/s/1bWnoL757Sg7JLw8cPNtT9Q 提取码:mbsw
 

Kaggle IMDB影评者情感褒贬分类问题,Kaggle地址为https://www.kaggle.com/c/word2vec-nlp-tutorial
原文使用的方法是word2vec将词语转为词向量,再用deep learning方式处理,我们这里使用TF-IDF作为特征,用最简单的朴素贝叶斯和逻辑回归尝试

import re  #正则表达式
from bs4 import BeautifulSoup  #html标签处理
import pandas as pd

def review_to_wordlist(review):
    '''
    Convert a raw IMDB review into a list of lowercase word tokens.

    Steps: strip HTML markup, keep ASCII letters only, lowercase, split
    on whitespace.

    :param review: raw review text (may contain HTML tags)
    :return: list of lowercase word strings
    '''
    # Strip HTML tags. Naming the parser explicitly avoids bs4's
    # "no parser was explicitly specified" warning and keeps the result
    # reproducible across environments with different parsers installed.
    review_text = BeautifulSoup(review, "html.parser").get_text()
    # Replace every non-letter character with a space.
    review_text = re.sub("[^a-zA-Z]", " ", review_text)
    # Lowercase and split into word tokens.
    words = review_text.lower().split()
    return words

# Load the labeled training set and the unlabeled test set with pandas.
# quoting=3 (csv.QUOTE_NONE) because the reviews contain stray quote chars.
train = pd.read_csv('/Users/Hanxiaoyang/IMDB_sentiment_analysis_data/labeledTrainData.tsv', header=0, delimiter="\t", quoting=3)
test = pd.read_csv('/Users/Hanxiaoyang/IMDB_sentiment_analysis_data/testData.tsv', header=0, delimiter="\t", quoting=3 )
# Sentiment labels: 1 = positive, 0 = negative.
y_train = train['sentiment']
# Clean every review and re-join the tokens into one space-separated string,
# since TfidfVectorizer expects raw text documents.
# Iterating the Series directly replaces the Python-2-only
# `for i in xrange(0, len(...))` index idiom and works on both Python 2 and 3.
train_data = [" ".join(review_to_wordlist(r)) for r in train['review']]
test_data = [" ".join(review_to_wordlist(r)) for r in test['review']]


 

# Notebook-style inspection cells from the original post: evaluating these
# bare names displays the cleaned reviews and the label Series when run
# interactively (no effect as a script).
train_data
y_train
0        1
1        1
2        0
3        0
4        1
5        1
6        0
7        0
8        0
9        1
10       0
11       1
12       1
13       0
14       0
15       0
16       0
17       0
18       1
19       1
20       1
21       1
22       1
23       0
24       0
25       1
26       0
27       0
28       0
29       0
        ..
24970    1
24971    1
24972    1
24973    0
24974    1
24975    1
24976    0
24977    1
24978    1
24979    1
24980    1
24981    1
24982    0
24983    0
24984    0
24985    0
24986    1
24987    1
24988    1
24989    1
24990    1
24991    0
24992    0
24993    0
24994    0
24995    0
24996    0
24997    0
24998    0
24999    1
Name: sentiment, dtype: int64
from sklearn.feature_extraction.text import TfidfVectorizer as TFIV

# TF-IDF vectorizer: drop rare terms (min_df=3), strip accents, unigrams +
# bigrams, sublinear tf scaling, smoothed idf, English stop words removed.
tfv = TFIV(min_df=3, max_features=None, strip_accents='unicode', analyzer='word',
           token_pattern=r'\w{1,}', ngram_range=(1, 2), use_idf=1,
           smooth_idf=1, sublinear_tf=1, stop_words='english')

# Vectorize train and test together so both share one vocabulary and one
# set of idf weights.
# NOTE(review): fitting idf on the test set leaks test-corpus statistics;
# acceptable for this transductive Kaggle setup, but not for production.
X_all = train_data + test_data
len_train = len(train_data)

# fit_transform replaces the original fit(...) followed by transform(...) —
# one call, same result, single pass over the corpus. (Still slow; go have tea.)
X_all = tfv.fit_transform(X_all)

# Split back into the train and test portions.
X = X_all[:len_train]
X_test = X_all[len_train:]
# Multinomial Naive Bayes on the TF-IDF features.
from sklearn.naive_bayes import MultinomialNB as MNB

model_NB = MNB()
# The sparse TF-IDF matrix can be fed to MultinomialNB directly.
model_NB.fit(X, y_train)
# The original post pasted the fitted estimator's repr here:
#   MNB(alpha=1.0, class_prior=None, fit_prior=True)
# As executable code that line built a second, unused estimator, so it is
# kept only as this comment.

from sklearn.cross_validation import cross_val_score
import numpy as np
print "多项式贝叶斯分类器20折交叉验证得分: ", np.mean(cross_val_score(model_NB, X, y_train, cv=20, scoring='roc_auc'))
# 多项式贝叶斯分类器20折交叉验证得分: 0.950837239
多项式贝叶斯分类器20折交叉验证得分:  0.950837239
# 折腾一下逻辑回归,恩
from sklearn.linear_model import LogisticRegression as LR
from sklearn.grid_search import GridSearchCV

# 设定grid search的参数
grid_values = {'C':[30]}  
# 设定打分为roc_auc
model_LR = GridSearchCV(LR(penalty = 'L2', dual = True, random_state = 0), grid_values, scoring = 'roc_auc', cv = 20) 
# 数据灌进来
model_LR.fit(X,y_train)
# 20折交叉验证,开始漫长的等待...
GridSearchCV(cv=20, estimator=LogisticRegression(C=1.0, class_weight=None, dual=True, 
             fit_intercept=True, intercept_scaling=1, penalty='L2', random_state=0, tol=0.0001),
        fit_params={}, iid=True, loss_func=None, n_jobs=1,
        param_grid={'C': [30]}, pre_dispatch='2*n_jobs', refit=True,
        score_func=None, scoring='roc_auc', verbose=0)
#输出结果
print model_LR.grid_scores_
mean: 0.96459, std: 0.00489, params: {'C': 30}

猜你喜欢

转载自blog.csdn.net/yiyongzhifu/article/details/89764622