Classifying the Titanic dataset with a random forest and a gradient boosting decision tree, and analyzing the results

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report


titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
# Feature selection
X = titanic[['pclass','age','sex']]
y = titanic['survived']
# Fill the missing values in the 'age' column; the median and the mean affect the model least, so fill with the mean
X = X.copy()  # work on a copy to avoid pandas' SettingWithCopyWarning
X['age'] = X['age'].fillna(X['age'].mean())
# Split the data into training and test sets
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=33)


vec = DictVectorizer(sparse=False)
X_train = vec.fit_transform(X_train.to_dict(orient='records'))
print(vec.feature_names_)  # After the transformation, categorical features are split into separate one-hot columns, while numerical features stay unchanged
X_test = vec.transform(X_test.to_dict(orient='records'))  # Apply the same feature transformation to the test data
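# A minimal illustration (not part of the original script; the sample values are made up):
# DictVectorizer one-hot encodes string-valued fields such as 'pclass' and 'sex'
# and passes numeric fields such as 'age' through unchanged.
demo_vec = DictVectorizer(sparse=False)
demo_rows = [
    {'pclass': '1st', 'sex': 'female', 'age': 29.0},
    {'pclass': '3rd', 'sex': 'male', 'age': 2.0},
]
print(demo_vec.fit_transform(demo_rows))
print(demo_vec.feature_names_)  # e.g. ['age', 'pclass=1st', 'pclass=3rd', 'sex=female', 'sex=male']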


# Train a single decision tree model and predict on the test set
dtc = DecisionTreeClassifier()
dtc.fit(X_train,y_train)
dtc_y_predict = dtc.predict(X_test)


# Train a random forest classifier (ensemble model) and predict on the test set
rfc = RandomForestClassifier()
rfc.fit(X_train,y_train)
rfc_y_predict = rfc.predict(X_test)


# Train a gradient boosting decision tree ensemble and predict on the test set
gbc = GradientBoostingClassifier()
gbc.fit(X_train,y_train)
gbc_y_predict = gbc.predict(X_test)


print('The accuracy of decision tree is: ',dtc.score(X_test,y_test))
print(classification_report(y_test, dtc_y_predict))
print('\n'*2)
print('The accuracy of random forest classifier:',rfc.score(X_test,y_test))
print(classification_report(y_test, rfc_y_predict))
print('\n'*2)
print('The accuracy of gradient tree boosting',gbc.score(X_test,y_test))
print(classification_report(y_test, gbc_y_predict))
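
To make the comparison less dependent on a single 25% hold-out split, a cross-validation check can be added. The sketch below is not part of the original post; it simply reuses the three classifiers defined above with scikit-learn's cross_val_score on the training data.

# Added sketch: 5-fold cross-validation on the training set for a more stable comparison
from sklearn.model_selection import cross_val_score

for name, model in [('decision tree', dtc),
                    ('random forest', rfc),
                    ('gradient tree boosting', gbc)]:
    scores = cross_val_score(model, X_train, y_train, cv=5)
    print(name, 'mean CV accuracy:', scores.mean())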
