Kaggle Titanic Survival Prediction: An Open-Source 80% Solution

Prediction with a random forest reaches 80.0% accuracy; the other models score lower, some even below the baseline.

# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor

train = pd.read_csv("E:/tfcode/Titanic/train.csv")
test = pd.read_csv("E:/tfcode/Titanic/test.csv")
train.info()
test.info()
all_data = pd.concat([train,test], ignore_index=True)
PassengerId = test['PassengerId']
# Add the Title feature: extract each passenger's title from Name and group the titles into 6 classes
all_data['Title'] = all_data['Name'].apply(lambda x:x.split(',')[1].split('.')[0].strip())
Title_Dict = {}
Title_Dict.update(dict.fromkeys(['Capt', 'Col', 'Major', 'Dr', 'Rev'], 'Officer'))
Title_Dict.update(dict.fromkeys(['Don', 'Sir', 'the Countess', 'Dona', 'Lady'], 'Royalty'))
Title_Dict.update(dict.fromkeys(['Mme', 'Ms', 'Mrs'], 'Mrs'))
Title_Dict.update(dict.fromkeys(['Mlle', 'Miss'], 'Miss'))
Title_Dict.update(dict.fromkeys(['Mr'], 'Mr'))
Title_Dict.update(dict.fromkeys(['Master','Jonkheer'], 'Master'))
all_data['Title'] = all_data['Title'].map(Title_Dict)
sns.barplot(x="Title", y="Survived", data=all_data, palette='Set3')

# Add a family-size feature: FamilySize = Parch + SibSp + 1, then bin FamilySize into 3 classes
all_data['FamilySize'] = all_data['SibSp']+all_data['Parch']+1

# Bin FamilySize into three classes by survival rate to form the FamilyLabel feature.
def Fam_label(s):
    if 2 <= s <= 4:
        return 2
    elif (4 < s <= 7) or (s == 1):
        return 1
    else:  # s > 7
        return 0
all_data['FamilyLabel'] = all_data['FamilySize'].apply(Fam_label)
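To see how these cut points were chosen (my addition, mirroring the Title barplot above; the same check works for the TicketGroup bins below):

print(all_data.groupby('FamilySize')['Survived'].mean())  # survival rate per raw FamilySize
sns.barplot(x="FamilySize", y="Survived", data=all_data, palette='Set3')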
# Add the Deck feature: fill missing Cabin values with 'Unknown', then take the first letter of Cabin as the passenger's deck.
all_data['Cabin'] = all_data['Cabin'].fillna('Unknown')
all_data['Deck'] = all_data['Cabin'].str.get(0)
# Add the TicketGroup feature: count how many passengers share each ticket number.
Ticket_Count = dict(all_data['Ticket'].value_counts())
all_data['TicketGroup'] = all_data['Ticket'].apply(lambda x:Ticket_Count[x])
# Bin TicketGroup into three classes by survival rate.
def Ticket_Label(s):
    if 2 <= s <= 4:
        return 2
    elif (4 < s <= 8) or (s == 1):
        return 1
    else:  # s > 8
        return 0
all_data['TicketGroup'] = all_data['TicketGroup'].apply(Ticket_Label)
### Fill in missing values
# Age feature: 263 values are missing, too many to fill with a constant,
# so fit a random forest on Sex, Title and Pclass to impute the missing ages.
age_df = all_data[['Age', 'Pclass', 'Sex', 'Title']]
age_df = pd.get_dummies(age_df)
known_age = age_df[age_df.Age.notnull()].values    # .values replaces the removed DataFrame.as_matrix()
unknown_age = age_df[age_df.Age.isnull()].values
y = known_age[:, 0]   # first column is Age, the regression target
x = known_age[:, 1:]
rfr = RandomForestRegressor(random_state=0, n_estimators=100, n_jobs=-1)
rfr.fit(x, y)
predictedAges = rfr.predict(unknown_age[:, 1:])
all_data.loc[all_data.Age.isnull(), 'Age'] = predictedAges
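A quick sanity check on the imputation model (my addition, not part of the original solution) is to cross-validate the regressor on the rows where Age is known:

from sklearn.model_selection import cross_val_score
# Mean R^2 of the age regressor over 5 folds; only a rough quality signal.
print(cross_val_score(rfr, x, y, cv=5).mean())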

# Discretize the now-complete Age column into coarse bands.
all_data['Age'] = all_data['Age'].map(lambda x: 'child' if x<12 else 'youth' if x<30 else 'adult' if x<60 else 'old' if x<75 else 'tooold' if x>=75 else 'null')
#Embarked Feature: 2 values are missing. Both passengers have Pclass 1 and Fare 80,
# and the median Fare of Embarked='C', Pclass=1 passengers is 80, so fill with 'C'.
all_data['Embarked'] = all_data['Embarked'].fillna('C')
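The median claim above is easy to verify (my addition; the two rows just filled barely move the numbers):

# Median fare per (Embarked, Pclass); the C/1 cell should come out near 80.
print(all_data.groupby(['Embarked', 'Pclass'])['Fare'].median())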
#Fare Feature: 1 value is missing. That passenger has Embarked 'S' and Pclass 3,
# so fill with the median Fare of Embarked='S', Pclass=3 passengers.
fare = all_data[(all_data['Embarked']=='S')&(all_data['Pclass']==3)].Fare.median()
all_data['Fare'] = all_data['Fare'].fillna(fare)
all_data['Fare'] = all_data['Fare'].map(lambda x: np.log(x+1))  # log(1+x) transform to tame the fare skew
# 2.5 on the log scale corresponds to a fare of about exp(2.5)-1 ≈ 11.2
all_data['Fare'] = all_data['Fare'].map(lambda x: 'poor' if x < 2.5 else 'rich')
all_data['SibSp'] = all_data['SibSp'].map(lambda x: 'small' if x < 1 else 'middle' if x < 3 else 'large')

###Group identification
# The general pattern is that women and children survive at high rates while adult men mostly
# do not, so groups that defy this pattern are picked out for special handling: woman-and-child
# groups whose survival rate is 0 are flagged as "dead groups", and adult-male groups whose
# survival rate is 1 as "survivor groups". Women and children in a dead group are presumed
# unlikely to survive, and adult men in a survivor group presumed likely to survive.
all_data['Surname'] = all_data['Name'].apply(lambda x:x.split(',')[0].strip())
Surname_Count = dict(all_data['Surname'].value_counts())
all_data['FamilyGroup'] = all_data['Surname'].apply(lambda x:Surname_Count[x])
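The code above only computes surname-group sizes; the flagging logic the comment describes is not implemented in this excerpt. A minimal sketch of one plausible reading (fc, ma, dead_groups and survivor_groups are my own hypothetical names, not the author's code):

# Training rows only; Age here is already banded, so 'child' marks children.
train_part = all_data[all_data['Survived'].notnull()]
fc = train_part[(train_part['Sex'] == 'female') | (train_part['Age'] == 'child')]
ma = train_part[(train_part['Sex'] == 'male') & (train_part['Age'] != 'child')]
# Surname groups of size >= 2 where every woman/child died ("dead groups")...
fc_rate = fc[fc['FamilyGroup'] >= 2].groupby('Surname')['Survived'].mean()
dead_groups = set(fc_rate[fc_rate == 0].index)
# ...and groups where every adult man survived ("survivor groups").
ma_rate = ma[ma['FamilyGroup'] >= 2].groupby('Surname')['Survived'].mean()
survivor_groups = set(ma_rate[ma_rate == 1].index)

These sets would then be used to adjust the test-set passengers that fall into them; the feature selection below drops Surname and FamilyGroup, so that step is omitted here.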


all_data = all_data[['SibSp','PassengerId','Survived', 'Pclass', 'Sex', 'Age', 'Fare', 'Embarked', 'Title', 'FamilyLabel', 'Deck', 'TicketGroup']]
all_data = pd.get_dummies(all_data)
train = all_data[all_data['Survived'].notnull()].copy()
test = all_data[all_data['Survived'].isnull()].drop(['Survived'], axis=1)
labels = train['Survived']
# all_data was already one-hot encoded above, so these get_dummies calls are no-ops kept from the original.
features = pd.get_dummies(train.drop(['Survived'], axis=1))
test = pd.get_dummies(test)
train['Survived'] = train['Survived'].astype(np.int64, copy=False)
train.info()
test.info()
#Modeling and tuning

from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score, roc_auc_score
from time import time
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from xgboost.sklearn import XGBClassifier

# A generic grid-search wrapper
def fit_model(alg, parameters):
    X = features
    y = labels  # the dataset is small, so grid-search on all of it
    scorer = make_scorer(roc_auc_score)  # use roc_auc_score as the metric
    grid = GridSearchCV(alg, parameters, scoring=scorer, cv=5)  # 5-fold grid search
    start = time()  # time the search
    grid = grid.fit(X, y)  # run the search
    end = time()
    t = round(end-start, 3)
    print(grid.best_params_)  # best parameters found
    print('searching time for {} is {} s'.format(alg.__class__.__name__, t))
    return grid  # the fitted GridSearchCV object
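One caveat (my note, not the author's): make_scorer(roc_auc_score) computes the AUC from hard 0/1 predictions rather than probabilities. A drop-in alternative for the GridSearchCV line inside fit_model is sklearn's built-in scorer:

# AUC computed from predicted probabilities / decision scores.
grid = GridSearchCV(alg, parameters, scoring='roc_auc', cv=5)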

# The algorithms to try
alg1=DecisionTreeClassifier(random_state=29)
alg3=AdaBoostClassifier(random_state=29)
alg4=KNeighborsClassifier(n_jobs=-1)
alg5=XGBClassifier(random_state=29,n_jobs=-1)
alg6=SVC(probability=True,random_state=29)

# Parameter ranges to search
parameters1={'max_depth':range(1,10)}
parameters2_2 = {'max_depth':[6]}  # the search space is too large, so tune in two passes (only the second appears here)
parameters3 = {'n_estimators':range(10,200,10),'learning_rate':[i/10.0 for i in range(5,15)]}
parameters4 = {'n_neighbors':range(2,10),'leaf_size':range(10,80,20)}
parameters5_1 = {'n_estimators':range(10,200,10)}
parameters5_2 = {'max_depth':range(1,10),'min_child_weight':range(1,10)}
parameters5_3 = {'subsample':[i/10.0 for i in range(1,10)], 'colsample_bytree':[i/10.0 for i in range(1,10)]}  # the space is too large, so XGBoost is tuned in three passes
parameters6 = {"C":range(1,20), "gamma": [0.05,0.1,0.15,0.2,0.25]}


#1. DecisionTreeClassifier
clf1=fit_model(alg1,parameters1)

#2.RandomForest
alg2 = RandomForestClassifier(random_state = 10, warm_start = True,
                                  n_estimators = 26,
                                  max_features = 'sqrt')
clf2 = fit_model(alg2, parameters2_2)
#3. AdaBoost
clf3 = fit_model(alg3, parameters3)
#4. KNN
clf4 = fit_model(alg4, parameters4)
#5. XGBoost: first tuning pass
clf5_m1 = fit_model(alg5, parameters5_1)
#5. XGBoost: second tuning pass
alg5 = XGBClassifier(n_estimators=140, random_state=29, n_jobs=-1)
clf5_m2 = fit_model(alg5, parameters5_2)
#5. XGBoost: third tuning pass
alg5 = XGBClassifier(n_estimators=140, max_depth=4, min_child_weight=5, random_state=29, n_jobs=-1)
clf5 = fit_model(alg5, parameters5_3)
#6. SVC
clf6 = fit_model(alg6, parameters6)
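To compare the tuned models (my addition), each fitted GridSearchCV exposes its best cross-validated score:

# Best CV score per model under the scorer defined in fit_model.
for name, clf in zip(['DecisionTree', 'RandomForest', 'AdaBoost', 'KNN', 'XGBoost', 'SVC'],
                     [clf1, clf2, clf3, clf4, clf5, clf6]):
    print(name, clf.best_score_)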
# A save helper that writes predictions in the Kaggle submission format
def save(clf,i):
    arr = pd.Series(clf.predict(test), name='Survived')
    arr = arr.astype(np.int64, copy=False)
    sub = pd.concat([PassengerId, arr], axis=1)
    sub.to_csv("res_tan_{}.csv".format(i), index=False)

for i, clf in enumerate([clf1, clf2, clf3, clf4, clf5, clf6], start=1):
    save(clf, i)

# A voting function over the six models. Note the lenient threshold: a passenger
# is predicted to survive when at least 2 of the 6 models vote 1, not a strict majority.
def major(i):
    vote = 0
    for clf in [clf1, clf2, clf3, clf4, clf5, clf6]:
        pred = clf.predict(test[i:i+1])
        vote = vote + pred[0]
    if vote >= 2:
        result = 1
    else:
        result = 0
    return result
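Predicting one row at a time is slow; a vectorized equivalent (my sketch, same 2-of-6 threshold) sums each model's full prediction vector once:

# Sum the six 0/1 prediction vectors, then threshold at 2 votes.
votes = sum(clf.predict(test) for clf in [clf1, clf2, clf3, clf4, clf5, clf6])
vectorized_pred = (votes >= 2).astype(np.int64)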

# Apply the voting function and save the result.
L = range(test.shape[0])
pred = list(map(major, L))  # map() is lazy in Python 3, so materialize it before building the Series
arr = pd.Series(pred, name='Survived')
arr = arr.astype(np.int64, copy=False)
sub = pd.concat([PassengerId, arr], axis=1)
sub.to_csv("res_tan_7.csv", index=False)



Reposted from blog.csdn.net/just_sort/article/details/80029072