小项目(集成算法)--泰坦尼克船员获救预测

1.拿到数据集应该先分析数据(每个特征分析,缺失值等等)
2.数据预处理
首先简单看一下数据集的数据特征

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# First look at the data: describe() prints count/mean/std/min/quartiles/max
# for every numeric column, which immediately exposes columns with missing
# values (their count is lower than the row total).
df = pd.read_csv('titanic_train.csv')
print(df.describe())

(此处原文插图已丢失:为上面 describe() 输出的数值列统计摘要)
我们分析数据时候发现age有缺失,但是age这个特征又很重要不能丢失,我们可以用中位数来填充(还算合理)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Age has missing entries but is too important a feature to drop, so fill
# the gaps with the column median (a reasonable central value) and
# re-inspect the summary statistics.
df = pd.read_csv('titanic_train.csv')

median_age = df["Age"].median()
df["Age"] = df["Age"].fillna(median_age)
print(df.describe())

在分析数据时发现Sex列的属性值是“male & female”,这是字符,我们转换成数值,同理将Embarked列也转化成数值

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('titanic_train.csv')

# Fill missing ages with the column median.
df["Age"] = df["Age"].fillna(df["Age"].median())

# Sex is stored as the strings "male"/"female"; encode it numerically:
# male -> 0, female -> 1.
df.loc[df["Sex"] == "male", "Sex"] = 0
df.loc[df["Sex"] == "female", "Sex"] = 1
print(df["Sex"][:5])  # 0 1 1 1 0
print("------------------------")

# Embarked: fill missing values with the most common port ("S"),
# then encode S -> 0, C -> 1, Q -> 2.
df["Embarked"] = df["Embarked"].fillna("S")
for port, code in (("S", 0), ("C", 1), ("Q", 2)):
    df.loc[df["Embarked"] == port, "Embarked"] = code
print(df["Embarked"][:10])

(此处原文插图已丢失:为 Sex / Embarked 数值化后前几行的打印结果)
3.线性回归来预测分类

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression     # linear regression
from sklearn.model_selection import KFold             # K-fold cross validation
from sklearn.model_selection import train_test_split  # train/test splitting

# --- Preprocessing: impute Age, numerise Sex and Embarked (same as above) ---
titanic = pd.read_csv('titanic_train.csv')

titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())  # median-impute Age

# Sex: male -> 0, female -> 1
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1

# Embarked: fill missing with the most frequent port, then S/C/Q -> 0/1/2
titanic["Embarked"] = titanic["Embarked"].fillna("S")
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2

# Features and label
columns = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
X = titanic[columns]
y = titanic["Survived"]


# Fit a linear regression on each training fold and predict the held-out fold
alg = LinearRegression()

kf = KFold(n_splits=3)  # 3-fold cross validation on the training data
kf.get_n_splits(X)

predictions = []
for train, test in kf.split(X):
    train_data = titanic[columns].iloc[train, :]
    train_target = titanic["Survived"].iloc[train]

    alg.fit(train_data, train_target)
    pred_target = alg.predict(titanic[columns].iloc[test, :])
    predictions.append(pred_target)

# Stitch the three per-fold prediction arrays back into one array
# covering every row of the data set.
predictions = np.concatenate(predictions, axis=0)

# Regression outputs are continuous; threshold at 0.5 to get a 0/1 class.
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0

print(predictions)
print(len(predictions))  # 891

# BUG FIX: the original computed
#   sum(predictions[predictions == titanic["Survived"]]) / len(predictions)
# which sums the predicted VALUES of the correctly classified rows, so every
# correct 0-prediction contributes nothing -- that is why it reported the
# nonsensical 0.26.  Accuracy is the fraction of predictions that equal the
# true label.
accuracy = (predictions == titanic["Survived"]).mean()
print(accuracy)  # ~0.78 with this preprocessing

4.逻辑回归来分类

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression     # linear regression
from sklearn.linear_model import LogisticRegression   # logistic regression
from sklearn.model_selection import KFold             # K-fold cross validation
from sklearn.model_selection import train_test_split  # train/test splitting

# Load and preprocess exactly as before: median-impute Age, then encode
# Sex (male=0, female=1) and Embarked (missing treated as "S"; S/C/Q -> 0/1/2).
df = pd.read_csv('titanic_train.csv')

df["Age"] = df["Age"].fillna(df["Age"].median())

df.loc[df["Sex"] == "male", "Sex"] = 0
df.loc[df["Sex"] == "female", "Sex"] = 1

df["Embarked"] = df["Embarked"].fillna("S")
for port, code in (("S", 0), ("C", 1), ("Q", 2)):
    df.loc[df["Embarked"] == port, "Embarked"] = code

# Features and label.
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
X = df[features]
y = df["Survived"]

# Logistic regression is the natural model for a binary target;
# fit on the full data set and report the training accuracy.
lr = LogisticRegression()
lr.fit(X, y)
print(lr.score(X, y))  # ~0.799, a reasonable baseline

5.随机森林

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression     # linear regression
from sklearn.linear_model import LogisticRegression   # logistic regression
from sklearn.ensemble import RandomForestClassifier   # random forest
from sklearn.model_selection import KFold             # K-fold cross validation
from sklearn.model_selection import train_test_split  # train/test splitting
from sklearn.model_selection import cross_val_score

# Load and preprocess exactly as before: median-impute Age, then encode
# Sex (male=0, female=1) and Embarked (missing treated as "S"; S/C/Q -> 0/1/2).
df = pd.read_csv('titanic_train.csv')

df["Age"] = df["Age"].fillna(df["Age"].median())

df.loc[df["Sex"] == "male", "Sex"] = 0
df.loc[df["Sex"] == "female", "Sex"] = 1

df["Embarked"] = df["Embarked"].fillna("S")
for port, code in (("S", 0), ("C", 1), ("Q", 2)):
    df.loc[df["Embarked"] == port, "Embarked"] = code

# Features and label.
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
X = df[features]
y = df["Survived"]


# Small random forest (10 trees), scored with cross-validation.
forest = RandomForestClassifier(n_estimators=10, random_state=1)

folds = KFold()
folds.get_n_splits(X)

cv_scores = cross_val_score(forest, X, y, cv=folds)

print(cv_scores.mean())  # ~0.786, about the same as logistic regression

上面结果看来随机森林的效果和逻辑回归差不多,所以我们试试调参

# Random forest again, this time with more trees and pre-pruning constraints
# (min_samples_split / min_samples_leaf) to curb overfitting.
forest = RandomForestClassifier(n_estimators=50,
                                min_samples_split=4,
                                min_samples_leaf=2,
                                random_state=1)

folds = KFold()
folds.get_n_splits(X)

cv_scores = cross_val_score(forest, X, y, cv=folds)

print(cv_scores.mean())  # 0.8159371492704826

可以看到调参之后准确率上升了,但是我们调参之后发现准确率不会上升特别多了,这时候我们需要从数据的本身出发,去挖掘一些潜在的特征

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression     # linear regression
from sklearn.linear_model import LogisticRegression   # logistic regression
from sklearn.ensemble import RandomForestClassifier   # random forest
from sklearn.model_selection import KFold             # K-fold cross validation
from sklearn.model_selection import train_test_split  # train/test splitting
from sklearn.model_selection import cross_val_score

# --- Preprocessing: impute Age, numerise Sex and Embarked (same as above) ---
titanic = pd.read_csv('titanic_train.csv')

titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())

titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1

titanic["Embarked"] = titanic["Embarked"].fillna("S")
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2

# Features and label.
columns = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# BUG FIX: titanic[columns] is a slice of the original frame; assigning new
# columns to it raises SettingWithCopyWarning and, under pandas copy-on-write,
# does not modify X at all.  Take an explicit copy before deriving features.
X = titanic[columns].copy()
y = titanic["Survived"]

# Engineered features.
X["FamilySize"] = titanic["SibSp"] + titanic["Parch"]  # relatives aboard
X["Namelength"] = titanic["Name"].apply(len)           # length of the passenger's name

print(X[:5])


# Tuned random forest on the enriched feature set.
rfc = RandomForestClassifier(n_estimators=50, min_samples_split=4,
                             min_samples_leaf=2, random_state=1)

kf = KFold()
kf.get_n_splits(X)

scores = cross_val_score(rfc, X, y, cv=kf)

print(scores.mean())  # 0.8237934904601572 -- a small further improvement

可以看到准确率确实提高了一些,我们可以添加特征,也就可以选择一些比较重要的特征,那么如何选择呢

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression     # linear regression
from sklearn.linear_model import LogisticRegression   # logistic regression
from sklearn.ensemble import RandomForestClassifier   # random forest
from sklearn.model_selection import KFold             # K-fold cross validation
from sklearn.model_selection import train_test_split  # train/test splitting
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectKBest, f_classif  # univariate feature selection

# --- Preprocessing: impute Age, numerise Sex and Embarked (same as above) ---
titanic = pd.read_csv('titanic_train.csv')

titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())

titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1

titanic["Embarked"] = titanic["Embarked"].fillna("S")
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2

# Features and label.
columns = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# BUG FIX: titanic[columns] is a slice of the original frame; assigning new
# columns to it raises SettingWithCopyWarning and, under pandas copy-on-write,
# does not modify X at all.  Take an explicit copy before deriving features.
X = titanic[columns].copy()
y = titanic["Survived"]

# Engineered features.
X["FamilySize"] = titanic["SibSp"] + titanic["Parch"]  # relatives aboard
X["Namelength"] = titanic["Name"].apply(len)           # length of the passenger's name

# Full feature list, used to label the bars on the plot below.
columns = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked",
           "FamilySize", "Namelength"]


# Rank the features with a univariate ANOVA F-test (keep the best 5);
# a smaller p-value, i.e. a larger -log10(p), means the feature separates
# the survived/not-survived classes better.
selector = SelectKBest(f_classif, k=5)
selector.fit(X, y)

scores = -np.log10(selector.pvalues_)

plt.bar(range(X.shape[1]), scores)
plt.xticks(range(X.shape[1]), columns, rotation="vertical")  # feature names on the x-axis
plt.show()

结果:
(此处原文插图已丢失:为各特征 -log10(p 值) 的柱状图,可据此挑选最重要的特征)
6.集成算法:有很多种(具体见下面链接),这里选了一种
链接:https://www.jianshu.com/p/15a6611f1896 (转!)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.linear_model import LinearRegression      # linear regression
from sklearn.linear_model import LogisticRegression    # logistic regression
from sklearn.ensemble import RandomForestClassifier    # random forest
from sklearn.ensemble import GradientBoostingClassifier  # boosting ensemble
from sklearn.model_selection import KFold              # K-fold cross validation
from sklearn.model_selection import train_test_split   # train/test splitting
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectKBest, f_classif  # univariate feature selection

# --- Preprocessing: impute Age, numerise Sex and Embarked (same as above) ---
titanic = pd.read_csv('titanic_train.csv')

titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())

titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1

titanic["Embarked"] = titanic["Embarked"].fillna("S")
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2

# Features and label.
columns = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# BUG FIX: titanic[columns] is a slice of the original frame; assigning new
# columns to it raises SettingWithCopyWarning and, under pandas copy-on-write,
# does not modify X at all.  Take an explicit copy before deriving features.
X = titanic[columns].copy()
y = titanic["Survived"]

# Engineered features.
X["FamilySize"] = titanic["SibSp"] + titanic["Parch"]  # relatives aboard
X["Namelength"] = titanic["Name"].apply(len)           # length of the passenger's name

# 3-fold cross validation of a gradient-boosting ensemble.
kf = KFold(n_splits=3)
kf.get_n_splits(X)

gbc = GradientBoostingClassifier(n_estimators=50, random_state=1)

scores = cross_val_score(gbc, X, y, cv=kf)

print(scores.mean())  # 0.8159371492704827

猜你喜欢

转载自blog.csdn.net/fenfenxhf/article/details/82948811
今日推荐