A Summary of the Machine Learning Workflow


The code below follows the examples in this blog post: https://blog.csdn.net/han_xiaoyang/article/details/49797143#commentsedit. It covers the following steps.

1. Data inspection

'''
This part analyzes survival across the various passenger attributes.
'''
import pandas as pd         # data analysis
import numpy as np          # scientific computing
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']        # set the default font
'''
Load the training set and inspect its basic information.
'''
data_train = pd.read_csv("H:/Titanic_data/train.csv")
# data_train.info()



fig = plt.figure()
fig.set(alpha=0.2)  # set the figure's alpha (transparency) parameter


# '''
# Exploratory analysis -- plot the distribution of each passenger attribute
# '''
# plt.subplot2grid((2,3),(0,0))             # lay out several subplots within one large figure
# data_train.Survived.value_counts().plot(kind='bar')  # survival counts as a bar chart; value_counts() tallies each class
# plt.title(u"Survival (1 = survived)")  # title
# plt.ylabel(u"Count")
#
# plt.subplot2grid((2,3),(0,1))
# data_train.Pclass.value_counts().plot(kind="bar")
# plt.ylabel(u"Count")
# plt.title(u"Passenger class distribution")
#
# plt.subplot2grid((2,3),(0,2))
# plt.scatter(data_train.Survived, data_train.Age)       # scatter plot
# plt.ylabel(u"Age")                         # y-axis label
# plt.grid(b=True, which='major', axis='y')  # grid lines; axis sets their direction
# plt.title(u"Survival by age (1 = survived)")
#
#
# plt.subplot2grid((2,3),(1,0), colspan=2)    # colspan=2: this subplot spans two consecutive slots of the second row, starting at column 0
# data_train.Age[data_train.Pclass == 1].plot(kind='kde')     # kde draws a kernel density estimate
# data_train.Age[data_train.Pclass == 2].plot(kind='kde')
# data_train.Age[data_train.Pclass == 3].plot(kind='kde')
# plt.xlabel(u"Age")  # x-axis label
# plt.ylabel(u"Density")
# plt.title(u"Age distribution by passenger class")
# plt.legend((u'1st class', u'2nd class', u'3rd class'), loc='best')  # legend for the three curves
#
#
# plt.subplot2grid((2,3),(1,2))
# data_train.Embarked.value_counts().plot(kind='bar')
# plt.title(u"Passengers per port of embarkation")
# plt.ylabel(u"Count")
# plt.show()


'''
Correlating attributes with the survival outcome
'''
# # Survived vs. not-survived counts per passenger class. An important feature!!!
# Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()
# Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
# df = pd.DataFrame({u'Survived': Survived_1, u'Not survived': Survived_0})
# df.plot(kind='bar', stacked=True)  # stacked=True stacks each group onto a single bar
# plt.title(u'Survival by passenger class')
# plt.xlabel(u'Passenger class')
# plt.ylabel(u'Count')
# plt.show()



# Sex distribution among survivors and non-survivors. An important feature!!!
# Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()
# Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()
# df = pd.DataFrame({u'Male': Survived_m, u'Female': Survived_f})
# df.plot(kind='bar', stacked=True)
# plt.title(u"Survival by sex")
# plt.xlabel(u"Sex")
# plt.ylabel(u"Count")
# plt.show()


# Survival by sex within each cabin class
# plt.title(u'Survival by cabin class and sex')
# ax1=fig.add_subplot(141)
# data_train.Survived[data_train.Sex == 'female'][data_train.Pclass != 3].value_counts().plot(kind='bar', label="female,highclass", color='#FA2479')
# ax1.set_xticklabels([u"Not survived", u"Survived"], rotation=0)
# ax1.legend([u"Female/high class"], loc='best')
#
# ax2=fig.add_subplot(142,sharey=ax1)
# data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind='bar', label="female,lowclass", color='pink')
# ax2.set_xticklabels([u"Not survived", u"Survived"], rotation=0)
# ax2.legend([u"Female/low class"], loc='best')
#
# ax3=fig.add_subplot(143,sharey=ax1)
# data_train.Survived[data_train.Sex == 'male'][data_train.Pclass != 3].value_counts().plot(kind='bar', label="male,highclass", color='lightblue')
# ax3.set_xticklabels([u"Not survived", u"Survived"], rotation=0)
# ax3.legend([u"Male/high class"], loc='best')
#
# ax4=fig.add_subplot(144,sharey=ax1)
# data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind='bar', label="male,lowclass", color='steelblue')
# ax4.set_xticklabels([u"Not survived", u"Survived"], rotation=0)
# ax4.legend([u"Male/low class"], loc='best')
# plt.show()

# Survival by port of embarkation
# Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
# Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
# df = pd.DataFrame({u'Survived': Survived_1, u'Not survived': Survived_0})
# df.plot(kind='bar', stacked=True)  # stacked=True stacks each group onto a single bar
# plt.title(u'Survival by port')
# plt.xlabel(u'Port of embarkation')
# plt.ylabel(u'Count')


'''
# Effect of the number of siblings/spouses (SibSp) and parents/children (Parch) aboard on survival; groupby output for SibSp:
                PassengerId
SibSp Survived             
0     0                 398
      1                 210
1     0                  97
      1                 112
2     0                  15
      1                  13
3     0                  12
      1                   4
4     0                  15
      1                   3
5     0                   5
8     0                   7
'''

# g = data_train.groupby(['SibSp','Survived'])        # groupby() usage
# df = pd.DataFrame(g.count()['PassengerId'])
# print(df)


'''
The Cabin attribute:
It should count as categorical. It has many missing values to begin with, and the observed
values are scattered -- clearly a tricky one. First impression: if we treat it directly as a
categorical feature it is far too sparse, and each factorized level would probably carry
little weight. Given all the missing values, let's instead condition on whether Cabin is
missing at all (even though a missing entry may simply mean the record was lost rather than
never registered, so this is not necessarily sound) and look at Survived at the coarse
"has Cabin / no Cabin" granularity first.

Passengers with a Cabin value appear to have a somewhat higher survival rate.
'''

# Survived_cabin = data_train.Survived[pd.notnull(data_train.Cabin)].value_counts()
# Survived_nocabin = data_train.Survived[pd.isnull(data_train.Cabin)].value_counts()
# df = pd.DataFrame({u'Has Cabin': Survived_cabin, u'No Cabin': Survived_nocabin})
# df.plot(kind='bar', stacked=True)  # stacked=True stacks each group onto a single bar
# plt.title(u'Survival by presence of a Cabin value')
# plt.xlabel(u'Cabin present or not')
# plt.ylabel(u'Count')
# plt.show()

Use the functions above to examine the relationship between each feature and the outcome.

2. Data processing: (1) For missing values, fit against the complete records and impute from the other features (model-based imputation).
(2) Scale the features whose values vary over a wide range.

import pandas as pd
from sklearn.ensemble import RandomForestRegressor
import sklearn.preprocessing as preprocessing
import numpy as np          # scientific computing
from pylab import mpl


data_train = pd.read_csv("G:/KNNtest/Titanic/train.csv")
data_test = pd.read_csv("G:/KNNtest/Titanic/test.csv")
'''
I. Data preprocessing

    What I call preprocessing here actually covers much of the feature engineering that
Kagglers are so fond of talking about -- and it is absolutely necessary!
    Feature engineering matters enormously! Here we need to handle:
    1. Cabin (present or missing)
    2. Age (missing values)
    When values are missing, there are a few common strategies (a sketch of strategies 2
and 3 follows this block):
    1. If the missing samples make up a very high share of the total, we may simply drop the
feature; including it could just add noise and hurt the final result.
    2. If a moderate share is missing and the attribute is non-continuous (e.g. categorical),
we can treat NaN as a new category and add it to the category set.
    3. If a moderate share is missing and the attribute is continuous, we can pick a step
size (for Age here, say a bucket every 2-3 years), discretize it, and then add NaN as one
more type among the resulting categories.
    4. If only a few values are missing, we can also try fitting a model on the existing
values and imputing the rest.

    In this example the last two strategies should both be workable; we will try model-based
imputation first (although with little background information to fit on, this is not
necessarily a great choice).
    We use scikit-learn's RandomForest to fit the missing age values. (Note: a RandomForest
is a machine learning algorithm that draws different samples from the raw data, builds many
DecisionTrees, and then averages them, reducing overfitting and improving the result.)
    Note: running the imputation code does not change the data file on disk, but printing
the frame shows that the missing 'Age' values have been filled in.
'''
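Strategies 2 and 3 are described above but not used in this pipeline. A minimal sketch of them, on made-up data (df_demo and Age_bucket are hypothetical names, not part of the pipeline):

import pandas as pd

df_demo = pd.DataFrame({'Embarked': ['S', None, 'C'], 'Age': [22.0, None, 35.0]})

# Strategy 2: make NaN an explicit category of a categorical feature
df_demo['Embarked'] = df_demo['Embarked'].fillna('Missing')

# Strategy 3: discretize the continuous Age into 3-year buckets, then add NaN as one more type
df_demo['Age_bucket'] = pd.cut(df_demo['Age'], bins=range(0, 84, 3)).astype(str)
df_demo.loc[df_demo['Age'].isnull(), 'Age_bucket'] = 'Missing'
print(df_demo)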


# Impute missing Age values with a RandomForestRegressor
def set_missing_ages(df):
    # Take the available numeric features and feed them into the RandomForestRegressor
    age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]

    # Split passengers into known-age and unknown-age groups
    known_age = age_df[age_df.Age.notnull()].values
    unknown_age = age_df[age_df.Age.isnull()].values

    # y is the target: age
    y = known_age[:, 0]
    # x holds the feature attribute values
    x = known_age[:, 1:]

    # Fit a RandomForestRegressor
    rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)  # n_estimators is the number of trees;
    # n_jobs=-1 launches as many jobs as the CPU has cores
    rfr.fit(x, y)

    # Predict the unknown ages with the fitted model
    predictedAges = rfr.predict(unknown_age[:, 1:])
    # Fill the original missing entries with the predictions
    df.loc[(df.Age.isnull()), 'Age'] = predictedAges
    scaler = preprocessing.StandardScaler()  # use the scaler to standardize Age and Fare
    df['Age'] = scaler.fit_transform(df[['Age']])
    df['Fare'] = scaler.fit_transform(df[['Fare']])
    return df, rfr

'''
The values of the Age and Fare attributes vary over very different numeric ranges! If you
know logistic regression and gradient descent, you know that when the scales of the
attributes differ too much, convergence speed takes a massive hit -- it may not converge at
all! So we first use scikit-learn's preprocessing module to scale these two attributes,
standardizing the wide-ranging features to zero mean and unit variance.
'''
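One thing to watch: set_missing_ages above calls fit_transform separately on each frame, so the training and test sets end up standardized with different statistics. A minimal sketch of the usual alternative, assuming the frames are named as in this post:

# Fit the scaler on the training set only, then reuse the same statistics on the test set
age_scaler = preprocessing.StandardScaler().fit(data_train[['Age']])
data_train['Age'] = age_scaler.transform(data_train[['Age']])
data_test['Age'] = age_scaler.transform(data_test[['Age']])

fare_scaler = preprocessing.StandardScaler().fit(data_train[['Fare']])
data_train['Fare'] = fare_scaler.transform(data_train[['Fare']])
data_test['Fare'] = fare_scaler.transform(data_test[['Fare']])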
def set_cabin_type(df):
    df.loc[(df.Cabin.notnull()), 'Cabin'] = "Yes"
    df.loc[(df.Cabin.isnull()), 'Cabin'] = "No"
    dummies_Cabin = pd.get_dummies(df.Cabin, prefix='Cabin')
    dummies_Embarked = pd.get_dummies(df.Embarked, prefix='Embarked')
    dummies_Sex = pd.get_dummies(df.Sex, prefix='Sex')
    dummies_Pclass = pd.get_dummies(df.Pclass, prefix='Pclass')

    df = pd.concat([df, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
    df.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)

    return df
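The filter regexes used below (Sex_.*, Pclass_.* and so on) match the indicator columns that pd.get_dummies creates, one per category level. A tiny, self-contained illustration:

import pandas as pd

# pd.get_dummies turns one categorical column into one indicator column per level,
# here Sex_female and Sex_male
print(pd.get_dummies(pd.Series(['male', 'female'], name='Sex'), prefix='Sex'))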


3. Feature extraction: the rough feature set comes from the feature-vs-outcome relationships observed in step 1, and is then adjusted.
4. Model building

from sklearn import linear_model

def LogisticRegre(df):
    train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    train_np = train_df.values

    # y is the Survived outcome
    y = train_np[:,0]
    # X holds the feature attribute values
    X = train_np[:,1:]

    clf = linear_model.LogisticRegression(C=1.0, penalty='l1', solver='liblinear', tol=1e-6)  # liblinear supports the l1 penalty
    return clf.fit(X, y)  # return the plain fitted model so its coef_ can be inspected; bagging comes in step 7

pd.DataFrame({"columns": list(te_d.columns)[2:14], "coef": list(train_clf.coef_.T)})  # associate the model coefficients with their features (executed in main(), see below)


5. Cross-validation

    # !!!! Split the data, training data : CV data = 7:3 (this part lives in the main program)

    split_train, split_cv = model_selection.train_test_split(tr_d, test_size=0.3, random_state=0)  # split the processed training set, not the test set
    train_df = split_train.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    # Build the model
    clf = linear_model.LogisticRegression(C=1.0, penalty='l1', solver='liblinear', tol=1e-6)
    clf.fit(train_df.values[:, 1:], train_df.values[:, 0])
    # train_df.to_csv("G:/KNNtest/Titanic/train_df.csv")

    # !!! Predict on the cross-validation split
    cv_df = split_cv.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    predictions = clf.predict(cv_df.values[:, 1:])

    clf = linear_model.LogisticRegression(C=1.0, penalty='l1', solver='liblinear', tol=1e-6)
    all_data = tr_d.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    X = all_data.values[:, 1:]
    y = all_data.values[:, 0]
    # print(model_selection.cross_val_score(clf, X, y, cv=5))       # cross-validation scores
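The snippet above predicts on the held-out 30% but never scores it; a short addition using sklearn's metrics module (not in the original post) closes the loop:

    from sklearn import metrics

    # Accuracy on the held-out 30% split (column 0 of cv_df is Survived)
    print(metrics.accuracy_score(cv_df.values[:, 0], predictions))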

6. Learning curve

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve   # learning_curve lives in model_selection in current scikit-learn


# Use sklearn's learning_curve to get training_score and cv_score, and draw the learning curve with matplotlib
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1,
                        train_sizes=np.linspace(.05, 1., 20), verbose=0, plot=True):
    """
    Plot the learning curve of a model on the given data.

    Parameters
    ----------
    estimator : the classifier you are using
    title : title of the plot
    X : input features, as a numpy array
    y : input target vector
    ylim : tuple (ymin, ymax) fixing the lowest and highest points of the y-axis
    cv : number of cross-validation folds; one fold serves as the CV set, the remaining n-1 as training data
    n_jobs : number of parallel jobs (default 1)
    """
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, verbose=verbose)

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    if plot:
        plt.figure()
        plt.title(title)
        if ylim is not None:
            plt.ylim(*ylim)
        plt.xlabel(u"Number of training samples")
        plt.ylabel(u"Score")
        plt.gca().invert_yaxis()
        plt.grid()

        plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                         alpha=0.1, color="b")
        plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                         alpha=0.1, color="r")
        plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label=u"Training score")
        plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label=u"Cross-validation score")

        plt.legend(loc="best")

        plt.draw()
        plt.show()
        plt.gca().invert_yaxis()

    midpoint = ((train_scores_mean[-1] + train_scores_std[-1]) + (test_scores_mean[-1] - test_scores_std[-1])) / 2
    diff = (train_scores_mean[-1] + train_scores_std[-1]) - (test_scores_mean[-1] - test_scores_std[-1])
    return midpoint, diff


7. Model ensembling

from sklearn import linear_model
from sklearn.ensemble import BaggingRegressor

def LogisticRegre(df):
    train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    train_np = train_df.values

    # y is the Survived outcome
    y = train_np[:,0]
    # X holds the feature attribute values
    X = train_np[:,1:]

    clf = linear_model.LogisticRegression(C=1.0, penalty='l1', solver='liblinear', tol=1e-6)
    bagging_clf = BaggingRegressor(clf, n_estimators=20, max_samples=0.8, max_features=1.0, bootstrap=True,
                                   bootstrap_features=False, n_jobs=-1)
    return bagging_clf.fit(X, y)  # return the fitted bagged ensemble
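Since Survived is a binary label, BaggingClassifier is arguably the more natural wrapper here than BaggingRegressor, whose predictions are averaged real values rather than class votes. A hedged sketch of the swap, as a drop-in replacement for the bagging_clf construction inside the function above (then return bagging_clf.fit(X, y) as before):

from sklearn.ensemble import BaggingClassifier

# Same bagging setup, but the ensemble votes on class labels instead of averaging regression outputs
bagging_clf = BaggingClassifier(clf, n_estimators=20, max_samples=0.8, max_features=1.0,
                                bootstrap=True, bootstrap_features=False, n_jobs=-1)
bagging_clf.fit(X, y)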

The main() function:

import pandas as pd
import numpy as np
import DataTreating
import LogisticRegression
from sklearn.ensemble import RandomForestRegressor
import sklearn.preprocessing as preprocessing
from sklearn import linear_model
from sklearn import model_selection
import learning_curve
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']        # set the default font

if __name__ == '__main__':
    data_train = pd.read_csv("G:/KNNtest/Titanic/train.csv")
    data_test = pd.read_csv("G:/KNNtest/Titanic/test1.csv")
    tr_data,rfr1= DataTreating.set_missing_ages(data_train)
    tr_d = DataTreating.set_cabin_type(tr_data)
    te_data,rfr2= DataTreating.set_missing_ages(data_test)
    te_d = DataTreating.set_cabin_type(te_data)

    # test = te_d.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    # clf = LogisticRegression.LogisticRegre(test)
    # predictions = clf.predict(test)
    # result = pd.DataFrame(
    #     {'PassengerId': te_d['PassengerId'].values, 'Survived': predictions.astype(np.int32)})
    # result.to_csv("G:/KNNtest/Titanic/logistic_regression_predictions.csv", index=False)



    # !!!! Split the data, training data : CV data = 7:3

    split_train, split_cv = model_selection.train_test_split(tr_d, test_size=0.3, random_state=0)  # split the processed training set, not the test set
    train_df = split_train.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    # Build the model
    clf = linear_model.LogisticRegression(C=1.0, penalty='l1', solver='liblinear', tol=1e-6)
    clf.fit(train_df.values[:, 1:], train_df.values[:, 0])
    # train_df.to_csv("G:/KNNtest/Titanic/train_df.csv")

    # !!! Predict on the cross-validation split
    cv_df = split_cv.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    predictions = clf.predict(cv_df.values[:, 1:])

    # cv_df.to_csv("G:/KNNtest/Titanic/cv_df.csv")
    # # Inspect the bad cases (misclassified CV samples)
    # origin_data_train = pd.read_csv("G:/KNNtest/Titanic/train.csv")
    # bad_cases = origin_data_train.loc[
    #     origin_data_train['PassengerId'].isin(split_cv[predictions != cv_df.values[:, 0]]['PassengerId'].values)]
    # bad_cases.to_csv("G:/KNNtest/Titanic/bad_cases.csv")

    # !!!!! Build the logistic regression model
    train_clf = LogisticRegression.LogisticRegre(tr_d)
    pd.DataFrame({"columns": list(te_d.columns)[2:14], "coef": list(train_clf.coef_.T)})  # associate the model coefficients with their features


    clf = linear_model.LogisticRegression(C=1.0, penalty='l1', solver='liblinear', tol=1e-6)
    all_data = tr_d.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
    X = all_data.values[:, 1:]
    y = all_data.values[:, 0]
    # print(model_selection.cross_val_score(clf, X, y, cv=5))       # cross-validation scores

    # !!!!! Learning curve
    learning_curve.plot_learning_curve(clf, X=all_data.values[:, 1:], y=all_data.values[:, 0], title='Learning_Curve')
    print(clf.score(X,y))





Reposted from blog.csdn.net/weixin_40924580/article/details/82809484