Modeling and Analysis


Modeling the data with different machine learning methods

#     Model selection
#     Cross-validation
# Cross-validation with a random forest
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold  # moved to sklearn.model_selection in sklearn >= 0.20
from sklearn.metrics import confusion_matrix, log_loss
import time
print('find best n_estimators for RandomForestClassifier')
min_score = 100000
best_n = 0
scores_n = []
range_n = np.logspace(0, 2, num=3).astype(int)  # yields 1, 10, 100
for n in range_n:
    print('number of trees: {0}'.format(n))
    t1 = time.time()
    rfc_score = 0
    rfc = RandomForestClassifier(n_estimators=n)  # n_estimators: how many trees to train
    # shuffle: whether to shuffle before splitting; train_k and test_k are index arrays
    print(train_kobe.columns)  # sanity-check the feature columns
    for train_k, test_k in KFold(len(train_kobe), n_folds=10, shuffle=True):
        # this step fails if string-valued features (e.g. 'IND') have not been converted to floats
        rfc.fit(train_kobe.iloc[train_k], train_label.iloc[train_k])  # .iloc selects rows by positional index
        pred = rfc.predict_proba(train_kobe.iloc[test_k])  # log_loss expects probabilities, not hard labels
        rfc_score += log_loss(train_label.iloc[test_k], pred) / 10  # average over the 10 folds
    scores_n.append(rfc_score)
    if rfc_score < min_score:
        min_score = rfc_score
        best_n = n
    t2 = time.time()
    print('Done processing {0} trees ({1:.3f}sec)'.format(n,t2-t1))
print(best_n,min_score)
print('find best max_depth for RandomForestClassifier')
min_score = 100000
best_m = 0
scores_m = []
range_m = np.logspace(0,2,num=3).astype(int)
for m in range_m:
    print('max_depth: {0}'.format(m))
    t1 = time.time()
    rfc_score = 0
    rfc = RandomForestClassifier(max_depth=m, n_estimators=best_n)  # max_depth: maximum depth of each tree
    for train_k, test_k in KFold(len(train_kobe), n_folds=10, shuffle=True):
        rfc.fit(train_kobe.iloc[train_k], train_label.iloc[train_k])
        pred = rfc.predict_proba(train_kobe.iloc[test_k])  # probabilities, as above
        rfc_score += log_loss(train_label.iloc[test_k], pred) / 10
    scores_m.append(rfc_score)  # fixed: was scores_n, which mixed the two searches
    if rfc_score < min_score:
        min_score = rfc_score
        best_m = m
    t2 = time.time()
    print('Done processing max_depth={0} ({1:.3f}sec)'.format(m, t2 - t1))
print(best_m,min_score)
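# The two nested searches above amount to a manual grid search. As a compact
# sketch (not the original code), the same tuning can be done with the modern
# sklearn.model_selection API, assuming the same train_kobe / train_label data:
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, KFold

param_grid = {
    'n_estimators': [1, 10, 100],  # same grid as np.logspace(0, 2, num=3)
    'max_depth': [1, 10, 100],
}
cv = KFold(n_splits=10, shuffle=True, random_state=0)
grid = GridSearchCV(RandomForestClassifier(), param_grid,
                    scoring='neg_log_loss', cv=cv)
grid.fit(train_kobe, train_label)
print(grid.best_params_, -grid.best_score_)  # best setting and its mean log loss
# Note: GridSearchCV scores the full Cartesian product, whereas the loops
# above fix best_n first and only then search max_depth.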


#     Hyperparameter tuning
# ******************** Result analysis and improvement ******************** #
#     Performance metrics:
#         classification tasks: precision, recall, confusion matrix
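# A minimal sketch on toy labels (not the project data) of how these metrics
# relate: with confusion matrix [[TN, FP], [FN, TP]],
# precision = TP / (TP + FP) and recall = TP / (TP + FN).
from sklearn.metrics import confusion_matrix, precision_score, recall_score
y_true = [0, 0, 1, 1, 1, 0, 1]
y_pred = [0, 1, 1, 0, 1, 0, 1]
print(confusion_matrix(y_true, y_pred))               # [[2 1]
                                                      #  [1 3]]
print('precision:', precision_score(y_true, y_pred))  # 3 / (3 + 1) = 0.75
print('recall:', recall_score(y_true, y_pred))        # 3 / (3 + 1) = 0.75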

# Compute the confusion matrix on the processed (undersampled) dataset
# Binary classification of the data with logistic regression
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import KFold,cross_val_score
from sklearn.metrics import confusion_matrix,recall_score,classification_report
def printing_Kfold_scores(X_train_data,y_train_data):
    fold = KFold(len(y_train_data),5,shuffle = False)
    c_param_range = [0.01,0.1,1,10,100]
    results_table = pd.DataFrame(index=range(len(c_param_range)), columns=['C_parameter', 'Mean recall score'])
    results_table['C_parameter'] = c_param_range
    j = 0
    for c_param in c_param_range:
        print('------------------')
        print('C parameter:',c_param)
        print('------------------')
        print('')
        recall_accs = []
        for iteration, indices in enumerate(fold, start=1):  # start=1 numbers the folds from 1
            # iteration is the fold counter; indices is a (train_idx, test_idx) pair
            lr = LogisticRegression(C=c_param, penalty='l1', solver='liblinear')  # C: inverse of regularization strength (as in SVM, smaller = stronger regularization); liblinear supports the l1 penalty
            lr.fit(X_train_data.iloc[indices[0], :], y_train_data.iloc[indices[0], :].values.ravel())  # ravel flattens to 1-D (same as flatten)
            y_pred_undersample = lr.predict(X_train_data.iloc[indices[1], :].values)
            recall_acc = recall_score(y_train_data.iloc[indices[1], :].values, y_pred_undersample)
            recall_accs.append(recall_acc)
            print('Iteration',iteration,'recall score = ',recall_acc)
        results_table.loc[j, 'Mean recall score'] = np.mean(recall_accs)
        j += 1
        print('')
        print('Mean recall score',np.mean(recall_accs))
        print('')
    best_c = results_table.loc[results_table['Mean recall score'].astype('float64').idxmax()]['C_parameter']
    print('******************************************************************************')
    print('Best model to choose from cross validation is with C parameter = ', best_c)
    print('******************************************************************************')
    return best_c
best_c = printing_Kfold_scores(X_train_undersample, y_train_undersample)
 
# Compute the confusion matrix on the processed dataset
import itertools  # needed by plot_confusion_matrix below
lr = LogisticRegression(C = best_c , penalty= 'l1')
lr.fit(X_train_undersample,y_train_undersample.values.ravel())
y_pred_undersample = lr.predict(X_test_undersample.values)
cnf_matrix = confusion_matrix(y_test_undersample, y_pred_undersample)
np.set_printoptions(precision=2)  # limit the precision of printed floats
print('Recall metric in the testing dataset:',cnf_matrix[1,1]/(cnf_matrix[1,0]+cnf_matrix[1,1]))
# Plot the confusion matrix
def plot_confusion_matrix(cm,classes,title='Confusion matrix',cmap = plt.cm.Blues):
    plt.imshow(cm,interpolation='nearest',cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks,classes,rotation=0)
    plt.yticks(tick_marks,classes)
    thresh = cm.max()/2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
plt.figure()
class_names = [0,1]
plot_confusion_matrix(cnf_matrix,classes = class_names,title='Confusion matrix')
plt.show()


# Try classifying the data with logistic regression
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
alg = LogisticRegression(random_state=1)
# Split the training set into k folds: (k-1) folds train the classifier and the remaining fold evaluates it, giving k training runs and k scores.
scores = cross_validation.cross_val_score(alg, titanic[predictors],titanic['Survived'],cv =3)
print(scores.mean())  # measured: 0.7878787878787877


from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
predictors = ['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked']
alg = RandomForestClassifier(random_state= 1, n_estimators= 10, min_samples_split= 2, min_samples_leaf= 1)
kf = cross_validation.KFold(titanic.shape[0],n_folds=3,random_state=1)
scores = cross_validation.cross_val_score(alg,titanic[predictors],titanic['Survived'],cv = kf)
print(scores.mean()) #0.7856341189674523
# Change the random forest parameters and compare the results
alg = RandomForestClassifier(random_state= 1, n_estimators= 50, min_samples_split= 4, min_samples_leaf= 2)
kf = cross_validation.KFold(titanic.shape[0],n_folds=3,random_state=1)
scores = cross_validation.cross_val_score(alg,titanic[predictors],titanic['Survived'],cv = kf)
print(scores.mean()) #0.8159371492704826


# Classification by combining models (ensembling)
# Training
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np
algorithms = [
            [GradientBoostingClassifier(random_state=1,n_estimators=25,max_depth=3),['Pclass','Sex','Fare','FamilySize','Title','Age','Embarked']],
            [LogisticRegression(random_state=1),['Pclass','Sex','Fare','FamilySize','Title','Age','Embarked']]
             ]
kf = KFold(titanic.shape[0],n_folds=3,random_state=1)
predictions = []
for train, test in kf:
    train_target = titanic['Survived'].iloc[train]
    full_test_predictions = []
    for alg, predictors in algorithms:
        alg.fit(titanic[predictors].iloc[train, :], train_target)
        test_predictions = alg.predict_proba(titanic[predictors].iloc[test, :].astype(float))[:, 1]
        full_test_predictions.append(test_predictions)
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    # Give different classifiers different weights, e.g.:
    # test_predictions = (full_test_predictions[0] * 3 + full_test_predictions[1]) / 4
    test_predictions[test_predictions <= .5] = 0
    test_predictions[test_predictions > .5] = 1
    predictions.append(test_predictions)
# import numpy as np
predictions = np.concatenate(predictions,axis = 0)
# The original accuracy formula (commented out below) is wrong, as in the earlier case:
# accuracy = sum(predictions[predictions == titanic['Survived']]) / len(predictions)  # measured 0.27946127946127947
# replaced with the correct comparison:
accuracy = sum(predictions == titanic['Survived']) / len(predictions)
print('GradientBoostingClassifier accuracy:',accuracy) #0.82154 
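# The averaging above is soft voting. As a sketch of the same idea with
# sklearn's built-in VotingClassifier (assuming the titanic DataFrame and a
# single predictors list as above), including the 3:1 weighting variant:
from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

voter = VotingClassifier(
    estimators=[
        ('gbc', GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3)),
        ('lr', LogisticRegression(random_state=1)),
    ],
    voting='soft',   # average the predicted probabilities, as in the loop above
    weights=[3, 1],  # the 3:1 weighting from the commented-out line
)
scores = cross_val_score(voter, titanic[predictors].astype(float),
                         titanic['Survived'], cv=3)
print(scores.mean())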


# Decision tree classification
from sklearn.tree import DecisionTreeClassifier
decision_tree_classifier= DecisionTreeClassifier()
decision_tree_classifier.fit(training_inputs,training_classes)
print(decision_tree_classifier.score(testing_inputs, testing_classes))  # accuracy on the test set
# Visualize the cross-validation accuracy
from sklearn.cross_validation import cross_val_score
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
all_inputs = iris_data[['sepal_length_cm','sepal_width_cm','petal_length_cm','petal_width_cm']].values  # .values is optional here
all_classes = iris_data['class'].values  # .values is optional here
decision_tree_classifier = DecisionTreeClassifier()  # try e.g. max_depth=1 to see a shallow tree underfit
cv_scores = cross_val_score(decision_tree_classifier,all_inputs,all_classes,cv =10)
print(cv_scores)
sb.distplot(cv_scores)  # pass kde=False for a plain histogram
plt.title('Average score:{}'.format(np.mean(cv_scores)))
plt.show()

from sklearn.tree import DecisionTreeClassifier
# Select the best decision tree parameters with GridSearchCV
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
decision_tree_classifier = DecisionTreeClassifier()
parameter_grid = {'max_depth':[1,2,3,4,5],
                  'max_features':[1,2,3,4]}
cross_validation = StratifiedKFold(all_classes,n_folds=10)
grid_search = GridSearchCV(decision_tree_classifier,param_grid = parameter_grid,cv = cross_validation)
grid_search.fit(all_inputs, all_classes)
print('Best score:{}'.format(grid_search.best_score_))
print('Best parameter:{}'.format(grid_search.best_params_))
# Visualize the scores explored by GridSearchCV as a heatmap
grid_visualization = []
for grid_pair in grid_search.grid_scores_:
    grid_visualization.append(grid_pair.mean_validation_score)
grid_visualization = np.array(grid_visualization)
grid_visualization.shape = (5,4)
sb.heatmap(grid_visualization,cmap='Blues')
plt.xticks(np.arange(4)+0.5,grid_search.param_grid['max_features'])
plt.yticks(np.arange(5)+0.5,grid_search.param_grid['max_depth'][::-1])
plt.xlabel('max_features')
plt.ylabel('max_depth')
## The dot file can be converted to a PDF with the Graphviz software: http://www.graphviz.org/
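# A minimal sketch of actually producing that dot file from the tuned tree
# above (the filename iris_tree.dot is just an example):
from sklearn.tree import export_graphviz

best_tree = grid_search.best_estimator_  # the fitted tree found by GridSearchCV
export_graphviz(best_tree, out_file='iris_tree.dot',
                feature_names=['sepal_length_cm', 'sepal_width_cm',
                               'petal_length_cm', 'petal_width_cm'])
# then e.g.:  dot -Tpdf iris_tree.dot -o iris_tree.pdf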

# Choose the best random forest parameters the same way
from sklearn.ensemble import RandomForestClassifier
random_forest_classifier = RandomForestClassifier()
parameter_grid = {'n_estimators':[5,10,25,50],
                  'criterion':['gini','entropy'],
                  'max_features':[1,2,3,4],
                  'warm_start':[True,False]}
cross_validation = StratifiedKFold(all_classes,n_folds = 10)
grid_search = GridSearchCV(random_forest_classifier,param_grid=parameter_grid,cv = cross_validation)
grid_search.fit(all_inputs,all_classes)
print('Best score:{}'.format(grid_search.best_score_))
print('Best parameter:{}'.format(grid_search.best_params_))
print('Best_estimator:{}'.format(grid_search.best_estimator_))
plt.show()

###################### Using a stacked (cascaded) prediction model
# Going to use these 5 base models for the stacking
import pandas as pd
import numpy as np
import re
import sklearn
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.graph_objs as go
import plotly.tools as tls
py.init_notebook_mode(connected=True)
import warnings
warnings.filterwarnings('ignore')
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.cross_validation import KFold

ntrain = train.shape[0]
ntest = test.shape[0]
SEED = 0 # for reproducibility
NFOLDS = 5 # set folds for out-of-fold prediction
kf = KFold(ntrain, n_folds= NFOLDS, random_state=SEED)
# Wrap the sklearn classifiers so we can call them uniformly later
class SklearnHelper(object):
    def __init__(self, clf, seed=0, params=None):
        params['random_state'] = seed
        self.clf = clf(**params)
    def train(self, x_train, y_train):
        self.clf.fit(x_train, y_train)
    def predict(self, x):
        return self.clf.predict(x)
    def fit(self,x,y):
        return self.clf.fit(x,y)
    def feature_importances(self, x, y):
        return self.clf.fit(x, y).feature_importances_  # return (not just print) so callers can use the array
# Generate out-of-fold (OOF) predictions: each training row is predicted by a model trained on the other folds; these later feed the second-level XGBoost model
def get_oof(clf, x_train, y_train, x_test):
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    oof_test_skf = np.empty((NFOLDS, ntest))
    for i, (train_index, test_index) in enumerate(kf):
        x_tr = x_train[train_index]
        y_tr = y_train[train_index]
        x_te = x_train[test_index]
        clf.train(x_tr, y_tr)
        oof_train[test_index] = clf.predict(x_te)
        oof_test_skf[i, :] = clf.predict(x_test)
    oof_test[:] = oof_test_skf.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
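# Why out-of-fold matters: the second-level model must be trained on
# predictions for rows the first-level model never saw, otherwise the stacked
# features leak the labels. A tiny self-contained illustration of the pattern
# (toy data, modern KFold API; independent of the globals used by get_oof):
import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression

X = np.random.RandomState(0).randn(20, 3)
y = np.arange(20) % 2  # alternating 0/1 labels, just for the demo
oof = np.zeros(len(X))
for tr, te in KFold(n_splits=5).split(X):
    model = LogisticRegression().fit(X[tr], y[tr])
    oof[te] = model.predict(X[te])  # each row predicted by a model that never saw it
print(oof.shape)  # (20,): one new feature column for the second-level model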
# Train five first-level models; set each model's parameters
# Random Forest parameters
rf_params = {
    'n_jobs': -1,
    'n_estimators': 500,
     'warm_start': True, 
     #'max_features': 0.2,
    'max_depth': 6,
    'min_samples_leaf': 2,
    'max_features' : 'sqrt',
    'verbose': 0
}
# Extra Trees Parameters
et_params = {
    'n_jobs': -1,
    'n_estimators':500,
    #'max_features': 0.5,
    'max_depth': 8,
    'min_samples_leaf': 2,
    'verbose': 0
}
# AdaBoost parameters
ada_params = {
    'n_estimators': 500,
    'learning_rate' : 0.75
}
# Gradient Boosting parameters
gb_params = {
    'n_estimators': 500,
     #'max_features': 0.2,
    'max_depth': 5,
    'min_samples_leaf': 2,
    'verbose': 0
}
# Support Vector Classifier parameters 
svc_params = {
    'kernel' : 'linear',
    'C' : 0.025
}
# Create the model objects from the parameter settings above:
rf = SklearnHelper(clf=RandomForestClassifier, seed=SEED, params=rf_params)
et = SklearnHelper(clf=ExtraTreesClassifier, seed=SEED, params=et_params)
ada = SklearnHelper(clf=AdaBoostClassifier, seed=SEED, params=ada_params)
gb = SklearnHelper(clf=GradientBoostingClassifier, seed=SEED, params=gb_params)
svc = SklearnHelper(clf=SVC, seed=SEED, params=svc_params)
# Convert our data into the numpy arrays the models expect:
y_train = train['Survived'].ravel()
train = train.drop(['Survived'], axis=1)
x_train = train.values # Creates an array of the train data
x_test = test.values # Creates an array of the test data
# Run the five base models through the OOF routine; their predictions become the training features for XGBoost:
et_oof_train, et_oof_test = get_oof(et, x_train, y_train, x_test) # Extra Trees
rf_oof_train, rf_oof_test = get_oof(rf,x_train, y_train, x_test) # Random Forest
ada_oof_train, ada_oof_test = get_oof(ada, x_train, y_train, x_test) # AdaBoost 
gb_oof_train, gb_oof_test = get_oof(gb,x_train, y_train, x_test) # Gradient Boost
svc_oof_train, svc_oof_test = get_oof(svc,x_train, y_train, x_test) # Support Vector Classifier
print("Training is complete")
# Extract the feature importances from each model
rf_feature = rf.feature_importances(x_train,y_train)
et_feature = et.feature_importances(x_train, y_train)
ada_feature = ada.feature_importances(x_train, y_train)
gb_feature = gb.feature_importances(x_train,y_train)
cols = train.columns.values
# Collect the importances into a DataFrame (the wrapper's feature_importances now returns the arrays, so rf_feature etc. are directly usable)
# Create a dataframe with features
feature_dataframe = pd.DataFrame({
    'features': cols,
    'Random Forest feature importances': rf_feature,
    'Extra Trees  feature importances': et_feature,
    'AdaBoost feature importances': ada_feature,
    'Gradient Boost feature importances': gb_feature
})
# Plots make the comparison much clearer. The four models are plotted the
# same way, so loop over the importance columns instead of repeating the code:
for col, plot_title in [
        ('Random Forest feature importances', 'Random Forest Feature Importance'),
        ('Extra Trees  feature importances', 'Extra Trees Feature Importance'),
        ('AdaBoost feature importances', 'AdaBoost Feature Importance'),
        ('Gradient Boost feature importances', 'Gradient Boosting Feature Importance')]:
    trace = go.Scatter(
        y = feature_dataframe[col].values,
        x = feature_dataframe['features'].values,
        mode = 'markers',
        marker = dict(
            sizemode = 'diameter',
            sizeref = 1,
            size = 25,
            color = feature_dataframe[col].values,  # color each marker by its importance
            colorscale = 'Portland',
            showscale = True
        ),
        text = feature_dataframe['features'].values
    )
    layout = go.Layout(
        autosize = True,
        title = plot_title,
        hovermode = 'closest',
        yaxis = dict(
            title = 'Feature Importance',
            ticklen = 5,
            gridwidth = 2
        ),
        showlegend = False
    )
    fig = go.Figure(data=[trace], layout=layout)
    py.iplot(fig, filename='scatter2010')

feature_dataframe['mean'] = feature_dataframe.mean(axis=1)  # row-wise mean of the four importance columns (newer pandas may need numeric_only=True)
# Peek at the current data format:
feature_dataframe.head(3)
y = feature_dataframe['mean'].values
x = feature_dataframe['features'].values
data = [go.Bar(
            x= x,
            y= y,
            width = 0.5,
            marker=dict(
               color = feature_dataframe['mean'].values,
            colorscale='Portland',
            showscale=True,
            reversescale = False
            ),
            opacity=0.6
        )]
layout= go.Layout(
    autosize= True,
    title= 'Barplots of Mean Feature Importance',
    hovermode= 'closest',
#     xaxis= dict(
#         title= 'Pop',
#         ticklen= 5,
#         zeroline= False,
#         gridwidth= 2,
#     ),
    yaxis=dict(
        title= 'Feature Importance',
        ticklen= 5,
        gridwidth= 2
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='bar-direct-labels')
x_train = np.concatenate((et_oof_train, rf_oof_train, ada_oof_train, gb_oof_train, svc_oof_train), axis=1)  # stack the OOF columns into the level-2 training matrix
x_test = np.concatenate(( et_oof_test, rf_oof_test, ada_oof_test, gb_oof_test, svc_oof_test), axis=1)
gbm = xgb.XGBClassifier(
    #learning_rate = 0.02,
    n_estimators= 2000,
    max_depth= 4,
    min_child_weight= 2,
    #gamma=1,
    gamma=0.9,                        
    subsample=0.8,
    colsample_bytree=0.8,
    objective= 'binary:logistic',
    nthread= -1,
    scale_pos_weight=1).fit(x_train, y_train)
predictions = gbm.predict(x_test)
# Generate Submission File 
StackingSubmission = pd.DataFrame({ 'PassengerId': PassengerId,
                            'Survived': predictions })
StackingSubmission.to_csv("StackingSubmission.csv", index=False)


############### Employee attrition case study
from sklearn.model_selection import ShuffleSplit
# n_splits: how many shuffled train/test splits to run
cv = ShuffleSplit(n_splits = 20,test_size = 0.3)
# Build a random forest model
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
rf_model = RandomForestClassifier()
rf_param = {'n_estimators':range(1,11)}
rf_grid = GridSearchCV(rf_model,rf_param,cv = cv)
rf_grid.fit(X_train,y_train)
print('Parameter with best score:')
print(rf_grid.best_params_)
print('Cross validation score:',rf_grid.best_score_) #0.9824920634920635
best_rf = rf_grid.best_estimator_
print('Test score:',best_rf.score(X_test,y_test)) #0.9837777777777778
# Use the random forest to see which features matter most
features = X.columns
feature_importances = best_rf.feature_importances_
features_df = pd.DataFrame({'Features':features,'Importance Score':feature_importances})
features_df.sort_values('Importance Score',inplace = True,ascending = False)
print(features_df)
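# A quick visual check of the ranking, as a sketch assuming the features_df
# DataFrame built above:
import matplotlib.pyplot as plt
features_df.plot.barh(x='Features', y='Importance Score', legend=False)
plt.xlabel('Importance Score')
plt.tight_layout()
plt.show()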
