Python data analysis example -- exploring happiness (numpy/pandas/sklearn)

The data comes from the Tianchi Lab "Mining Happiness" competition. After an exploratory analysis of the dataset, this post walks through a series of preprocessing steps, then builds and tunes models on the processed data to obtain the fitted results of the optimized models. The goals are twofold: first, to identify the factors that influence happiness by processing and modeling this data; second, to turn the process into a reasonably general analysis-and-modeling template that can be reused quickly on other projects.

# -*- coding: utf-8 -*-
"""
Exploring happiness
Created on 2019-5-21 21:38:05
Updated on 2019-12-7 16:08:17
@author: lzy
"""

# Template settings
# Training set name: happiness_train
# Test set name: happiness_test

# Check and set the working directory
import os
os.getcwd()
os.chdir('E:\\python')

# Load the usual packages
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
from sklearn.decomposition import PCA # the public name is the PCA class, not the lowercase pca module
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import BaggingRegressor
from lightgbm import LGBMRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler # standardization
from sklearn.model_selection import GridSearchCV
from sklearn import preprocessing

# Read the data
happiness_train = pd.read_csv('happiness_train_abbr.csv')
happiness_test  = pd.read_csv('happiness_test_abbr.csv')

# Look at the distribution of each variable
pd.set_option('display.max_columns', None) # force describe() to show every column
train_describe = happiness_train.describe()
print(train_describe)

# Plot the target against a normal distribution -- use this after the missing target values have been handled
sns.distplot(happiness_train['happiness'], fit=norm) # histogram of the target, plus a kernel-density curve and a fitted normal curve
(mu, sigma) = norm.fit(happiness_train['happiness']) # fitted parameters for the legend
plt.legend([r'Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f})'.format(mu, sigma)], loc='best') # draw the legend (raw string avoids invalid \m and \s escapes)
plt.ylabel('Frequency') # y-axis label
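# Note: sns.distplot was removed in newer seaborn releases. Assuming a recent
# seaborn (>= 0.11), an equivalent figure can be built from histplot plus a
# manually drawn normal pdf -- a sketch, not part of the original post:
# sns.histplot(happiness_train['happiness'], stat='density', kde=True)
# mu, sigma = norm.fit(happiness_train['happiness'])
# grid = np.linspace(happiness_train['happiness'].min(), happiness_train['happiness'].max(), 200)
# plt.plot(grid, norm.pdf(grid, mu, sigma), 'r-', label=r'Normal dist. ($\mu=$ {:.2f}, $\sigma=$ {:.2f})'.format(mu, sigma))
# plt.legend(loc='best')
# plt.show()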

# Probability plot of the target (stats.probplot draws a Q-Q plot against the normal by default)
fig = plt.figure() # default figure width and height
res = stats.probplot(happiness_train['happiness'],plot=plt)
plt.show()

# Check for missing values
happiness_train_na = pd.read_csv('happiness_train_abbr.csv', na_values='-8') # a second copy with -8 read as NaN, to count the missingness
train_missing = happiness_train_na.isnull().sum() / len(happiness_train_na) # proportion of missing values per variable
print(train_missing)

# Heatmap of the correlations between the variables and the target
corrmat = happiness_train.corr()
fig = plt.figure(figsize=(20, 9))
# f, ax = plt.subplots(figsize=(20, 9)) # this line has a similar effect
sns.heatmap(corrmat, vmax=0.8, annot=True)
plt.show()
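# The heatmap is dense with 30+ variables; the same correlations can also be
# ranked directly against the target (a small convenience sketch using the
# corrmat computed above):
top_corr = corrmat['happiness'].abs().sort_values(ascending=False)
print(top_corr.head(10)) # the top entry is happiness itself (corr = 1)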

# Training-set preprocessing plan
'''
id: drop yes
happiness: remove the rows coded -8 yes
county: drop yes
survey_time: drop yes
birth: convert to age yes
nationality: replace every -8 with 1 yes
religion: fill with the mode, 1 yes
religion_freq: fill with the mode, 1 yes
edu: impute from age (correlation 0.42) yes
income < 0: fill with the mean computed after dropping the negatives yes
political: fill with the mode, 1 yes
height_cm, weight_jin: leave untouched for now (revisit if further study needs them)
health: replace -8 to match health_problem (correlation 0.54)
health_problem: replace -8 to match health (finally replace any remaining -8 in both columns with 3, in case both are missing) yes
depression: replace -8 with health (correlation 0.42) yes
socialize: replace -8 with 3 yes
relax: replace with socialize yes
learn vs edu (correlation 0.52): compute the learn value implied by edu, round, and substitute yes
equity: replace -8 with 3 yes
family_status: replace -8 with 3 yes
class: replace with family_status * 2 - 1 yes
work_status: drop yes
work_yr: drop yes
work_type: drop yes
work_manage: drop yes
family_m: > 10 replace with 10, < 1 replace with 2 yes
family_income = (income + mean(income)) / 2 * family_m yes
house < 0: replace with 1 yes
car: replace -8 with 2 yes
status_peer: replace -8 with 2 yes
status_3_before: replace -8 with 2 yes
view: replace -8 with 3 yes
inc_ability: replace -8 with 2 yes

Advanced feature engineering (not done here):
combine city and income
combine personal income and family income
'''

'''References:
https://www.cnblogs.com/gczr/p/6761613.html
https://blog.csdn.net/qq_32618817/article/details/80587228  # groupby in Python
Work ahead:
drop the rows whose target value is missing
drop the unneeded columns
add and drop columns
define a replacement function and apply it column by column
handle the special cases individually

Describe/explore the processed data, and once it looks reasonable split it into training and test samples
'''

# Preprocessing
train_processed = happiness_train_na.copy() # use the copy where -8 was read as NaN; .copy() keeps the source dataset untouched while a new one is processed
train_processed.shape # check the number of rows and columns
train_processed = train_processed.drop(['id','survey_time','county','work_status','work_yr','work_type','work_manage'], axis=1) # drop several columns
train_processed = train_processed[~train_processed['happiness'].isnull()] # drop the rows whose target is missing
train_processed.describe()
# np.shape(train_processed) # check the result of the preprocessing

# Define a NaN-replacement helper
# train_processed.loc[train_processed['nationality'].isnull(), 'nationality'] = 1 # method 1; the function below wraps this pattern to cut down on repetition
def na_replace(column, value):
    train_processed.loc[train_processed[column].isnull(), column] = value
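# A more reusable variant (a hypothetical helper, not in the original post):
# it takes the DataFrame explicitly instead of mutating the module-level
# train_processed, and uses pandas' fillna instead of boolean indexing.
def fill_na(df, column, value):
    df[column] = df[column].fillna(value)
# usage: fill_na(train_processed, 'nationality', 1)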

# Preprocessing, part 1
na_replace('nationality', 1) # note: the column name must be quoted
na_replace('religion', 1)
na_replace('religion_freq', 1)
na_replace('political', 1)
na_replace('socialize', 3)
na_replace('equity', 3)
na_replace('family_status', 3)
train_processed.loc[train_processed['family_m']>10,'family_m'] = 10
train_processed.loc[train_processed['family_m'] < 1,'family_m'] = 2
train_processed.loc[train_processed['house'] < 0,'house'] = 1
na_replace('car', 2)
na_replace('status_peer', 2)
na_replace('status_3_before', 2)
na_replace('view', 3)
na_replace('inc_ability', 2)
train_processed['age'] = 2015 - train_processed['birth'] # age at survey time; the survey was fielded in 2015
train_processed = train_processed.drop('birth', axis=1)

train_processed.loc[train_processed['relax'].isnull(),'relax'] = train_processed['socialize']
train_processed.loc[train_processed['class'].isnull(),'class'] = train_processed['family_status'] * 2 - 1

# income: convert the negatives to NaN first, then replace every NaN with the mean of income
train_processed.loc[train_processed['income']<0,'income'] = np.nan
na_replace('income',train_processed['income'].mean())

train_processed.loc[train_processed['family_income'].isnull(),'family_income'] = (train_processed['income'] + train_processed['income'].mean())/2 * train_processed['family_m']
train_processed.loc[train_processed['family_income'] < 0,'family_income'] = (train_processed['income'] + train_processed['income'].mean())/2 * train_processed['family_m']
train_processed.loc[train_processed['health'].isnull(),'health'] = train_processed['health_problem']
train_processed.loc[train_processed['health_problem'].isnull(),'health_problem'] = train_processed['health']
na_replace('health', 3)
na_replace('health_problem', 3)
train_processed.loc[train_processed['depression'].isnull(),'depression'] = train_processed['health']

# edu: impute from age (correlation 0.42)
edu_groupby_age = train_processed['edu'].groupby(train_processed['age']).mean().apply(np.round)
edu_groupby_age = pd.DataFrame(edu_groupby_age) # convert the Series to a DataFrame for the left join
train_processed = pd.merge(train_processed, edu_groupby_age, how='left', left_on='age', right_on='age', copy=True)
train_processed.loc[train_processed['edu_x'].isnull(),'edu_x'] = train_processed['edu_y']
train_processed = train_processed.drop('edu_y', axis=1)
train_processed = train_processed.rename(columns={'edu_x': 'edu'})
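# An equivalent, more compact alternative (a sketch; it should give the same
# result as the merge above): transform broadcasts each age group's rounded
# mean back onto its rows, and fillna touches only the rows where edu is missing.
# edu_by_age = train_processed.groupby('age')['edu'].transform('mean').round()
# train_processed['edu'] = train_processed['edu'].fillna(edu_by_age)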

# learn vs edu (correlation 0.52): compute the learn value implied by edu, round, and substitute
learn_groupby_edu = train_processed['learn'].groupby(train_processed['edu']).mean().apply(np.round)
learn_groupby_edu = pd.DataFrame(learn_groupby_edu) # convert the Series to a DataFrame for the left join
train_processed = pd.merge(train_processed, learn_groupby_edu, how='left', left_on='edu', right_on='edu', copy=True)
train_processed.loc[train_processed['learn_x'].isnull(),'learn_x'] = train_processed['learn_y']
train_processed = train_processed.drop(['learn_y'], axis=1)
train_processed = train_processed.rename(columns={'learn_x': 'learn'})

# Check the result of the preprocessing
train_processed.isnull().any() # verify that all missing values have been handled (all False means done)
train_processed_missing = train_processed.isnull().sum() / len(train_processed) # verify that the replacements succeeded
print(train_processed_missing)

# Separate the features and the target in the processed training data
X = train_processed.drop('happiness', axis=1).values # training-set features; dropping by name is safer than the hard-coded slice values[:, 1:34]
Y = train_processed['happiness'].values # training-set target
# X1_test = test_processed.values # the test set is left alone for now

# PCA -- skipped because the results after dimensionality reduction were poor
# pca_model = PCA(n_components=10) # set up the PCA model
# pca_model.fit(X) # fit the PCA model on the training features X
# X_pca = pca_model.transform(X) # project the training set with the fitted model
# the two steps above are equivalent to fit_transform: X_pca2 = pca_model.fit_transform(X); the fit step is mandatory
# X1_pca = pca_model.transform(X1_test) # project the test set with the fitted model
# print(pca_model.explained_variance_) # eigenvalues
# print(pca_model.explained_variance_ratio_) # variance explained per component
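# If PCA is revisited, the features should normally be standardized first,
# since PCA is driven by variance and these columns sit on very different
# scales -- a sketch, assuming the corrected PCA import above:
# X_scaled = StandardScaler().fit_transform(X)
# pca_model = PCA(n_components=10)
# X_pca = pca_model.fit_transform(X_scaled)
# print(pca_model.explained_variance_ratio_) # variance explained per component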

# Candidate models
# https://www.cnblogs.com/charlesblc/p/6159355.html model selection
# https://blog.csdn.net/yeoman92/article/details/75051848
'''
GBR
Random Forest
CART
SVR
AdaBoost
XGBoost
LightGBM
neural networks https://blog.csdn.net/qq_32023541/article/details/81019721
'''

'''Model training'''
# Split the data into training and test sets for fitting and validation
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=40)
# X_scale = preprocessing.scale(X) # use these two lines instead if standardization is needed
# X_train_scale, X_test_scale, Y_train, Y_test = train_test_split(X_scale, Y, test_size=0.2, random_state=40)
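# SVR and KNN are distance-based and therefore scale-sensitive. Instead of
# scaling X globally, a Pipeline (a sketch, not part of the original post)
# fits the scaler on the training split only, so no test statistics leak in:
from sklearn.pipeline import make_pipeline
scaled_svr = make_pipeline(StandardScaler(), SVR())
scaled_svr.fit(X_train, Y_train)
print('Scaled SVR MSE: %.4f' % mean_squared_error(Y_test, scaled_svr.predict(X_test)))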

# GradientBoostingRegressor (GBR)
myGBR = GradientBoostingRegressor()
myGBR.fit(X_train, Y_train) # fit the GBR model on the training features and target
Y_pred_GBR = myGBR.predict(X_test) # predict y on the held-out split
GBR_mse = mean_squared_error(Y_test, Y_pred_GBR) # MSE between the actual and predicted values on the held-out split
print('GBR model MSE: %.4f' % GBR_mse)

# Tuning recipe for GBR:
# 1. start with the default learning_rate=0.1
# 2. tune n_estimators (by the smallest std)
# 3. tune min_samples_split and max_depth together, or max_depth alone (see the sketch below)
# 4. tune min_samples_split and min_samples_leaf together
# 5. tune max_features
# 6. tune subsample
# 7. tune learning_rate
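# A sketch of step 3 above: tuning max_depth and min_samples_split jointly
# with GridSearchCV (the parameter ranges are illustrative assumptions, not
# values from the original post):
param_depth = {'max_depth': range(3, 8), 'min_samples_split': range(2, 11, 2)}
gsearch_depth = GridSearchCV(
    estimator=GradientBoostingRegressor(learning_rate=0.1, n_estimators=50, random_state=111),
    param_grid=param_depth,
    scoring='neg_mean_squared_error',
    n_jobs=-1,
    cv=5)
gsearch_depth.fit(X_train, Y_train)
print(gsearch_depth.best_params_, gsearch_depth.best_score_)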

# RandomForestRegressor (RF)
myRF = RandomForestRegressor() # not tuned yet; e.g. n_estimators=500, max_features='auto', max_depth=10, min_samples_split=10, verbose=1
myRF.fit(X_train, Y_train)
Y_pred_RF = myRF.predict(X_test)
RF_mse = mean_squared_error(Y_test, Y_pred_RF)
print('RandomForest model MSE: %.4f' % RF_mse)

# DecisionTreeRegressor (DTR)
myDTR = DecisionTreeRegressor(random_state=111)
myDTR.fit(X_train, Y_train)
Y_pred_DTR = myDTR.predict(X_test)
DTR_mse = mean_squared_error(Y_test, Y_pred_DTR)
print('DecisionTreeRegressor model MSE: %.4f' % DTR_mse)

# SupportVectorRegressor (SVR)
# https://www.cnblogs.com/zinyy/p/9535069.html
mySVR = SVR()
mySVR.fit(X_train, Y_train)
Y_pred_SVR = mySVR.predict(X_test)
SVR_mse = mean_squared_error(Y_test, Y_pred_SVR)
print('SupportVectorRegressor model MSE: %.4f' % SVR_mse)

# XGBRegressor
myXGB = XGBRegressor(random_state=111)
myXGB.fit(X_train, Y_train)
Y_pred_XGB = myXGB.predict(X_test)
XGB_mse = mean_squared_error(Y_test, Y_pred_XGB)
print('XGBRegressor model MSE: %.4f' % XGB_mse)

# AdaBoostRegressor
myADB = AdaBoostRegressor(random_state=111)
myADB.fit(X_train, Y_train)
Y_pred_ADB = myADB.predict(X_test)
ADB_mse = mean_squared_error(Y_test, Y_pred_ADB)
print('AdaBoostRegressor model MSE: %.4f' % ADB_mse)

# KNeighborsRegressor (KNN)
myKNN = KNeighborsRegressor()
myKNN.fit(X_train, Y_train)
Y_pred_KNN = myKNN.predict(X_test)
KNN_mse = mean_squared_error(Y_test, Y_pred_KNN)
print('KNeighborsRegressor model MSE: %.4f' % KNN_mse)

# BaggingRegressor
myBAG = BaggingRegressor()
myBAG.fit(X_train, Y_train)
Y_pred_BAG = myBAG.predict(X_test)
BAG_mse = mean_squared_error(Y_test, Y_pred_BAG)
print('BaggingRegressor model MSE: %.4f' % BAG_mse)

# LGBMRegressor
# https://blog.csdn.net/huacha__/article/details/81057150
# https://www.cnblogs.com/webRobot/p/10019448.html tuning walk-through reference
myLGB = LGBMRegressor()
myLGB.fit(X_train, Y_train)
Y_pred_LGB = myLGB.predict(X_test)
LGB_mse = mean_squared_error(Y_test, Y_pred_LGB)
print('LGBMRegressor model MSE: %.4f' % LGB_mse)

print('GBR model MSE: %.4f' % GBR_mse)
print('RandomForest model MSE: %.4f' % RF_mse)
print('DecisionTreeRegressor model MSE: %.4f' % DTR_mse)
print('SupportVectorRegressor model MSE: %.4f' % SVR_mse)
print('XGBRegressor model MSE: %.4f' % XGB_mse)
print('KNeighborsRegressor model MSE: %.4f' % KNN_mse)
print('BaggingRegressor model MSE: %.4f' % BAG_mse)
print('LGBMRegressor model MSE: %.4f' % LGB_mse)

# Initial MSEs; the three smallest are taken forward for tuning: GBR, XGB, LGB
# GBR model MSE: 0.4573
# RandomForest model MSE: 0.5196
# DecisionTreeRegressor model MSE: 0.9756
# SupportVectorRegressor model MSE: 0.6731
# XGBRegressor model MSE: 0.4571
# KNeighborsRegressor model MSE: 0.7730
# BaggingRegressor model MSE: 0.7730
# LGBMRegressor model MSE: 0.4679
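# The eight blocks above repeat one fit/predict/score pattern; the same
# comparison can be produced in a single loop (a sketch reusing the
# estimators imported at the top of the script, all with the settings above):
models = {
    'GBR': GradientBoostingRegressor(),
    'RandomForest': RandomForestRegressor(),
    'DecisionTree': DecisionTreeRegressor(random_state=111),
    'SVR': SVR(),
    'XGB': XGBRegressor(random_state=111),
    'AdaBoost': AdaBoostRegressor(random_state=111),
    'KNN': KNeighborsRegressor(),
    'Bagging': BaggingRegressor(),
    'LGBM': LGBMRegressor(),
}
for name, model in models.items():
    model.fit(X_train, Y_train)
    print('%s model MSE: %.4f' % (name, mean_squared_error(Y_test, model.predict(X_test))))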

'''GBR tuning'''
# 1. Tune n_estimators
param_test1 = {'n_estimators': range(10, 300, 10)}
gsearch1 = GridSearchCV(estimator=GradientBoostingRegressor(alpha=0.9,
                                  criterion='friedman_mse',
                                  init=None,
                                  learning_rate=0.1,
                                  loss='huber',
                                  max_depth=5,
                                  max_features=None,
                                  max_leaf_nodes=None,
                                  min_impurity_decrease=0.0,
                                  min_impurity_split=None,
                                  min_samples_leaf=5,
                                  min_samples_split=3,
                                  min_weight_fraction_leaf=0.0,
                                  presort='auto',
                                  random_state=111,
                                  subsample=0.8,
                                  verbose=1,
                                  warm_start=False), # the regressor under search, with every parameter fixed except the one being tuned
                        param_grid=param_test1,
                        scoring=None,
                        iid=False,
                        n_jobs=-1,
                        cv=None
                        )

gsearch1.fit(X_train, Y_train)
gsearch1.best_params_, gsearch1.best_score_ # best n_estimators: 50

## The intermediate tuning steps are omitted

# 7. Final model
myGBR = GradientBoostingRegressor(alpha=0.9,
                                  criterion='friedman_mse',
                                  init=None,
                                  learning_rate=0.12,
                                  loss='huber',
                                  max_depth=5,
                                  max_features=13,
                                  max_leaf_nodes=None,
                                  min_impurity_decrease=0.0,
                                  min_impurity_split=None,
                                  min_samples_leaf=34,
                                  min_samples_split=2,
                                  min_weight_fraction_leaf=0.0,
                                  n_estimators=50,
                                  presort='auto',
                                  random_state=111,
                                  subsample=0.65,
                                  verbose=1,
                                  warm_start=False)
myGBR.fit(X_train, Y_train) # fit the tuned GBR model on the training features and target
Y_pred_GBR = myGBR.predict(X_test) # predict y on the held-out split
GBR_mse = mean_squared_error(Y_test, Y_pred_GBR)
print('GBR model MSE after tuning: %.4f' % GBR_mse) # MSE between actual and predicted values; final model MSE: 0.4568

'''XGB tuning'''
# https://cloud.tencent.com/developer/article/1080593 tuning walk-through reference
# 1. Tune n_estimators
param_test1 = {'n_estimators': range(10, 300, 10)}
gsearch1 = GridSearchCV(estimator=XGBRegressor(learning_rate=0.1,
                                               max_depth=5,
                                               min_child_weight=1,
                                               subsample=0.8,
                                               colsample_bytree=0.8,
                                               gamma=0,
                                               reg_alpha=0,
                                               reg_lambda=1,
                                               random_state=111), # the regressor under search, with every parameter fixed except the one being tuned
                        param_grid=param_test1,
                        scoring=None,
                        iid=False,
                        n_jobs=-1,
                        cv=None
                        )

gsearch1.fit(X_train, Y_train)
gsearch1.best_params_, gsearch1.best_score_ # best n_estimators: 60
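# An alternative to grid-searching n_estimators: let xgboost keep adding trees
# until the held-out error stops improving (early stopping). A sketch for the
# xgboost versions of this era; newer releases moved early_stopping_rounds
# into the constructor.
es_xgb = XGBRegressor(learning_rate=0.1, n_estimators=1000, max_depth=5, random_state=111)
es_xgb.fit(X_train, Y_train,
           eval_set=[(X_test, Y_test)],
           eval_metric='rmse',
           early_stopping_rounds=10,
           verbose=False)
print(es_xgb.best_iteration) # the number of boosting rounds actually kept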

## The intermediate tuning steps are omitted

# 7. Final model -- XGB
myXGB = XGBRegressor(n_estimators = 60,
                     max_depth = 4,
                     min_child_weight = 6,
                     subsample = 0.8,
                     colsample_bytree = 0.9,
                     gamma=0.3,
                     reg_alpha = 2,
                     reg_lambda = 3,
                     random_state=111)
myXGB.fit(X_train, Y_train) # fit the tuned XGB model on the training features and target
Y_pred_XGB = myXGB.predict(X_test) # predict y on the held-out split
XGB_mse = mean_squared_error(Y_test, Y_pred_XGB)
print('XGB model MSE after tuning: %.4f' % XGB_mse) # MSE between actual and predicted values; final model MSE: 0.4569

# Visualize the fit
def prediction_effect(Y_pred, mse):
    plt.figure()
    plt.plot(np.arange(len(Y_test)), Y_test, 'go-', label='TRUE VALUE')
    plt.plot(np.arange(len(Y_pred)), Y_pred, 'ro-', label='PREDICT VALUE')
    plt.title('Model MSE: %.4f' % mse)
    plt.legend(loc='best') # without this call the line labels are never shown
    plt.show()
prediction_effect(Y_pred_GBR, GBR_mse)
prediction_effect(Y_pred_XGB, XGB_mse)

Reposted from blog.csdn.net/lzykevin/article/details/103437694