Python实现主成分分析、决策树和随机森林完整过程

先附上代码,学习笔记回头再补充

完整代码如下:

# -*- coding: utf-8 -*-
# 关闭警告
# import warnings
# warnings.filterwarnings('ignore')

# 宏观数据和烟草数据回归分析  计算回归系数
import  pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn import  metrics as mt
from sklearn import preprocessing as prep
from sklearn.decomposition import PCA
from  sklearn import tree
from sklearn import ensemble
import  random as rd

class CartTree():
    """End-to-end modeling pipeline for price-segment tobacco data:
    read -> outlier filtering -> scaling -> binarization -> PCA ->
    CART decision tree / random forest fitting and scoring."""

    def data_read(self, data_path, file_name, typeid):
        '''
        Read the raw Excel data and slice out one price segment.
        :param data_path: directory holding the data file
        :param file_name: Excel file name
        :param typeid: list of price-segment ids to keep
        :return: (segment rows with all columns,
                  segment rows with the irrelevant id/date columns dropped)
        '''
        data = pd.read_excel(data_path + file_name, index_col='pack_bar').dropna()  # drop rows with missing values
        data1_type = data[data['typeid'].isin(typeid)]  # keep only the requested price segment(s)
        # data1_type = data1_type[data1_type['ccom_id'].isin([11110001])]  # optionally keep one city only
        # drop id/date columns that are not predictors ('net_date' used to be here too);
        # keyword form: the positional-axis drop(cols, 1) was removed in pandas 2.x
        data_type = data1_type.drop(columns=['typeid', 'month_double', 'ccom_id', 'net_month_double'])
        return data1_type, data_type

    def _replace_outliers(self, frame, col_idx, series, up, dw, fill, clamp=False):
        '''
        Mask or replace the values of one column that fall outside [dw, up].
        Writes through frame.loc (the original chained iloc[...][mask] assignment
        could silently fail to update the frame).
        :param frame: data frame being cleaned (modified in place)
        :param col_idx: positional index of the column
        :param series: the column values used for the outlier decision
        :param up: upper bound
        :param dw: lower bound
        :param fill: 'nan' masks outliers with NaN; anything else replaces them
        :param clamp: when filling, True replaces with the bound itself
                      (threshold mode) instead of the most extreme inlier
        '''
        col = frame.columns[col_idx]
        if (series > up).any():
            # print('存在上限异常值')
            if fill == 'nan':
                frame.loc[series > up, col] = np.nan
            else:
                frame.loc[series > up, col] = up if clamp else series[series < up].max()
        else:
            print('不存在上限异常值')
        if (series < dw).any():
            # print('存在下限异常值')
            if fill == 'nan':
                frame.loc[series < dw, col] = np.nan
            else:
                # BUG FIX: the original used series[series < dw].min(), i.e. the
                # minimum of the outliers themselves; the intended replacement is
                # the most extreme *inlier* (smallest value above the bound).
                frame.loc[series < dw, col] = dw if clamp else series[series > dw].min()
        else:
            print('不存在下限异常值')

    def outlier_filtrate(self, data_type, method='std', fill='nan', threshold=1):
        '''
        Outlier handling.
        :param data_type: raw data frame
        :param method: 'std' (mean +/- 2 sigma), 'quantile' (boxplot fences),
                       or 'threshold' (fixed +/- threshold, growth-rate columns only)
        :param fill: 'nan' masks outliers with NaN (those rows are dropped at the
                     end); anything else replaces them in place
        :param threshold: cut-off value, only used when method == 'threshold'
        :return: cleaned frame with NaN-containing rows removed
        '''
        ncol = data_type.shape[1]
        colnames = data_type.columns
        incr_cols = [c for c in colnames if c.find('_incr') > 0]  # growth-rate columns only
        data2_type = data_type.copy()
        for i in range(ncol):
            datai = data2_type.iloc[:, i]
            if method == 'std':
                # normal-distribution rule: outside mean +/- 2 sigma
                xmean = datai.mean()
                xstd = datai.std()
                self._replace_outliers(data2_type, i, datai,
                                       xmean + 2 * xstd, xmean - 2 * xstd, fill)
            elif method == 'quantile':
                # boxplot rule: outside the 1.5*IQR fences
                q1 = datai.quantile(0.25)
                q3 = datai.quantile(0.75)
                iqr = q3 - q1
                self._replace_outliers(data2_type, i, datai,
                                       q3 + 1.5 * iqr, q1 - 1.5 * iqr, fill)
            elif method == 'threshold' and colnames[i] in incr_cols:
                # fixed symmetric bounds, clamping to the bound when filling
                self._replace_outliers(data2_type, i, datai,
                                       threshold, (-1.0) * threshold, fill, clamp=True)
        # drop every row in which an outlier was masked with NaN
        data2_type = data2_type.dropna()
        return data2_type

    def data_scale(self, data2_type, method='normalize'):
        '''
        Feature scaling.
        :param data2_type: input frame
        :param method: 'minmax'    - scale each column to [0, 1]
                       'z-score'   - standardize each column
                       'normalize' - L2-normalize each row
                       'maxabs'    - scale by max absolute value, keeping sign ([-1, 1]);
                                     the only scaler usable on scipy.sparse data
                       'robust'    - scale by the interquartile range
        :return: scaled frame with the original index/columns
        :raises ValueError: for an unknown method (the original fell through to a NameError)
        '''
        if method == 'minmax':
            scaled = prep.minmax_scale(data2_type, feature_range=(0, 1), axis=0, copy=True)
        elif method == 'z-score':
            scaled = prep.scale(data2_type, axis=0, with_mean=True, with_std=True, copy=True)
        elif method == 'normalize':
            scaled = prep.normalize(data2_type, norm='l2', axis=1)
        elif method == 'maxabs':
            scaled = prep.maxabs_scale(data2_type, axis=0, copy=True)
        elif method == 'robust':
            scaled = prep.robust_scale(data2_type, axis=0, with_centering=True, with_scaling=True, copy=True)
        else:
            raise ValueError('unknown scaling method: %r' % method)
        data3_type = pd.DataFrame(data=scaled, columns=data2_type.columns, index=data2_type.index)
        return data3_type

    def data_factor(self, data3_type, replace='dependent', threshold=0.05, colnames=None):
        '''
        Binarization: values greater than `threshold` become 1, the rest 0.
        :param data3_type: input frame
        :param replace: 'all' (every column) | 'dependent' (last column only)
                        | 'colnames' (the columns listed in `colnames`)
        :param threshold: binarization cut-off
        :param colnames: list of column names, only used when replace == 'colnames'
        :return: new frame
        '''
        data4_type = data3_type.copy()
        if replace == 'all':
            # binarize every column
            data_binary = prep.binarize(data3_type, threshold=threshold, copy=True)
            data4_type = pd.DataFrame(data=data_binary, columns=data4_type.columns, index=data4_type.index)
        elif replace == 'dependent':
            # binarize the dependent variable (last column); vectorised version of
            # the original row loop, keeping the column's original dtype
            target = data4_type.iloc[:, -1]
            data4_type.iloc[:, -1] = (target > threshold).astype(target.dtype)
        elif replace == 'colnames':
            # BUG FIX: the original single-column branch wrote into `.values` of a
            # temporary frame, which never updated data4_type; prep.binarize
            # handles any number of columns uniformly.
            data_binary = prep.binarize(data4_type[colnames], threshold=threshold, copy=True)
            data4_type[colnames] = pd.DataFrame(data=data_binary, columns=colnames, index=data4_type.index)
        # show the class balance of the (possibly binarized) target
        print(data4_type.iloc[:, -1].value_counts())
        return data4_type

    def data_pca(self, data4_type, component=0.8, rstate=1234):
        '''
        PCA over the feature columns (all but the last, which is the target).
        :param data4_type: input frame
        :param component: n_components for sklearn PCA; a float < 1 keeps that
                          fraction of the variance
        :param rstate: random_state passed to PCA
        :return: (frame of principal components with the target re-attached,
                  number of components kept)
        '''
        data5_type = data4_type.copy()
        colnames = data5_type.columns
        X = data5_type.iloc[:, :-1]
        pca = PCA(n_components=component, random_state=rstate)  # e.g. keep 80% of the variance
        X_new = pca.fit_transform(X)  # fit + transform in one call
        comp = pca.components_  # eigenvectors of each component
        dataComp = pd.DataFrame(data=comp, columns=colnames[:-1])
        print("各成分特征向量:")
        print(dataComp)
        vr = pca.explained_variance_ratio_  # variance share of each component
        colnames2 = ['CP' + str(i) for i in range(X_new.shape[1])]
        dataVR = pd.Series(data=vr * 100, index=colnames2, name='方差贡献比(%)')
        print("各成分方差贡献比:")
        print(dataVR)
        dataX = pd.DataFrame(data=X_new, columns=colnames2, index=data5_type.index)
        dataX[colnames[-1]] = data5_type.iloc[:, -1]  # re-attach the target column
        data6_type = dataX
        print("主成分个数:" + str(X_new.shape[1]))
        return data6_type, X_new.shape[1]

    def cart_tree(self, data6_type, rstate=1234):
        '''
        Grid-search a CART decision tree and evaluate it on a 30% hold-out set.
        :param data6_type: frame whose last column is the (binary) target
        :param rstate: seed for the train/test split
        :return: (fitted DecisionTreeClassifier, hold-out accuracy)
        '''
        data_train, data_test = train_test_split(data6_type, test_size=0.3, random_state=rstate)
        col_names = data_train.columns
        X = data_train[col_names[:-1]]
        y = data_train[col_names[-1]]

        # grid search over the usual pruning parameters
        param = {'max_depth': np.arange(2, 7, 1),
                 'min_samples_leaf': np.arange(2, 13, 2),
                 'min_samples_split': np.arange(2, 9, 2)}
        grid_cart = GridSearchCV(estimator=tree.DecisionTreeClassifier(), param_grid=param, cv=5, verbose=0)
        grid_cart.fit(X, y)
        best = grid_cart.best_params_

        # refit with the best parameters.
        # BUG FIX: the original passed the tuned min_samples_split into
        # min_impurity_split, an unrelated (and since removed) parameter.
        estm = tree.DecisionTreeClassifier(max_depth=best.get('max_depth'),
                                           min_samples_leaf=best.get('min_samples_leaf'),
                                           min_samples_split=best.get('min_samples_split'))
        estm.fit(X, y)

        X_test = data_test[col_names[:-1]]
        y_test = data_test[col_names[-1]]
        predictY = estm.predict(X_test)
        acu = mt.accuracy_score(y_test, predictY)
        return estm, acu

    def random_forest(self, data6_type, rstate=1234):
        '''
        Fit a 200-tree random forest, plot feature importances, and evaluate
        on a 30% hold-out set.
        :param data6_type: frame whose last column is the (binary) target
        :param rstate: seed for both the split and the forest
        :return: (fitted RandomForestClassifier, hold-out accuracy)
        '''
        data_train, data_test = train_test_split(data6_type, test_size=0.3, random_state=rstate)
        col_names = data_train.columns
        X = data_train[col_names[:-1]]
        y = data_train[col_names[-1]]
        rf_class = ensemble.RandomForestClassifier(n_estimators=200, random_state=rstate)  # 200 trees
        rf_class.fit(X, y)

        # feature importances as a horizontal bar chart (most important on top).
        # BUG FIX: plot('barh') passed the kind as a positional x-argument;
        # the kind must be given as a keyword.
        impt_series = pd.Series(rf_class.feature_importances_, index=X.columns)
        impt_series.sort_values().plot(kind='barh')
        plt.show()

        # hold-out accuracy
        X_test = data_test[col_names[:-1]]
        y_test = data_test[col_names[-1]]
        predictY = rf_class.predict(X_test)
        acu = mt.accuracy_score(y_test, predictY)
        return rf_class, acu

    def data_predict(self, data_path2, model, comps):
        '''
        Score a new Excel file with a fitted model, re-running the same
        scale -> binarize -> PCA preprocessing as training.
        :param data_path2: full path of the Excel file to score
        :param model: fitted classifier (tree or forest)
        :param comps: number of PCA components (must match training)
        :return: (Series of predictions indexed by pack_bar,
                  info frame with the real binarized target attached)
        '''
        data_new = pd.read_excel(data_path2, index_col='pack_bar').dropna()  # drop rows with missing values
        data_new2 = data_new.drop(columns=['month_double', 'ccom_id', 'net_month_double'])  # non-predictor columns
        data2 = self.data_scale(data_new2, method='normalize')
        data3 = self.data_factor(data2, replace='dependent', threshold=0.05)
        data4, comps2 = self.data_pca(data3, component=comps)
        X = data4.iloc[:, :-1]
        predictY = model.predict(X)
        result = pd.Series(index=X.index.tolist(), data=predictY.tolist())  # predicted growth class
        # .copy() so the 'real' column below is not written into a slice view
        data_info = data_new[['ccom_id', 'net_month_double', data_new.columns[-1]]].copy()
        data_info['real'] = data3.iloc[:, -1]

        return result, data_info


if __name__ == '__main__':
    # ## file locations and the price segment to analyse
    data_path = 'C:\\Users\\90539\\PycharmProjects\\data\\'
    file_name = 'data.xlsx'
    typeid = ['B']

    obj2 = CartTree()  # pipeline object
    data, data1 = obj2.data_read(data_path, file_name, typeid)
    # data11 = obj2.data_group_byType(data1)
    data2 = obj2.outlier_filtrate(data1, method='std', fill='nan', threshold=1)
    data3 = obj2.data_scale(data2, method='normalize')
    data4 = obj2.data_factor(data3, replace='dependent', threshold=0.05)
    rstate = rd.randint(9, 999)
    data6, comps = obj2.data_pca(data4, component=0.8, rstate=rstate)

    def search_best(fit, data, first_seed, max_tries=100, target=0.75):
        '''Retry `fit(data, seed)` with random seeds until accuracy reaches
        `target` or `max_tries` is hit; keep the best model seen.
        BUG FIX: the original duplicated this loop twice and printed the
        *last* seed tried instead of the seed that produced the best model.
        :return: (best model, best accuracy, its seed, number of retries)'''
        best_estm, best_acu = fit(data, first_seed)
        best_seed = first_seed
        tries = 0
        while tries < max_tries and best_acu < target:
            tries += 1
            seed = rd.randint(9, 999)
            estm_i, acu_i = fit(data, seed)
            if acu_i > best_acu:
                best_estm, best_acu, best_seed = estm_i, acu_i, seed
        return best_estm, best_acu, best_seed, tries

    # CART decision tree (the original comment said "linear SVM" — it is not)
    estm, acu, best_seed, tries = search_best(obj2.cart_tree, data6, rstate)
    print("种子编号:" + str(best_seed))
    print("第" + str(tries + 1) + "次跑出最优结果:" + "\t")
    print('模型预测准确率为:%.2f%%' % (acu * 100))

    # random forest
    rfestm, acu, best_seed, tries = search_best(obj2.random_forest, data6, rstate)
    print("种子编号:" + str(best_seed))
    print("第" + str(tries + 1) + "次跑出最优结果:" + "\t")
    print('模型预测准确率为:%.2f%%' % (acu * 100))
    print("各变量重要性:")
    print(rfestm.feature_importances_)

    # score the deployment file with the best random forest
    data_path2 = 'C:\\Users\\90539\\PycharmProjects\\tobacco-model\\\Deliver_Control\\data\\投放-实例.xlsx'
    pred_series, data_info = obj2.data_predict(data_path2, rfestm, comps)
    data_result = pd.DataFrame(data_info)
    # attach predictions as a column; the original assigned a whole DataFrame
    # where a Series is the intended right-hand side
    data_result['pred'] = pred_series

发布了49 篇原创文章 · 获赞 95 · 访问量 23万+

猜你喜欢

转载自blog.csdn.net/Trisyp/article/details/89499879