Application Scorecard: Feature Engineering

Read the data and import the required packages

import pandas as pd
import datetime
import collections
import numpy as np
import random
import os
from numpy import nan as NA
from pandas import Series, DataFrame

#os.chdir('E://kaggle//value//')
data1=pd.read_csv('PPD_LogInfo_3_1_Training_Set.csv')              # login log
data2=pd.read_csv('PPD_Training_Master_GBK_3_1_Training_Set.csv')  # master table (contains the target)
data3=pd.read_csv('PPD_Userupdate_Info_3_1_Training_Set.csv')      # user-update log

Deriving features with time slicing

1. The number of operation codes and the number of code categories used within a window before the successful loan, as derived features.

2. The total number of operations within that window before the successful loan, as a derived feature.

3. Whether specific personal information was modified within that window before the successful loan, as derived features.
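
The code below calls two helper functions, TimeWindowSelection and GetColumns, that the post uses without defining. A minimal sketch consistent with how they are called (the exact originals may differ) would be:

def TimeWindowSelection(df, daysCol, time_windows):
    # For each candidate window, the share of records whose gap falls inside it
    freq_tw = {}
    for tw in time_windows:
        freq_tw[tw] = (df[daysCol] <= tw).sum() / df.shape[0]
    return freq_tw

def GetColumns(df, col):
    # One 'code_<value>' column per distinct operation code
    return ['code_' + str(v) for v in sorted(df[col].unique())]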

#Parse the string dates into datetimes, then subtract to get the gap in days between each operation and the loan listing.
data1['Loginfo3_t']=data1['LogInfo3'].map(lambda x: datetime.datetime.strptime(x,'%Y-%m-%d'))
data1['listinginfo_t']=data1['Listinginfo1'].map(lambda x: datetime.datetime.strptime(x,'%Y-%m-%d'))
data1['ListingGap']=data1[['Loginfo3_t','listinginfo_t']].apply(lambda x: (x[1]-x[0]).days, axis=1)
print('Maximum gap in days:', data1['ListingGap'].max())
#Check which time window covers the most samples
freq=TimeWindowSelection(data1,'ListingGap',range(30,720,30))
print(freq)
#A 180-day window already covers more than 95% of the samples, i.e. nearly all operations happen within 180 days of the listing, so slicing at 180 days is the most reasonable choice.
idx=list(set(data1['Idx']) | set(data2['Idx']) | set(data3['Idx']))
#Column names for the derived feature frames
LogInfo1_columns=GetColumns(data1,'LogInfo1')
LogInfo2_columns=GetColumns(data1,'LogInfo2')
#Build the (initially all-zero) feature frames
data1_LogInfo1=DataFrame(np.zeros([len(idx),len(LogInfo1_columns)]),index=idx,columns=LogInfo1_columns)
data1_LogInfo2=DataFrame(np.zeros([len(idx),len(LogInfo2_columns)]),index=idx,columns=LogInfo2_columns)
#Keep only operations within 180 days before the loan, then count codes and code categories per borrower
data1['min_180']=data1['ListingGap'].map(lambda x: int(x<=180))
data1=data1.loc[data1['min_180']==1]
unique_idx=data1['Idx'].unique()
count=0
for i in unique_idx:
    #per-borrower counts of each LogInfo1 operation code (value_counts replaces the deprecated .ix lookups)
    for j,c in data1.loc[data1['Idx']==i,'LogInfo1'].value_counts().items():
        data1_LogInfo1.loc[i,'code_'+str(j)]=c
    count+=1
    print(count/len(unique_idx))
data1_LogInfo1.to_csv('data1_LogInfo1.csv')
count=0
for i in unique_idx:
    #per-borrower counts of each LogInfo2 code category
    for j,c in data1.loc[data1['Idx']==i,'LogInfo2'].value_counts().items():
        data1_LogInfo2.loc[i,'code_'+str(j)]=c
    count+=1
    print(count/len(unique_idx))
#Suffix the category columns so they do not collide with the LogInfo1 columns after merging
data1_LogInfo2.columns=[i+'_classes' for i in data1_LogInfo2.columns]
data1_LogInfo2.to_csv('data1_LogInfo2.csv')
#Same treatment for the user-update log: parse the string dates and compute the gap in days.
data3['UserupdateInfo2_t']=data3['UserupdateInfo2'].map(lambda x: datetime.datetime.strptime(x,'%Y/%m/%d'))
data3['listinginfo_t']=data3['ListingInfo1'].map(lambda x: datetime.datetime.strptime(x,'%Y/%m/%d'))
data3['ListingGap']=data3[['UserupdateInfo2_t','listinginfo_t']].apply(lambda x: (x[1]-x[0]).days, axis=1)
print('Maximum gap in days:', data3['ListingGap'].max())
#Check which time window covers the most samples
freq=TimeWindowSelection(data3,'ListingGap',range(30,720,30))
print(freq)
#Here 150 days already covers 95% of the samples; for consistency the 180-day slice is kept
data3['min_180']=data3['ListingGap'].map(lambda x: int(x<=180))
data3=data3.loc[data3['min_180']==1]
#Normalize case, since the same update item appears with inconsistent capitalization
data3['UserupdateInfo1_l']=data3['UserupdateInfo1'].map(lambda x: x.lower())
#See which items users modified, so the informative ones can be picked out
data3_UserupdateInfo1_l_columns=list(data3['UserupdateInfo1_l'].unique())
#Update items worth flagging (changing the real name, ID number, marriage status, car or business-license status before a loan), plus a counter of all updates
flag_columns=['_hasbuycar','_marriagestatusid','_realname','_hasbusinesslicense','_idnumber']
notice_columns=flag_columns+['total_']
data3_UserupdateInfo1_l=DataFrame(np.zeros([len(idx),len(notice_columns)]),index=idx,columns=notice_columns)
data3_set=data3.set_index(['Idx'])
count=0
unique_idx3=data3_set.index.unique()
for i in unique_idx3:
    vals=data3_set.loc[i,'UserupdateInfo1_l']
    if isinstance(vals,str):
        #a borrower with a single update record comes back as a scalar; wrap it so the code below is uniform
        vals=Series([vals])
    data3_UserupdateInfo1_l.loc[i,'total_']=vals.count()
    for j in vals.unique():
        if j in flag_columns:
            data3_UserupdateInfo1_l.loc[i,j]=1
    count+=1
    print(count/len(unique_idx3))
data3_UserupdateInfo1_l.to_csv('data3_UserupdateInfo1_l.csv')
#Merge the derived features into the master table
data1_fo1=pd.read_csv('data1_LogInfo1.csv')
data1_fo1.rename(columns={'Unnamed: 0':'Idx'},inplace=True)
data1_fo2=pd.read_csv('data1_LogInfo2.csv')
data1_fo2.rename(columns={'Unnamed: 0':'Idx'},inplace=True)
data3_fo1=pd.read_csv('data3_UserupdateInfo1_l.csv')
data3_fo1.rename(columns={'Unnamed: 0':'Idx'},inplace=True)
merge1=pd.merge(data2,data1_fo1,on='Idx')
merge2=pd.merge(merge1,data1_fo2,on='Idx')
merge3=pd.merge(merge2,data3_fo1,on='Idx')
#string.csv maps each variable in the master table to its type
string=pd.read_csv('string.csv')
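
string.csv is not part of the competition data; it is a hand-made lookup from variable name (变量名称) to variable type (变量类型). Judging from how the code below uses it, its rows presumably look something like this (hypothetical example; there may also be other type labels, which the code collects into another):

变量名称,变量类型
Idx,Numerical
target,Numerical
ListingInfo,Numerical
UserInfo_1,Categorical
WeblogInfo_1,Numerical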

Separating continuous and categorical variables

#Split string (categorical) variables from numerical ones
string_dic=dict(zip(string['变量名称'],string['变量类型']))
#the derived LogInfo counts and the update counter are numerical; the update flags are categorical
Numerical=list(data1_fo1.columns)+list(data1_fo2.columns)+['total_']
Categorical=['_hasbuycar','_marriagestatusid','_realname','_hasbusinesslicense','_idnumber']
another=[]
for i in data2.columns:
    if string_dic[i]=='Categorical':
        Categorical.append(i)
    elif string_dic[i]=='Numerical':
        Numerical.append(i)
    else:
        another.append(i)
#drop the non-feature columns; 'Idx' is removed twice because it appears in both LogInfo frames
Numerical.remove('target')
Numerical.remove('ListingInfo')
Numerical.remove('Idx')
Numerical.remove('Idx')

Identifying, imputing, and dropping missing values

#Function to impute missing values, or flag a column for deletion when too much is missing
def MakeupMissing(df,col,type,method,dict1,list1):
    #rows where the value equals itself are the non-missing ones (NaN != NaN); copy so the original frame is untouched
    validDf=df.loc[df[col]==df[col]].copy()
    #record the share of non-missing values for this column
    dict1.update({col:validDf.shape[0]*1.0/df.shape[0]})
    missingList=[i for i in df[col]]
    if validDf.shape[0]==df.shape[0]:
        print('Column {} has no missing values'.format(col))
    else:
        if type=='Continuous':
            if method not in ['Mean','Random']:
                print('Please specify a valid method')
            else:
                if validDf.shape[0]*1.0/df.shape[0]>0.7:
                    descStats=validDf[col].describe()
                    mu=descStats['mean']
                    std=descStats['std']
                    maxVal=descStats['max']
                    #values more than 3 standard deviations above the mean are very unlikely (cf. Chebyshev's inequality), so they are capped as extreme values
                    if maxVal>mu+3*std:
                        for i in list(validDf.index):
                            if validDf.loc[i,col]>mu+3*std:
                                validDf.loc[i,col]=mu+3*std
                        mu=validDf[col].describe()['mean']
                    for i in range(df.shape[0]):
                        if df.loc[i,col]!=df.loc[i,col]:
                            if method=='Mean':
                                missingList[i]=mu
                            elif method=='Random':
                                missingList[i]=random.sample(list(validDf[col]),1)[0]
                    print('Missing values in column {0} imputed with method {1}'.format(col,method))
                else:
                    list1.append(col)
                    print('Column {} has too many missing values and should be dropped'.format(col))
        elif type=='Categorical':
            if method not in ['Mode','Random']:
                print('Please specify a valid method')
            else:
                if validDf.shape[0]*1.0/df.shape[0]>0.7:
                    freqDict={}
                    recdNum=validDf.shape[0]
                    for v in set(validDf[col]):
                        vDf=validDf.loc[validDf[col]==v]
                        freqDict[v]=vDf.shape[0]*1.0/recdNum
                    #the mode; key tells max() to compare entries by frequency
                    modeVal=max(freqDict.items(),key=lambda x: x[1])[0]
                    freqTuple=list(freqDict.items())
                    freqList=[0]+[i[1] for i in freqTuple]
                    freqCumsum=np.cumsum(freqList)
                    for i in range(df.shape[0]):
                        if df.loc[i,col]!=df.loc[i,col]:
                            if method=='Mode':
                                missingList[i]=modeVal
                            elif method=='Random':
                                #draw a category with probability proportional to its frequency
                                a=random.random()
                                position=[k+1 for k in range(len(freqCumsum)-1) if freqCumsum[k]<a<=freqCumsum[k+1]][0]
                                missingList[i]=freqTuple[position-1][0]
                    print('Missing values in column {0} imputed with method {1}'.format(col,method))
                else:
                    list1.append(col)
                    print('Column {} has too many missing values and should be dropped'.format(col))
    return missingList
#Impute missing values, collecting the columns to drop
all_dict={}
all_list=[]
count=0
for j in Numerical:
    count+=1
    merge3[j]=MakeupMissing(merge3,j,'Continuous','Mean',all_dict,all_list)
    print(count/len(Categorical+Numerical))
for k in Categorical:
    count+=1
    merge3[k]=MakeupMissing(merge3,k,'Categorical','Random',all_dict,all_list)
    print(count/len(Categorical+Numerical))
#drop the features with too many missing values
merge3.drop(all_list,axis=1,inplace=True)
merge3.to_csv('merge_na.csv',encoding="utf_8_sig")

Chi-square binning (ChiMerge) to improve model stability

1. Merging is bounded by a maximum bin count and a chi-square threshold. Under a chi-square distribution with 1 degree of freedom at the 5% significance level, the critical value is 3.841 (the code below also stops merging once the number of bins drops to 5).

2. Compute the chi-square statistic for every pair of adjacent bins and merge the pair with the smallest value, repeating until the stopping condition above is met (a toy computation follows this list).

3. Note that each bin must contain more than 5% of the samples; bins at or below 5% are merged with an adjacent bin.

4. Finally, the bad-sample rate (it could equally be the good rate; in general, the proportion of one target class) should be monotonic across bins. The code below does not yet enforce this check.
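
A toy computation of the statistic, with hypothetical numbers, mirroring the Chi2 function below: two adjacent bins hold 100 and 200 samples, with 10 and 40 bad ones respectively.

import pandas as pd
toy=pd.DataFrame({'total':[100,200],'bad':[10,40]})
toy['good']=toy['total']-toy['bad']
overallRate=toy['bad'].sum()/toy['total'].sum()   # pooled bad rate: 50/300
expected_bad=toy['total']*overallRate             # 16.67 and 33.33
expected_good=toy['total']-expected_bad
chi2=(((toy['bad']-expected_bad)**2/expected_bad)+((toy['good']-expected_good)**2/expected_good)).sum()
print(chi2)   # about 4.8 > 3.841, so this pair would NOT be merged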

#Chi-square statistic for a set of bins, measured against the pooled bad rate
def Chi2(df,total_col,bad_col,good_col,overallRate):
    df2=df.copy()
    #expected bad/good counts if every bin had the pooled bad rate
    df2['expected_bad']=df[total_col].apply(lambda x: x*overallRate)
    df2['expected_good']=df[total_col].apply(lambda x: x*(1-overallRate))
    df2['ka_bad']=df2[[bad_col,'expected_bad']].apply(lambda x: (x[0]-x[1])**2/x[1],axis=1)
    df2['ka_good']=df2[[good_col,'expected_good']].apply(lambda x: (x[0]-x[1])**2/x[1],axis=1)
    chi=list(df2['ka_bad'])+list(df2['ka_good'])
    chi2=sum(chi)
    return chi2
#ChiMerge binning
def ChiMerge_MinChisq(df, col, target, confidenceVal=3.841):
    colLevels=set(df[col])
    #per-level totals, bad counts and good counts
    total=df.groupby([col])[target].count()
    total=pd.DataFrame({'total':total})
    bad=df.groupby([col])[target].sum()
    bad=pd.DataFrame({'bad':bad})
    regroup=total.merge(bad,left_index=True,right_index=True,how='left')
    regroup['good']=regroup[['bad','total']].apply(lambda x: x[1]-x[0],axis=1)
    regroup.reset_index(level=0,inplace=True)
    colLevels=sorted(list(colLevels))
    #start with one bin per raw level
    groupIntervals=[[i] for i in colLevels]
    groupNum=len(groupIntervals)
    if groupNum>2:
        while(1):
            #stop once the number of bins has dropped to 5
            if len(groupIntervals)<=5:
                break
            #chi-square of every pair of adjacent bins
            chisqList=[]
            for interval in range(len(groupIntervals)-1):
                df2=regroup.loc[regroup[col].isin(groupIntervals[interval]+groupIntervals[interval+1])]
                overallRate=df2['bad'].sum()/df2['total'].sum()
                chisq=Chi2(df2,'total','bad','good',overallRate)
                chisqList.append(chisq)
            min_position=chisqList.index(min(chisqList))
            #stop when even the most similar pair of bins is significantly different
            if min(chisqList)>confidenceVal:
                break
            #merge the most similar pair of adjacent bins
            combinedPosition=min_position+1
            groupIntervals[min_position]=groupIntervals[min_position]+groupIntervals[combinedPosition]
            groupIntervals.pop(combinedPosition)
            groupNum=len(groupIntervals)
        #second pass: merge any bin holding 5% of the samples or less into its neighbour
        a=1
        while(a==1):
            for i in range(len(groupIntervals)):
                df2=regroup.loc[regroup[col].isin(groupIntervals[i])]
                if df2['total'].sum()/regroup['total'].sum()<=0.05 and i+1!=len(groupIntervals):
                    groupIntervals[i]=groupIntervals[i]+groupIntervals[i+1]
                    groupIntervals.pop(i+1)
                    break
                if df2['total'].sum()/regroup['total'].sum()<=0.05 and i+1==len(groupIntervals):
                    groupIntervals[i-1]=groupIntervals[i-1]+groupIntervals[i]
                    groupIntervals.pop(i)
                    a=0
                    break
                if df2['total'].sum()/regroup['total'].sum()>0.05 and i+1==len(groupIntervals):
                    a=0
    return groupIntervals

#Run the binning
merge3=pd.read_csv('merge_na.csv')
#columns dropped in the missing-value step must also leave the type lists
drop_columns=[]
for i in list(data2.columns):
    if i not in list(merge3.columns):
        drop_columns.append(i)
for i in drop_columns:
    if i in Numerical:
        Numerical.remove(i)
    else:
        Categorical.remove(i)
drop_columns=[]
accept_columns={}
count=0
for i in Numerical:
    chi_min=ChiMerge_MinChisq(merge3, i, 'target', confidenceVal=3.841)
    #a single bin carries no information, so the column is dropped; otherwise keep its bins
    if len(chi_min)<2:
        drop_columns.append(i)
    else:
        accept_columns.update({i:chi_min})
    count+=1
    print(count/len(Numerical))

WOE encoding

To be continued.

Computing IV (which measures how much a feature contributes to identifying the target variable) and selecting features by IV

To be continued.
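
The post stops before these steps. For reference only, a minimal sketch of WOE and IV for one binned feature (not the author's code; it assumes a DataFrame df with a binned column col and a 0/1 target):

import numpy as np
def woe_iv(df, col, target):
    good=(df[target]==0).sum()
    bad=(df[target]==1).sum()
    stats=df.groupby(col)[target].agg(['sum','count'])
    bad_pcnt=stats['sum']/bad                      # share of all bad samples in each bin
    good_pcnt=(stats['count']-stats['sum'])/good   # share of all good samples in each bin
    woe=np.log(bad_pcnt/good_pcnt)                 # WOE per bin (undefined if a bin has no good or no bad samples)
    iv=((bad_pcnt-good_pcnt)*woe).sum()            # IV of the whole feature
    return woe, iv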

Reprinted from blog.csdn.net/pandacode/article/details/82118977