Data cleaning - summary:


Notes on some methods that may come in handy during the data cleaning process:

# Main workflow of data mining
import pandas as pd
#******************** Build the dataset ********************#
df = pd.read_csv('data.csv')
# build a new DataFrame
kd = pd.DataFrame({'matchup':df.matchup,'opponent':df.opponent})

# #  skiprows: number of rows to skip at the start of the file, or a list of row indices to skip (0-based)
# loans_2007 = pd.read_csv('./Lending Club Statistics/LoanStats3a.csv',skiprows = 1) 

#******************** Data preprocessing ********************#
# take a quick look at the dataset
print(df.shape)
print(df.head())
print(df.tail())

# half_count = len(loans_2007) // 2
# loans_2007 = loans_2007.dropna(thresh = half_count,axis=1) # drop a column when more than half of its values are missing

loans_2007 = loans_2007.drop_duplicates() # a row is dropped only when it is identical to another row in every column
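# Variant sketch: treat rows as duplicates based only on selected key columns
# ('member_id' here is just an illustrative column name, not guaranteed to exist).
# loans_2007 = loans_2007.drop_duplicates(subset=['member_id'], keep='first')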

# handling missing / NaN values
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median()) # fill missing ages with the median
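# Related sketch for categorical columns: fill with the most frequent value
# (assumes an 'Embarked'-style string column with a few missing entries, as in the Titanic data).
titanic['Embarked'] = titanic['Embarked'].fillna(titanic['Embarked'].mode()[0])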

# drop individual feature columns
drops = ['action_type', 'combined_shot_type', 'game_event_id']
for drop in drops:
    df = df.drop(drop, axis=1)
print(df.keys())
# remove features we cannot use directly:
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']
train = train.drop(drop_elements, axis = 1)

# # drop columns that only ever take a single value
# print(loans_2007.shape) #(39786, 35)
# orig_columns = loans_2007.columns
# drop_columns = []
# for col in orig_columns:
#     col_series = loans_2007[col].dropna().unique() # drop NaN first, to guard against columns that only contain NaN plus one other fixed value
#     if len(col_series) == 1:
#         drop_columns.append(col)
# loans_2007 = loans_2007.drop(drop_columns,axis = 1)
# print(drop_columns)
# print(loans_2007.shape) #(39786, 24)
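# A more concise alternative sketch for the same idea: keep only the columns
# that have more than one distinct non-NaN value.
# loans_2007 = loans_2007.loc[:, loans_2007.nunique(dropna=True) > 1]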

# rename columns
df.rename(columns = {'average_montly_hours':'average_monthly_hours',
                     'sales':'department'},inplace = True)
# get the rows where a particular feature value is null
print(df[pd.isnull(df['shot_made_flag'])]) 
print(df.isnull().any()) # check whether any column contains null values
# get the distinct values of a feature
print(df.action_type.unique())
# count how many times each value of a feature occurs
print(df['department'].value_counts())



# inspect the relationship between a discrete feature and the target class
import matplotlib.pyplot as plt
import seaborn as sns
plot = sns.factorplot(x='department',y='left',kind='bar',data = df)
plot.set_xticklabels(rotation = 45, horizontalalignment = 'right')
plt.show() 
# salary distribution for employees in the management department:
df[df['department'] == 'management']['salary'].value_counts().plot(kind = 'pie',title = 'Management salary level distribution')
plt.show()
# compare satisfaction_level histograms for employees who left vs. those who stayed
import numpy as np
bins = np.linspace(0.0001,1.0001,21)
plt.hist(df[df['left'] == 1]['satisfaction_level'],bins = bins,alpha = 0.7,label = 'Employees Left')
plt.hist(df[df['left'] == 0]['satisfaction_level'],bins = bins,alpha = 0.5,label = 'Employees Stayed')
plt.xlabel('satisfaction_level')
plt.xlim((0,1.05))
plt.legend(loc = 'best')
plt.show()

# inspect the correlation between two numeric features
import matplotlib.pyplot as plt
plt.figure(figsize=(5,5))
print(df.keys()) # note: the raw data has no dist column, so it has to be constructed first
df['dist'] = np.sqrt(df['loc_x']**2 + df['loc_y']**2) # assumption: loc_x/loc_y exist, as in the Kobe shot dataset
plt.scatter(df.dist,df.shot_distance,color = 'blue')
plt.title('dist and shot_distance')
plt.show()


# rank feature importance and show it as a bar chart
import numpy as np
from sklearn.feature_selection import SelectKBest,f_classif
import matplotlib.pyplot as plt
predictors = ['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked','FamilySize','Title','NameLength']
selector = SelectKBest(f_classif,k=5)
selector.fit(titanic[predictors],titanic['Survived'])
scores = -np.log10(selector.pvalues_)
plt.bar(range(len(predictors)),scores)
plt.xticks(range(len(predictors)),predictors,rotation = 'vertical')
plt.show()
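# To actually keep the selected features rather than only plotting their scores,
# a follow-up sketch reusing the selector fitted above:
selected_mask = selector.get_support() # boolean mask marking the k best columns
selected_features = [p for p, keep in zip(predictors, selected_mask) if keep]
print(selected_features)
X_selected = selector.transform(titanic[predictors]) # matrix restricted to the k best features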

# # seaborn is a powerful visualization library for viewing relationships and distributions between features
import seaborn as sb
sb.pairplot(iris_data.dropna(),hue = 'class') # first argument is the dataset, hue is the name of the class column
plt.figure(figsize=(10,10))
for column_index,column in enumerate(iris_data.columns):
    if column == 'class':
        continue
    plt.subplot(2,2,column_index+1)
    sb.violinplot(x= 'class', y=column, data = iris_data)
plt.show()

# Pearson correlation coefficients between features
import seaborn as sns
colormap = plt.cm.viridis
plt.figure(figsize=(14,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train.astype(float).corr(),linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=True)
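# Optional follow-up sketch (assuming train is all-numeric, as above): list feature
# pairs whose absolute Pearson correlation exceeds 0.9 as candidates for dropping.
corr = train.astype(float).corr().abs()
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool)) # keep only the upper triangle
high_corr_pairs = [(a, b, upper.loc[a, b]) for a in upper.index for b in upper.columns
                   if upper.loc[a, b] > 0.9]
print(high_corr_pairs)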

###     Dimensionality reduction

#PCA
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
iris = load_iris() # load the iris dataset used below
X, y = iris.data, iris.target
X_std = StandardScaler().fit_transform(X)
cov_mat = np.cov(X_std.T) # covariance matrix
eig_vals,eig_vecs = np.linalg.eig(cov_mat) # eigenvalues and eigenvectors
# check how many components are needed to keep 90% of the information in the original data
eig_pairs = [(np.abs(eig_vals[i]),eig_vecs[:,i]) for i in range(len(eig_vals))]
eig_pairs.sort(key = lambda x:x[0],reverse = True)
tot = sum(eig_vals)
var_exp = [(i/tot)*100 for i in sorted(eig_vals,reverse = True)] # convert to percentages, so we can tell how many components retain 90% of the information
# print(var_exp) #[72.77045209380137, 23.030523267680636, 3.6838319576273824, 0.515192680890629]
cum_var_exp = np.cumsum(var_exp) # cumulative sum
# print(cum_var_exp) #[ 72.77045209  95.80097536  99.48480732 100.        ]
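# Equivalent sketch with sklearn: let PCA pick the number of components that
# retains at least 90% of the variance, then project the standardized data.
from sklearn.decomposition import PCA
pca = PCA(n_components=0.90) # 0 < n_components < 1 means "keep this fraction of variance"
X_pca = pca.fit_transform(X_std)
print(pca.n_components_, pca.explained_variance_ratio_)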

#     One-hot encoding
# get_dummies returns the one-hot encoding of the specified column
data_oneshot = pd.get_dummies(df['combined_shot_type'],prefix = 'combined_shot_type') # prefix sets the column-name prefix; it is optional, try it with and without
print(data_oneshot)
print(df.keys())
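# Typical follow-up sketch (assuming combined_shot_type is still present in df):
# join the dummy columns back onto df and drop the original categorical column.
df = pd.concat([df, data_oneshot], axis=1)
df = df.drop('combined_shot_type', axis=1)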
# string features such as Sex need to be converted to int/float; one-hot encoding is another option
titanic.loc[titanic['Sex'] == 'male','Sex'] = 0 # map male to 0
titanic.loc[titanic['Sex'] == 'female','Sex'] = 1
# map Sex to 0/1
dataset['Sex'] = dataset['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# discretize the continuous Fare feature into four bins 0-3
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare']                                 = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare']   = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare']                                     = 3
dataset['Fare'] = dataset['Fare'].astype(int)
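# Alternative sketch: the same 4-level discretization can be derived automatically from
# quantiles of the original continuous Fare column (before the manual overwrite above).
# dataset['Fare'] = pd.qcut(dataset['Fare'], 4, labels=[0, 1, 2, 3]).astype(int)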


#     Standardization / normalization
from sklearn.preprocessing import StandardScaler
import numpy as np
stdsc = StandardScaler()
X_example = np.array([[1.,-2.,2.],
                      [5.,3.,2.],
                      [0.,1.,-10.]])
X_example = stdsc.fit_transform(X_example)
X_example = pd.DataFrame(X_example) # remember to drop the old feature columns or build a new DataFrame
# data = data.drop(['Time','Amount'],axis=1) # drop the old features
print(X_example)
print(X_example.describe())
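# The heading also mentions normalization; a minimal MinMaxScaler sketch that rescales
# every feature into the [0, 1] range instead of zero mean / unit variance.
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
X_minmax = pd.DataFrame(mms.fit_transform(np.array([[1., -2., 2.],
                                                    [5., 3., 2.],
                                                    [0., 1., -10.]])))
print(X_minmax)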

#     Class balancing (oversampling / undersampling)
############## inspect the class distribution: class 0 has close to 300,000 samples while class 1 has only a few hundred
count_classes = pd.value_counts(data['Class'],sort = True).sort_index()
count_classes.plot(kind = 'bar')
plt.title('Fraud class histogram')
plt.xlabel('Class')
plt.ylabel('Frequency')
plt.show()
############## Fix class imbalance, method 1 (undersampling): draw as many majority-class samples as there are minority-class samples, so the ratio becomes 1:1
# X = data.loc[:,data.columns != 'Class'] # feature matrix: every column except the Class label
# y = data.loc[:,data.columns == 'Class'] # label column only
# number_records_fraud = len(data[data.Class == 1]) # number of samples with label 1 (fraud)
# fraud_indices = np.array(data[data.Class == 1].index) # indices of the samples with label 1
# 
# normal_indices = data[data.Class == 0].index # indices of the samples with label 0 (normal)
# random_normal_indices = np.random.choice(normal_indices,number_records_fraud,replace = False) # randomly draw number_records_fraud indices from the normal samples
# random_normal_indices = np.array(random_normal_indices)
# 
# under_sample_indices = np.concatenate([fraud_indices,random_normal_indices]) # concatenate the fraud and sampled normal indices into one index array
# under_sample_data = data.loc[under_sample_indices,:] # select the rows for those indices
# X_undersample = under_sample_data.loc[:,under_sample_data.columns != 'Class'] # features of the undersampled data (everything except Class)
# y_undersample = under_sample_data.loc[:,under_sample_data.columns == 'Class'] # labels of the undersampled data
# 
# print('Percentage of normal transactions:',len(under_sample_data[under_sample_data.Class == 0])/len(under_sample_data)) # proportion of class-0 samples
# print('Percentage of fraud transactions:',len(under_sample_data[under_sample_data.Class == 1])/len(under_sample_data)) # proportion of class-1 samples
# print('Total number of transactions in resampled data:',len(under_sample_data)) # total sample count
############## Fix class imbalance, method 2 (oversampling): synthesize extra minority-class samples until the minority matches the majority (SMOTE)
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
# features_train,features_test,labels_train,label_test = train_test_split(features,labels,test_size = 0.2,random_state = 0)
# oversampler = SMOTE(random_state=0)
# os_features,os_labels = oversampler.fit_resample(features_train,labels_train) # fit_resample replaces the older fit_sample API
# # print(len(os_labels[os_labels == 1]))
# os_features = pd.DataFrame(os_features)
# os_labels = pd.DataFrame(os_labels)

#******************** Data transformation (feature engineering) ********************#



#******************** Modeling ********************#

#     Train/test split
# from sklearn.model_selection import train_test_split
# # split the original data
# X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 0)
# print('Number transactions train dataset:',len(X_train))
# print('Number transactions test dataset:',len(X_test))
# print('Total number of transactions:',len(X_train) + len(X_test))
# # split the undersampled (balanced) data
# X_train_undersample,X_test_undersample,y_train_undersample,y_test_undersample = train_test_split(X_undersample,y_undersample,test_size = 0.3,random_state = 0)
# print('')
# print('Number transactions train dataset:',len(X_train_undersample))
# print('Number transactions test dataset:',len(X_test_undersample))
# print('Total number of transactions:',len(X_train_undersample) + len(X_test_undersample))
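# # A hedged modeling sketch using the imports above: train LogisticRegression on the
# # undersampled data and check recall for the fraud class on the original test set.
# lr = LogisticRegression()
# lr.fit(X_train_undersample, y_train_undersample.values.ravel())
# y_pred = lr.predict(X_test.values)
# cm = confusion_matrix(y_test.values.ravel(), y_pred)
# print('Recall on the full test set:', cm[1,1] / (cm[1,0] + cm[1,1]))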
