数据分析师养成之路之python篇:(keras 中 roc和混淆矩阵)

本篇主要集中实现 roc和混淆矩阵(至于模型,这里随便建了一个(知道是个模型就好,当然太low,最后实现的效果也不太好),但,这里重点是除模型以外的内容)
陈述至此,开始进入正题—请看代码!
导入所有用到的包

from keras.datasets import cifar10
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.utils import to_categorical

from keras import Model,models
from keras.models import Sequential, load_model, Model
from keras.layers import Convolution2D, MaxPooling2D
import keras

from keras.callbacks import Callback

from sklearn.metrics import roc_auc_score

from sklearn.metrics import roc_curve,auc
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from sklearn.preprocessing import label_binarize

import numpy as np

下面是用到的 ROC,混淆矩阵的函数模块

def paintRoc(y_true, y_score):
    """Plot a micro-averaged ROC curve with its AUC.

    Parameters
    ----------
    y_true : array-like
        Binarized ground-truth labels (e.g. output of ``label_binarize``).
    y_score : array-like
        Predicted class scores/probabilities, same shape as ``y_true``.
    """
    # ravel() flattens all classes together -> micro-averaged ROC.
    fpr, tpr, thresholds = roc_curve(y_true.ravel(), y_score.ravel())
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, lw=5, alpha=0.8, color='r',
             label='Roc(AUC=%0.2f)' % (roc_auc))
    # Chance diagonal. Drawn in a different color than the ROC curve;
    # the original used 'r' for both, making them indistinguishable.
    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='b',
             label='Luck', alpha=.8)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC_auc(AUC=%0.2f)' % (roc_auc))
    plt.legend(loc="lower right")
    plt.show()
def paintConfusion_float(lmr_matrix, classes):
    """Plot a row-normalized confusion matrix with per-cell fractions.

    Parameters
    ----------
    lmr_matrix : ndarray
        Raw (count-valued) confusion matrix, as returned by
        ``sklearn.metrics.confusion_matrix``.
    classes : sequence
        Tick labels, in the same order as the matrix rows/columns.
    """
    # Normalize BEFORE drawing so the heatmap, the colorbar and the
    # cell text all show the same per-true-class fractions.  The
    # original normalized after imshow, so the image showed raw counts
    # while the text showed fractions.
    lmr_matrix = lmr_matrix.astype('float') / lmr_matrix.sum(axis=1)[:, np.newaxis]
    plt.imshow(lmr_matrix, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('confusion matrix')
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    plt.xlabel('Pre label')
    plt.ylabel('True label')
    fmt = '.2f'
    # Flip the text color at half of the maximum so it stays readable
    # on both light and dark cells.
    thresh = lmr_matrix.max() / 2.
    for i, j in itertools.product(range(lmr_matrix.shape[0]),
                                  range(lmr_matrix.shape[1])):
        plt.text(j, i, format(lmr_matrix[i, j], fmt),
                 horizontalalignment="center",
                 color="black" if lmr_matrix[i, j] > thresh else "red")
    plt.tight_layout()
    plt.show()

如果亲看过我前面的文章,请留意,本篇和之前的函数略有不同!

载入数据和模型(这里的模型实在不忍直视,能跳就跳吧)

# Load CIFAR-10 (50k train / 10k test RGB images, integer labels 0..9).
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# One-hot encode the integer class ids.  `classes` is keyword-only in
# scikit-learn >= 0.24, so it must be passed by name (the original
# positional call raises TypeError on modern sklearn).
y_test = label_binarize(y_test, classes=np.arange(10))
y_train = label_binarize(y_train, classes=np.arange(10))

# Scale pixel values from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# Minimal functional-API CNN over 32x32 RGB inputs: two 3x3 conv
# layers, one 2x2 max-pool, then dropout + a 10-way softmax head.
inputs = Input(shape=(32, 32, 3))
features = Convolution2D(filters=64, kernel_size=3, strides=1, padding='same',
                         activation='relu', kernel_initializer='he_normal')(inputs)
features = Convolution2D(filters=64, kernel_size=3, strides=1, padding='same',
                         activation='relu', kernel_initializer='he_normal')(features)
features = MaxPooling2D(pool_size=2, strides=2, padding='valid')(features)

features = Flatten()(features)
features = Dropout(0.2)(features)
outputs = Dense(units=10, activation='softmax')(features)

model = Model(inputs=inputs, outputs=outputs, name='model')
model.compile(loss='categorical_crossentropy', optimizer='Adam',
              metrics=['accuracy'])
model.summary()

callbacks模块:

class RocAuc(keras.callbacks.Callback):
    """Keras callback: draw a micro-averaged ROC curve on held-out data
    every ``interval`` epochs."""

    def __init__(self, validation_data, interval=1):
        # The original skipped the base-class initializer; Callback's
        # __init__ must run so keras can attach state to the instance.
        super().__init__()
        self.interval = interval
        self.x_val, self.y_val = validation_data

    def on_epoch_end(self, epoch, logs=None):
        """Called by keras after each epoch; plots the ROC curve."""
        if epoch % self.interval == 0:
            y_score = self.model.predict(self.x_val, verbose=0)
            paintRoc(self.y_val, y_score)
class Confusion(keras.callbacks.Callback):
    """Keras callback: plot a confusion matrix on held-out data every
    ``interval`` epochs."""

    def __init__(self, validation_data, interval=1):
        # Run the base Callback initializer (missing in the original).
        super().__init__()
        self.interval = interval
        self.x_val, self.y_val = validation_data

    def on_epoch_end(self, epoch, logs=None):
        """Called by keras after each epoch; plots the confusion matrix."""
        if epoch % self.interval == 0:
            pred_y = self.model.predict(self.x_val, verbose=0)
            true_label = np.argmax(self.y_val, axis=1)
            pred_label = np.argmax(pred_y, axis=1)
            lmr_matrix = confusion_matrix(true_label, pred_label)
            # np.unique returns the labels sorted, matching the row
            # order of confusion_matrix; the original passed a set,
            # whose iteration order is not guaranteed to match.
            paintConfusion_float(lmr_matrix, np.unique(true_label))


# Instantiate both callbacks on the test split (used as validation data
# during fit below).
rocauc=RocAuc(validation_data=(X_test,y_test))
Conf=Confusion(validation_data=(X_test,y_test))

训练模型

History=model.fit(X_train,y_train,batch_size=32,epochs=2,validation_data=(X_test,y_test),callbacks=[rocauc,Conf])

结尾小插曲

# Overlay the training- and validation-loss curves recorded by fit().
for metric_key, curve_color, curve_label in (('loss', 'g', 'train_loss'),
                                             ('val_loss', 'b', 'val_loss')):
    plt.plot(History.history[metric_key], color=curve_color, label=curve_label)
plt.legend(loc="lower right")

猜你喜欢

转载自blog.csdn.net/lulujiang1996/article/details/81540321