The Road to Becoming a Data Analyst, Keras Edition: ModelCheckpoint, Cross-Validation, and Related Implementations

1. Splitting and shuffling the dataset:
The main thing to learn here is how to use np.random.permutation; the code is straightforward, so it is not explained in detail.

import numpy as np
from keras import utils

# load the pre-computed bottleneck features and the labels from the generator
data = np.load(open('/home/LLwang/桌面/wang/bottle_train.npy', 'rb'))
train_labels = fold1train_generator.classes
y = utils.to_categorical(train_labels, 2)

# shuffle all indices once, then take the first 30% as the test set
# and the remaining 70% as the training set, so the two sets do not overlap
permutation = np.random.permutation(len(data))
n_test = int(len(data) * 0.30)
test_data = data[permutation[:n_test], :, :]
test_label = y[permutation[:n_test]]
train_data = data[permutation[n_test:], :, :]
train_label = y[permutation[n_test:]]
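For reference, here is a toy illustration (not from the original post) of what np.random.permutation returns and how the resulting index array shuffles data through fancy indexing:

import numpy as np

idx = np.random.permutation(5)      # a random ordering of 0..4, e.g. array([3, 0, 4, 1, 2])
x = np.array([10, 20, 30, 40, 50])
print(x[idx])                       # the same values, reordered by the index array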

2. Cross-validation in Keras:

from sklearn.model_selection import StratifiedKFold
from keras.callbacks import ModelCheckpoint
from keras import utils

# 5-fold stratified cross-validation
skf = StratifiedKFold(n_splits=5)
for cnt, (train, test) in enumerate(skf.split(data, lic1)):
    # note how the folds are indexed; for a DataFrame, use df.iloc[train] instead
    train_data = data[train, :, :]
    test_data = data[test, :, :]
    y = utils.to_categorical(lic1, 2)
    y_train = y[train]
    y_test = y[test]
    # save only the best weights (note: every fold writes to the same file)
    filepath = "/home/mrewang/桌面/wang/weights.best.hdf5"
    # to save a new checkpoint each time val_acc improves, use instead:
    # filepath = 'weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5'
    # keep the weights of the model at the epoch with the highest val_acc
    mc = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                         save_best_only=True, mode='max')
    callbacks_list = [mc]
    model.fit(train_data, y_train, epochs=20, batch_size=32,
              validation_data=(test_data, y_test), callbacks=callbacks_list)
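To actually compare folds rather than just checkpoint weights, one common pattern is to keep each fold's best val_acc from the History object that model.fit returns and average the scores afterwards. Below is a minimal sketch along those lines; the fold_scores list, the per-fold checkpoint filename, and the build_model() helper used to re-initialize weights each fold are illustrative assumptions, not part of the original post.

from sklearn.model_selection import StratifiedKFold
from keras.callbacks import ModelCheckpoint
from keras import utils

# data and lic1 are the same arrays used in the snippet above;
# build_model() stands for whatever function constructs and compiles your network
fold_scores = []
y = utils.to_categorical(lic1, 2)
skf = StratifiedKFold(n_splits=5)
for cnt, (train, test) in enumerate(skf.split(data, lic1)):
    model = build_model()  # re-initialize weights so the folds stay independent
    # give each fold its own checkpoint file so folds do not overwrite each other
    filepath = '/home/mrewang/桌面/wang/weights.best.fold%d.hdf5' % cnt
    mc = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                         save_best_only=True, mode='max')
    history = model.fit(data[train], y[train], epochs=20, batch_size=32,
                        validation_data=(data[test], y[test]), callbacks=[mc])
    fold_scores.append(max(history.history['val_acc']))

print('mean best val_acc over %d folds: %.4f'
      % (len(fold_scores), sum(fold_scores) / len(fold_scores)))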

3. Loading the saved weights:

# rebuild the model architecture first, then load the checkpointed weights
model.load_weights('/home/LLwang/桌面/wang/weights.best.hdf5')
# compile with the same optimizer and loss that were used during training
model.compile(optimizer='...',
              loss='binary_crossentropy', metrics=['accuracy'])
scores = model.evaluate(test_data, test_label, verbose=0)
print('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
print(model.predict(test_data))
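Because the labels were one-hot encoded with to_categorical, model.predict returns one probability per class. To turn those predictions into class indices, take the argmax along the last axis; this small follow-up example is not in the original post.

import numpy as np

probs = model.predict(test_data)           # shape (n_samples, 2), one probability per class
pred_classes = np.argmax(probs, axis=1)    # predicted class index for each sample
true_classes = np.argmax(test_label, axis=1)
print('accuracy: %.2f%%' % (100.0 * np.mean(pred_classes == true_classes)))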


Reposted from blog.csdn.net/lulujiang1996/article/details/81167362