Formal test results

The left and right figures show the pairwise differences, measured by Euclidean distance, between features extracted with the DenseNet network. From the results it is clear that the two distance distributions overlap far too much. The test set contains 21,961 classes and 44,761 images in total. I did not measure the accuracy with a concrete metric, because it is obvious at a glance that the result is not good. The code is as follows:

# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 08:27:55 2018
@author: w
"""
import tqdm
from keras.models import Model
from keras.layers.convolutional import ZeroPadding2D,Conv2D
from keras.layers import Input,concatenate,Activation
from keras.layers.core import Dense, Dropout
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import keras.backend as K
import matplotlib.pyplot as plt
from keras.optimizers import SGD
from custom_layers import Scale   # custom per-channel scale/shift layer applied after BatchNormalization
import os
import numpy as np
from PIL import Image
def DenseNet(nb_dense_block=2, growth_rate=32, nb_filter=64, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, classes=61583, weights_path=None):
    # Two-dense-block DenseNet classifier for 45x32 single-channel fingerprint patches.
    eps = 1.1e-5
    compression = 1.0 - reduction
    global concat_axis
    if K.image_dim_ordering() == 'tf':   # 'tf' = channels-last (TensorFlow ordering)
      concat_axis = 3
      img_input = Input(shape=(45, 32, 1), name='data')
    else:
      concat_axis = 1
      img_input = Input(shape=(1, 45, 32), name='data')
    nb_filter = 64          # initial number of filters (re-assigned here, overriding the argument)
    nb_layers = [24, 16]    # number of conv_blocks in each of the two dense blocks
    # Initial convolution stem: 7x7 conv with stride 2, then 3x3 max pooling.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Conv2D(nb_filter,(7, 7), name='conv1',strides=(2, 2))(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
    # Dense blocks, each followed by a transition block (the final dense block has no transition).
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx+2
        x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
        x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)
    final_stage = stage + 1
    x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
    x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
    x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)
    x = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
    x = Dense(classes, name='fc6')(x)   # classification head; use the classes argument rather than a hard-coded 61583
    x = Activation('softmax', name='prob')(x)
    model = Model(img_input, x, name='densenet')
    if weights_path is not None:
      model.load_weights(weights_path)
    return model
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    # Bottleneck composite layer: BN -> Scale -> ReLU -> 1x1 conv, then BN -> Scale -> ReLU -> 3x3 conv.
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)
    inter_channel = nb_filter * 4  
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Conv2D(inter_channel, (1, 1), name=conv_name_base+'_x1',use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Conv2D(nb_filter,(3, 3), name=conv_name_base+'_x2', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    # Transition between dense blocks: BN -> Scale -> ReLU -> 1x1 conv (with compression) -> 2x2 average pooling.
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage) 
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Conv2D(int(nb_filter * compression),( 1, 1), name=conv_name_base, use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)
    return x
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    # Dense block: each conv_block output is concatenated with all preceding feature maps.
    eps = 1.1e-5
    concat_feat = x
    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x],axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))
        if grow_nb_filters:
            nb_filter += growth_rate
    return concat_feat, nb_filter

if __name__ == '__main__':
    model = DenseNet()
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    model.load_weights('D:/python_work/fingerprint_part_model.h5')
    # 'pool3' is the final GlobalAveragePooling2D layer, so this sub-model outputs the pooled feature vector.
    feature = Model(inputs=model.input, outputs=model.get_layer('pool3').output)
    path_test = 'D:/python_work/Distence_sample/'   # one sub-folder per fingerprint class
    pics = os.listdir(path_test)
    a_all = []                                       # Euclidean distances of the sampled image pairs
    for pic in tqdm.tqdm(pics):
        srcs = os.listdir(os.path.join(path_test, pic))
        for i in range(len(srcs)):
            # Feature vector of the current image.
            picture = Image.open(os.path.join(path_test, pic, srcs[i]))
            arr = np.asarray(picture, dtype='float32')
            arr /= 8.
            arr = arr.reshape(1, 45, 32, 1)
            feature_out = feature.predict(arr)
            # Feature vector of a randomly chosen image from a random class
            # (this can occasionally pick the same class as `pic`).
            e = np.random.randint(0, len(pics))
            picture_ot = os.path.join(path_test, pics[e])
            pictur2 = os.listdir(picture_ot)
            arr1 = Image.open(os.path.join(picture_ot, pictur2[np.random.randint(0, len(pictur2))]))
            arr1 = np.asarray(arr1, dtype='float32')
            arr1 /= 8.
            arr1 = arr1.reshape(1, 45, 32, 1)
            feature_out1 = feature.predict(arr1)
            # Euclidean distance between the two feature vectors
            # (equivalent to np.linalg.norm(feature_out - feature_out1)).
            a = np.sqrt(np.sum(np.square(feature_out - feature_out1)))
            a_all.append(a)
    # Scatter plot of all sampled pair distances.
    number = len(a_all)
    n_part = range(number)
    plt.scatter(n_part, a_all)
    plt.show()
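As noted above, no accuracy figure was computed because the two distance distributions overlap heavily. A rough way to put a number on that overlap is to sweep a decision threshold over the collected distances and report the best achievable pair-classification accuracy (a pair counts as "same finger" when its feature distance falls below the threshold). The sketch below is only an illustration and assumes the same-finger and different-finger distances have already been collected into two separate lists; the names same_finger_dists and diff_finger_dists are hypothetical and are not produced by the script above.

import numpy as np

def best_threshold_accuracy(same_finger_dists, diff_finger_dists):
    # Try every observed distance as a threshold; a pair is predicted
    # "same finger" when its feature distance is below the threshold.
    same = np.asarray(same_finger_dists, dtype='float32')
    diff = np.asarray(diff_finger_dists, dtype='float32')
    candidates = np.unique(np.concatenate([same, diff]))
    total = float(len(same) + len(diff))
    best_acc, best_thr = 0.0, float(candidates[0])
    for thr in candidates:
        correct = np.sum(same < thr) + np.sum(diff >= thr)
        acc = correct / total
        if acc > best_acc:
            best_acc, best_thr = acc, float(thr)
    return best_acc, best_thr

# Example usage with hypothetical distance lists:
# acc, thr = best_threshold_accuracy(same_finger_dists, diff_finger_dists)
# print('best accuracy %.3f at threshold %.2f' % (acc, thr))

If the best accuracy stays close to chance for every threshold, that matches the visual impression that these DenseNet features do not separate same-finger and different-finger pairs well.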

Reposted from blog.csdn.net/haoyu_does/article/details/84846754