Implementing VGG16 in Keras (predicting a single image)

The first approach loads the ready-made pretrained network from keras.applications; its ImageNet weights are downloaded automatically the first time it is used:
from keras.applications.vgg16 import VGG16  # the VGG16 network, complete with pretrained ImageNet weights
from keras.preprocessing.image import load_img  # load_img reads an image file from disk
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions

model = VGG16()
# target_size standardizes the image to 224x224, so whatever the original
# dimensions, the input matches what the network expects.
image = load_img('D:\\photo\\dog.jpg', target_size=(224, 224))
image = img_to_array(image)  # convert the pixel data to a NumPy array so Keras can use it
# The network takes one or more images as input, so the input array needs
# 4 dimensions: samples, rows, columns, and channels. Since there is only
# one sample (a single image), the array must be reshaped accordingly.
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)  # the same preprocessing used in VGG16 training (RGB->BGR, mean subtraction)
y = model.predict(image)  # predict class probabilities
# decode_predictions() interprets the prediction vector: it returns a list of
# (class_id, class_name, probability) tuples, ordered by probability.
label = decode_predictions(y)
label = label[0][0]  # the single most probable class
print('%s(%.2f%%)' % (label[1], label[2] * 100))
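
decode_predictions returns the five most probable classes by default, so the whole candidate list can be printed just as easily; a minimal sketch, continuing from the y computed above:

for class_id, class_name, score in decode_predictions(y, top=5)[0]:
    print('%s: %.2f%%' % (class_name, score * 100))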
# print(model.summary())
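
The second approach builds the same 16-layer architecture by hand with the Sequential API and loads weights from a file that has already been downloaded:
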
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import numpy as np

from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
import time
from keras import backend as K
K.set_image_dim_ordering('th')  # 'th' = Theano-style channels-first ordering: (channels, rows, cols)
def VGG_16(weights_path=None):
    model = Sequential()

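    # Block 1: two 3x3 convolutions with 64 filters, then 2x2 max pooling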
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

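    # Block 2: two 3x3 convolutions with 128 filters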
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

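    # Block 3: three 3x3 convolutions with 256 filters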
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

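    # Block 4: three 3x3 convolutions with 512 filters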
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

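    # Block 5: three 3x3 convolutions with 512 filters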
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

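    # Classifier: flatten, two 4096-unit dense layers with dropout, and a 1000-way softmax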
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path, by_name=True)  # by_name=True loads only layers whose names match the file

    return model

model = VGG_16(weights_path='F:\\Kaggle\\vgg16_weights.h5')
# compile() needs an optimizer and a loss, but their settings are irrelevant
# here: the model is only used for inference, not training.
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')

t0 = time.time()
img = image.load_img('D:\\photo\\dog.jpg', target_size=(224, 224))
x = image.img_to_array(img)  # 3-D array, (3, 224, 224) under the channels-first ordering set above
x = np.expand_dims(x, axis=0)  # add a samples dimension -> (1, 3, 224, 224); Keras expects 4-D input
x = preprocess_input(x)  # preprocessing
print(x.shape)
y_pred = model.predict(x)  # predicted probabilities

t1 = time.time()

print("Test image:", decode_predictions(y_pred))  # the five most probable (class_id, class_name, probability) tuples
print("Elapsed time:", str((t1 - t0) * 1000), "ms")

These are two different approaches: the first uses the pretrained VGG16 shipped with Keras, whose parameters are downloaded at run time; the second uses weights we have already downloaded, loaded simply by passing the local path as a parameter.
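
The two can also be combined: keras.applications accepts a path to a local weights file in place of the default 'imagenet' argument, keeping the convenience of the first approach without the runtime download. A minimal sketch (the file path below is an assumption; substitute whichever weights file you actually saved):

from keras.applications.vgg16 import VGG16

# hypothetical local path; the file must be VGG16 weights in Keras's own saved format
model = VGG16(weights='D:\\models\\vgg16_weights_tf_dim_ordering_tf_kernels.h5')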

Reposted from blog.csdn.net/sunshunli/article/details/81456566