Visualizing a self-trained convolutional neural network with Keras

View neural network

# Load a trained model together with the custom keras_metrics objects it
# was compiled with, then print its layer-by-layer architecture.
import keras_metrics as km
from keras.engine.saving import load_model

custom_metrics = {
    'categorical_precision': km.categorical_precision(),
    'categorical_recall': km.categorical_recall(),
    'categorical_f1_score': km.categorical_f1_score(),
}
model = load_model("D:/Users/JiajunBernoulli/MyProject/Tigers/stripes_detection_by_keras/right/models/ResNet34_0_1/tiger.models.h5", custom_objects=custom_metrics)

model.summary()

Note that input_1 is layer 0, not layer 1.
Insert picture description here

Get the target layer

By looking at the neural network, the index of the target layer can be found, and then the target layer can be obtained through the index.

# Reload the model and print the output tensors of the layers we want to
# visualize (indices found by inspecting model.summary(); note that the
# input layer is index 0, not 1).
import keras_metrics as km
from keras.engine.saving import load_model

custom_metrics = {
    'categorical_precision': km.categorical_precision(),
    'categorical_recall': km.categorical_recall(),
    'categorical_f1_score': km.categorical_f1_score(),
}

model = load_model("D:/Users/JiajunBernoulli/MyProject/Tigers/stripes_detection_by_keras/right/models/ResNet34_0_2/tiger.models.h5", custom_objects=custom_metrics)
# Collect the output tensors of the target layers by index.
layer_indices = [2, 4]
outputs = [model.layers[idx].output for idx in layer_indices]
for layer_output in outputs:
    print(layer_output)

The outputs of the second and fourth layers were obtained.

Insert picture description here

Convolution and pooling comparison

Load picture

# Load the image at the model's input resolution, convert it to an
# array, add a batch dimension, and apply the model's preprocessing.
img = load_img('./images/bird.jpg', target_size=(224, 224))
img = preprocess_input(expand_dims(img_to_array(img), axis=0))

Prediction picture

Running the loaded image through the model yields a list with one four-dimensional array per target layer: within each array, the first dimension is the batch, the second and third dimensions are the image height and width, and the fourth dimension indexes that layer's filters.

# Build a truncated model whose outputs are the selected layer tensors,
# then run the preprocessed image through it.  `feature_maps` is a list
# with one 4-D array per target layer: (batch, height, width, channels).
model = Model(inputs=model.inputs, outputs=outputs)
feature_maps = model.predict(img)
print(len(feature_maps))

conv_map = feature_maps[0]
print(conv_map.shape)  # fix: removed stray trailing quote character that broke this line

pool_map = feature_maps[1]
print(pool_map.shape)

Since there are only two target layers, there are exactly two predicted outputs.
Insert picture description here

Save result

Saving the maps directly preserves their original pixel size, so the size reduction after pooling is immediately visible. Plotting them instead would require fiddly pixel adjustments to keep the original size.

# Save one grayscale channel of each feature map at native resolution,
# so the size change caused by pooling is visible in the output files.
for out_name, fmap in (("conv_map.png", conv_map), ("pool_map.png", pool_map)):
    pyplot.imsave(out_name, fmap[0, :, :, 0], cmap='gray')

It is easy to see the effect of pooling
Insert picture description here

Comparison of different filters

This code originated from a blogger abroad; I made only minor modifications.

# Visualize the feature maps produced by several layers of a trained
# network for a single input image, one 8x8 grid of channels per layer.
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.engine.saving import load_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import Model
from matplotlib import pyplot
from numpy import expand_dims
import keras_metrics as km

# Custom metrics the model was compiled with; load_model needs them.
custom_metrics = {
    'categorical_precision': km.categorical_precision(),
    'categorical_recall': km.categorical_recall(),
    'categorical_f1_score': km.categorical_f1_score(),
}

model = load_model("D:/Users/JiajunBernoulli/MyProject/Tigers/stripes_detection_by_keras/right/models/ResNet34_0_2/tiger.models.h5", custom_objects=custom_metrics)

# Output tensors of the layers we want to visualize.
layer_indices = [2, 5, 7]
outputs = [model.layers[idx].output for idx in layer_indices]

# Load the input image, add a batch dimension, and preprocess it.
img = load_img('./images/bird.jpg', target_size=(224, 224))
img = preprocess_input(expand_dims(img_to_array(img), axis=0))

# Truncated model that returns the selected feature maps.
model = Model(inputs=model.inputs, outputs=outputs)
feature_maps = model.predict(img)

# For each target layer, display the first 64 channels in an 8x8 grid.
square = 8
for fmap in feature_maps:
    for cell in range(square * square):
        # Subplot indices are 1-based; channel indices are 0-based.
        ax = pyplot.subplot(square, square, cell + 1)
        ax.set_xticks([])
        ax.set_yticks([])
        # Channel `cell` of the single image in the batch, in grayscale.
        pyplot.imshow(fmap[0, :, :, cell], cmap='gray')
    pyplot.show()

Insert picture description here

Guess you like

Origin blog.csdn.net/weixin_44112790/article/details/99705233