[Deep Learning] Keras implementations of classic network architectures (DeepDense, LeNet, AlexNet, ZFNet) for MNIST classification

Experiment Overview

  This experiment has two goals: getting familiar with the Keras Sequential model API, and reproducing several early classic network architectures to practice building neural networks. The dataset is the well-known MNIST handwritten-digit set, and the frameworks are TensorFlow and Keras; for importing the dataset and installing the frameworks, see here. The classic architectures have already been analyzed in depth in many blog posts, so only the code is given here for reference; for the evolution of neural network architectures, this article is recommended reading.

DeepDense

  This is a name I made up myself; it is simply a deep fully-connected (dense) network.

# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.utils import np_utils

# Load the dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Hyperparameters
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 2
img_shape = (img_rows,img_cols,1) 

# Preprocessing: flatten each image and scale pixel values to roughly [-1, 1]
X_train = X_train.reshape(len(X_train), -1)
X_test = X_test.reshape(len(X_test), -1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = (X_train - 127) / 127
X_test = (X_test - 127) / 127

# Convert class labels to one-hot vectors
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)

# Build the network
model = Sequential()

model.add(Dense(512, input_shape=(784,), kernel_initializer='he_normal'))  # fully connected layer
model.add(Activation('relu'))
model.add(Dropout(0.2)) 

model.add(Dense(512, kernel_initializer='he_normal'))
model.add(Activation('relu'))
model.add(Dropout(0.2)) 

model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Training
# epochs is the number of passes over the training data; 2 is used here to get results quickly, raise it to 5 for the final run
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

# Report results
print('Test loss:', loss)
print('Accuracy:', accuracy)
model.summary()
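
Once the script above has run, the trained model can be used for inference directly. The snippet below is a minimal sketch (not part of the original post) that predicts the first five test digits, using np.argmax to turn the softmax output and the one-hot labels back into class indices:

import numpy as np

probs = model.predict(X_test[:5])            # class probabilities for five test images
pred_labels = np.argmax(probs, axis=1)       # index of the highest softmax score
true_labels = np.argmax(y_test[:5], axis=1)  # undo the one-hot encoding
print('Predicted:   ', pred_labels)
print('Ground truth:', true_labels)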

LeNet

# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""

import numpy as np
import matplotlib.pyplot as plt

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout
from keras.utils import np_utils

# Load the dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Hyperparameters
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 2
img_shape = (img_rows,img_cols,1) 

# Preprocessing: add a channel dimension and scale pixels to [0, 1]
X_train = X_train.reshape(-1, img_rows, img_cols, 1)  # (N, 28, 28, 1)
X_test = X_test.reshape(-1, img_rows, img_cols, 1)
X_train = X_train / 255
X_test = X_test / 255

# Convert class labels to one-hot vectors
y_train = np_utils.to_categorical(y_train, num_classes=num_classes)
y_test = np_utils.to_categorical(y_test, num_classes=num_classes)

# Build the network
model = Sequential()

model.add(Conv2D(input_shape=img_shape, kernel_size=(5, 5), filters=20, activation='relu'))  # convolution layer
model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'))  # pooling layer

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# Training
# epochs is the number of passes over the training data; 2 is used here to get results quickly, raise it to 5 for the final run
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

# Report results
print('Test loss:', loss)
print('Accuracy:', accuracy)
model.summary()
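
The LeNet script imports matplotlib but never uses it; an obvious use is plotting learning curves. The snippet below is a minimal sketch (not from the original post): it assumes you capture the return value of model.fit, e.g. history = model.fit(...), and that the metric keys are 'acc'/'val_acc' (newer Keras versions use 'accuracy'/'val_accuracy' instead):

import matplotlib.pyplot as plt

# history = model.fit(...)  # capture the History object returned by fit
plt.plot(history.history['acc'], label='train accuracy')
plt.plot(history.history['val_acc'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()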

AlexNet

# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
import tensorflow as tf
 
# Load the dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Hyperparameters
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 5
img_shape = (img_rows,img_cols,1) 
 
# Preprocessing: add a channel dimension and scale pixels to [0, 1]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_train = X_train / 255
X_test = X_test / 255

# Convert class labels to one-hot vectors
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Build the network
model = Sequential()

model.add(Conv2D(input_shape=img_shape, kernel_size=(11, 11), filters=96, activation='relu', strides=(1, 1), padding='valid'))  # convolution layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))  # pooling layer
model.add(BatchNormalization(axis=-1))  # normalize over the channel axis (data is channels_last)

model.add(Conv2D(kernel_size=(5, 5), filters=256, activation='relu', strides=(1, 1), padding='same'))  # convolution layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))  # pooling layer
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=(1, 1), padding='same'))  # convolution layer
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=(1, 1), padding='same'))  # convolution layer
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=256, activation='relu', strides=(1, 1), padding='same'))  # convolution layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))  # pooling layer
model.add(BatchNormalization(axis=-1))

model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5)) 
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5)) 
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
 

# Training configuration, for reference only
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

# Report results
print('Test loss:', loss)
print('Accuracy:', accuracy)    
model.summary()
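
After a run like this, it is convenient to keep the trained weights around instead of retraining every time. The snippet below is a minimal sketch (not from the original post) using the standard Keras save/load API; the file name alexnet_mnist.h5 is just a placeholder:

from keras.models import load_model

model.save('alexnet_mnist.h5')             # architecture + weights + optimizer state
restored = load_model('alexnet_mnist.h5')  # rebuild the model from the file
loss, accuracy = restored.evaluate(X_test, y_test, verbose=0)
print('Restored model accuracy:', accuracy)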

ZFNet

# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
import tensorflow as tf
 
# Load the dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Hyperparameters
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 5
img_shape = (img_rows,img_cols,1) 
 
# Preprocessing: add a channel dimension and scale pixels to [0, 1]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_train = X_train / 255
X_test = X_test / 255


# Convert class labels to one-hot vectors
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Build the network
model = Sequential()

model.add(Conv2D(input_shape=img_shape, kernel_size=(7, 7), filters=96, activation='relu', strides=(1, 1), padding='valid'))  # convolution layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))  # pooling layer
model.add(BatchNormalization(axis=-1))  # normalize over the channel axis (data is channels_last)

model.add(Conv2D(kernel_size=(5, 5), filters=256, activation='relu', strides=(2, 2), padding='same'))  # convolution layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1)))  # pooling layer
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=(1, 1), padding='same'))  # convolution layer
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=(1, 1), padding='same'))  # convolution layer
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=256, activation='relu', strides=(1, 1), padding='same'))  # convolution layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1)))  # pooling layer
model.add(BatchNormalization(axis=-1))

model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5)) 
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5)) 
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
 

# Training configuration, for reference only
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

# Report results
print('Test loss:', loss)
print('Accuracy:', accuracy)
model.summary()
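
Beyond the overall accuracy, it can be useful to see which digits the network confuses with each other. The snippet below is a minimal sketch (not from the original post), assuming the trained model and the one-hot encoded y_test from the script above; it builds a 10x10 confusion matrix with plain NumPy:

import numpy as np

pred = np.argmax(model.predict(X_test), axis=1)  # predicted digit per test image
true = np.argmax(y_test, axis=1)                 # ground-truth digit (undo the one-hot encoding)

confusion = np.zeros((num_classes, num_classes), dtype=int)
for t, p in zip(true, pred):
    confusion[t, p] += 1                         # rows: true digit, columns: predicted digit
print(confusion)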
