Implementing a Simple Convolutional Network in TensorFlow

From 《TensorFlow实战》 ("TensorFlow in Action"), the chapter on implementing a convolutional neural network in TensorFlow.
This post explains how to implement a simple convolutional neural network, save the trained model, and use it for a final prediction. The dataset is MNIST.
Network structure: two convolutional layers (each followed by max pooling) + one fully connected layer, plus a 10-way output layer.
Code files: creat_model_CNN.py
predict.py
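
For orientation, the tensor shapes flow through the network as follows (N is the batch size; this summary is read directly off the code below):

input                            N x 28 x 28 x 1
conv1 (5x5, 32 kernels, SAME) -> N x 28 x 28 x 32
max pool 2x2                  -> N x 14 x 14 x 32
conv2 (5x5, 64 kernels, SAME) -> N x 14 x 14 x 64
max pool 2x2                  -> N x 7 x 7 x 64
flatten                       -> N x 3136 (= 7*7*64)
fc1 + ReLU + dropout          -> N x 1024
fc2 (logits)                  -> N x 10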

Implementation details:
1. creat_model_CNN.py

"""
Created on Thu Nov 16 20:50:24 2017

@author: ASUS
"""

Load the MNIST dataset

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

Define the initialization functions for the parameters: weights and biases
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

myGraph = tf.Graph()
with myGraph.as_default():
    with tf.name_scope('inputsAndLabels'):
        x_raw = tf.placeholder(tf.float32, shape=[None, 784])
        y = tf.placeholder(tf.float32, shape=[None, 10])

Define the first convolutional layer: 5x5 kernels, 1 input channel, 32 different kernels

    with tf.name_scope('hidden1'):
        x = tf.reshape(x_raw, shape=[-1,28,28,1])
        W_conv1 = weight_variable([5,5,1,32])
        b_conv1 = bias_variable([32])
        l_conv1 = tf.nn.relu(tf.nn.conv2d(x,W_conv1, strides=[1,1,1,1],padding='SAME') + b_conv1)
        # max pooling layer
        l_pool1 = tf.nn.max_pool(l_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
        # l_conv1 shape: 28x28x32, l_pool1 shape: 14x14x32

        tf.summary.image('x_input',x,max_outputs=10)
        tf.summary.histogram('W_con1',W_conv1)

Define the second convolutional layer: 5x5 kernels, 32 input channels, 64 different kernels

    with tf.name_scope('hidden2'):
        W_conv2 = weight_variable([5,5,32,64])
        b_conv2 = bias_variable([64])
        l_conv2 = tf.nn.relu(tf.nn.conv2d(l_pool1, W_conv2, strides=[1,1,1,1], padding='SAME')+b_conv2)
        # max pooling layer
        l_pool2 = tf.nn.max_pool(l_conv2, ksize=[1,2,2,1],strides=[1,2,2,1], padding='SAME')
        # l_conv2 shape: 14x14x64, l_pool2 shape: 7x7x64

        tf.summary.histogram('W_con2', W_conv2)
        tf.summary.histogram('b_con2', b_conv2)


Define the fully connected layer: reshape the output tensor of the second convolutional layer into a 1-D vector, connect a fully connected layer with 1024 hidden units, and apply the ReLU activation

    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([64*7*7, 1024])
        b_fc1 = bias_variable([1024])
        l_pool2_flat = tf.reshape(l_pool2, [-1, 64*7*7])
        l_fc1 = tf.nn.relu(tf.matmul(l_pool2_flat, W_fc1) + b_fc1)
        # dropout layer, to mitigate overfitting
        keep_prob = tf.placeholder(tf.float32)
        l_fc1_drop = tf.nn.dropout(l_fc1, keep_prob)

        tf.summary.histogram('W_fc1', W_fc1)
        tf.summary.histogram('b_fc1', b_fc1)

    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])
        y_conv = tf.matmul(l_fc1_drop, W_fc2) + b_fc2

        tf.summary.histogram('W_fc2', W_fc2)
        tf.summary.histogram('b_fc2', b_fc2)

    with tf.name_scope('train'):
        # softmax and cross entropy combined into a single function
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y))
        # choose an optimizer and let it minimize the loss (backpropagation)
        train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy)
        # define the evaluation accuracy
        correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y, 1))
        # average the correct predictions to get the accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        tf.summary.scalar('loss', cross_entropy)
        tf.summary.scalar('accuracy', accuracy)
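
A note on the loss above: tf.nn.softmax_cross_entropy_with_logits applies softmax to the raw logits and computes the cross entropy in one op, i.e. per example -sum(labels * log(softmax(logits))). A minimal NumPy sketch of that arithmetic (my illustration, not part of the model; the real op additionally uses a numerically stable log-sum-exp formulation):

import numpy as np

logits = np.array([2.0, 1.0, 0.1])                 # raw scores for 3 classes
labels = np.array([1.0, 0.0, 0.0])                 # one-hot ground truth

softmax = np.exp(logits) / np.sum(np.exp(logits))  # ~[0.659, 0.242, 0.099]
loss = -np.sum(labels * np.log(softmax))           # -log(0.659) ~ 0.417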


with tf.Session(graph=myGraph) as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    merged = tf.summary.merge_all()  # merge all summaries into one op
    summary_writer = tf.summary.FileWriter('./mnistEven/', graph=sess.graph)  # event files for TensorBoard go here (view with: tensorboard --logdir ./mnistEven/)

    for i in range(10001):
        # each iteration of the loop is one training step
        batch = mnist.train.next_batch(50)
        sess.run(train_step,feed_dict={x_raw:batch[0], y:batch[1], keep_prob:0.5})
        if i%100 == 0:
            train_accuracy = accuracy.eval(feed_dict={x_raw:batch[0], y:batch[1], keep_prob:1.0})
            print('step %d training accuracy:%g'%(i, train_accuracy))

            summary = sess.run(merged,feed_dict={x_raw:batch[0], y:batch[1], keep_prob:1.0})  # evaluate the merged summaries
            summary_writer.add_summary(summary,i)  # every 100 steps, write all collected summaries to the event file

    test_accuracy = accuracy.eval(feed_dict={x_raw:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})
    print('test accuracy:%g' %test_accuracy)

    # save the model; with global_step=1 the checkpoint files are written under the prefix ./model/mnistmodel-1
    saver.save(sess,save_path='./model/mnistmodel',global_step=1)
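
The "-1" step suffix is why predict.py below restores './model/mnistmodel-1'. As an aside (my addition, assuming the same ./model directory), the newest checkpoint prefix can also be looked up instead of hard-coding the step:

ckpt = tf.train.latest_checkpoint('./model')  # e.g. './model/mnistmodel-1'
saver.restore(sess, ckpt)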
Reference for the functions used in the code above (a quick shape-check sketch follows this list):
tf.nn.dropout
tf.truncated_normal
tf.nn.conv2d
tf.nn.max_pool
tf.reshape
tf.cast
tf.nn.softmax_cross_entropy_with_logits
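
The sketch below (my addition, not from the original post) checks the shape behavior of these ops under TensorFlow 1.x; the static shapes print without running a session:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    img = tf.truncated_normal([1, 28, 28, 1], stddev=0.1)       # one fake 28x28 grayscale image
    k = tf.truncated_normal([5, 5, 1, 32], stddev=0.1)          # 5x5 kernels, 1 -> 32 channels
    conv = tf.nn.conv2d(img, k, strides=[1,1,1,1], padding='SAME')
    pool = tf.nn.max_pool(conv, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    flat = tf.reshape(pool, [-1, 14*14*32])
    drop = tf.nn.dropout(flat, keep_prob=0.5)
    hits = tf.cast(tf.equal([1, 2, 3], [1, 0, 3]), tf.float32)  # bool -> float, as in the accuracy code

    print(conv.shape)  # (1, 28, 28, 32): SAME padding keeps height and width
    print(pool.shape)  # (1, 14, 14, 32): 2x2 pooling halves height and width
    print(drop.shape)  # (1, 6272)
    print(hits.shape)  # (3,): would evaluate to [1., 0., 1.]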

2. predict.py

# -*- coding: utf-8 -*-

"""
Created on Thu Nov 16 22:25:18 2017
Restores the parameters of the trained CNN model, then feeds in an image and prints the predicted digit
@author: ASUS
"""

import tensorflow as tf
from PIL import Image,ImageFilter
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def imageprepare(argv):  # reads one image, preprocesses it, and returns an array to feed into the network
    """
    This function returns the pixel values.
    The input is a png file location.
    """
    im = Image.open(argv).convert('L')
    width = float(im.size[0])
    height = float(im.size[1])
    newImage = Image.new('L', (28, 28), (255))  # creates white canvas of 28x28 pixels

    if width > height:  # check which dimension is bigger
        # Width is bigger. Width becomes 20 pixels.
        nheight = int(round((20.0 / width * height), 0))  # resize height according to ratio width
        if nheight == 0:  # rare case but minimum is 1 pixel
            nheight = 1
        # resize and sharpen
        img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wtop = int(round(((28 - nheight) / 2), 0))  # calculate vertical position
        newImage.paste(img, (4, wtop))  # paste resized image on white canvas
    else:
        # Height is bigger. Height becomes 20 pixels.
        nwidth = int(round((20.0 / height * width), 0))  # resize width according to ratio height
        if nwidth == 0:  # rare case but minimum is 1 pixel
            nwidth = 1
        # resize and sharpen
        img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wleft = int(round(((28 - nwidth) / 2), 0))  # calculate horizontal position
        newImage.paste(img, (wleft, 4))  # paste resized image on white canvas

    # newImage.save("sample.png")

    tv = list(newImage.getdata())  # get pixel values

    # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.
    tva = [(255 - x) * 1.0 / 255.0 for x in tv]
    return tva
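
As a concrete example of what imageprepare does (my numbers, not from the post): a 100x50 input is resized to 20x10, sharpened, and pasted at (4, 9) so the digit sits centered on the 28x28 white canvas; the returned list then has 28*28 = 784 values in [0, 1], with dark strokes mapped close to 1, matching the MNIST encoding the network was trained on.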

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

myGraph = tf.Graph()
with myGraph.as_default():  # rebuild the same network structure
    with tf.name_scope('inputsAndLabels'):
        x_raw = tf.placeholder(tf.float32, shape=[None, 784])
        y = tf.placeholder(tf.float32, shape=[None, 10])

    with tf.name_scope('hidden1'):
        x = tf.reshape(x_raw, shape=[-1,28,28,1])
        W_conv1 = weight_variable([5,5,1,32])
        b_conv1 = bias_variable([32])
        l_conv1 = tf.nn.relu(tf.nn.conv2d(x,W_conv1, strides=[1,1,1,1],padding='SAME') + b_conv1)
        l_pool1 = tf.nn.max_pool(l_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

    with tf.name_scope('hidden2'):
        W_conv2 = weight_variable([5,5,32,64])
        b_conv2 = bias_variable([64])
        l_conv2 = tf.nn.relu(tf.nn.conv2d(l_pool1, W_conv2, strides=[1,1,1,1], padding='SAME')+b_conv2)
        l_pool2 = tf.nn.max_pool(l_conv2, ksize=[1,2,2,1],strides=[1,2,2,1], padding='SAME')

    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([64*7*7, 1024])
        b_fc1 = bias_variable([1024])
        l_pool2_flat = tf.reshape(l_pool2, [-1, 64*7*7])
        l_fc1 = tf.nn.relu(tf.matmul(l_pool2_flat, W_fc1) + b_fc1)
        keep_prob = tf.placeholder(tf.float32)
        l_fc1_drop = tf.nn.dropout(l_fc1, keep_prob)

    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])
        y_conv = tf.matmul(l_fc1_drop, W_fc2) + b_fc2

with tf.Session(graph=myGraph) as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    saver.restore(sess, './model/mnistmodel-1')  # restore the saved parameters

    array = imageprepare('./6.png')  # read an image containing a digit

    prediction = tf.argmax(y_conv, 1)  # predict
    prediction = prediction.eval(feed_dict={x_raw:[array], keep_prob:1.0}, session=sess)
    print('The digit in this image is: %d' % prediction[0])


To be continued and revised...

Reposted from blog.csdn.net/u011296723/article/details/78048096