TensorFlow Tutorial Series (Part 2)

Recognizing the MNIST dataset with a simple convolutional neural network

In the previous post, we used a simple neural network to classify the MNIST dataset and reached 98% accuracy. This time we will classify the same dataset with a convolutional neural network.

Program

The program only needs convolution, pooling, and related operations added on top of the previous one.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download/load the MNIST dataset with one-hot labels
data_dir = '/tmp/tensorflow/mnist/input_data'
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# Input: flattened 28x28 grayscale images
x = tf.placeholder(tf.float32, [None, 784])

# Ground-truth labels (one-hot)
y_ = tf.placeholder(tf.float32, [None, 10])

# Define the weight and bias initializers
def init_weight(shape, st_dev=0.1):
    '''
    random_normal draws samples from a normal distribution with the given
    mean and standard deviation stddev; shape is the tensor shape.
    '''
    weight = tf.Variable(tf.random_normal(shape, stddev=st_dev))
    return weight

def init_bias(shape, st_dev=0.1):
    bias = tf.Variable(tf.random_normal(shape, stddev=st_dev))
    return bias


# Convolution with 1x1 strides and SAME padding (keeps the spatial size)
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# 2x2 max pooling with stride 2 (halves the spatial size)
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Convolutional layer: convolution + bias + ReLU
def conv(input_layer, weight, bias):
    layer = tf.add(conv2d(input_layer, weight), bias)
    return tf.nn.relu(layer)

# Fully connected layer: matmul + bias + ReLU
def full_connect(input_layer, weight, bias):
    layer = tf.add(tf.matmul(input_layer, weight), bias)
    return tf.nn.relu(layer)

# Dropout keep probability (fed at run time)
keep_prob = tf.placeholder(tf.float32)

# Reshape the flat input into a 4-D tensor: [batch, height, width, channels]
x_image = tf.reshape(x, [-1, 28, 28, 1])


# First layer: 5x5 convolution, 1 input channel, 32 output channels
W_conv1 = init_weight([5, 5, 1, 32], 0.1)
b_conv1 = init_bias([32], 0.1)

h_conv1 = conv(x_image, W_conv1, b_conv1)
# shape: [batch, 28, 28, 32]
h_pool1 = max_pool_2x2(h_conv1)
# shape: [batch, 14, 14, 32]

# Second layer: 5x5 convolution, 32 input channels, 64 output channels
W_conv2 = init_weight([5, 5, 32, 64], 0.1)
b_conv2 = init_bias([64], 0.1)

h_conv2 = conv(h_pool1, W_conv2, b_conv2)
# shape: [batch, 14, 14, 64]
h_pool2 = max_pool_2x2(h_conv2)
# shape: [batch, 7, 7, 64]

# Fully connected layer 1: flatten the 7x7x64 feature maps into 1024 units
W_fc1 = init_weight([7 * 7 * 64, 1024], 0.1)
b_fc1 = init_bias([1024], 0.1)

h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)
# Apply dropout to the fully connected layer
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Output layer: 1024 units -> 10 logits
W_fc2 = init_weight([1024, 10], 0.1)
b_fc2 = init_bias([10], 0.1)

y = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2)

# Feature-map shapes, for inspecting tensor sizes at run time
num_conv1 = tf.shape(h_conv1)
num_pool1 = tf.shape(h_pool1)
num_conv2 = tf.shape(h_conv2)
num_pool2 = tf.shape(h_pool2)

# Cross-entropy loss on the softmax of the logits
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
)

# Learning rate, stored in a variable so it can be decayed between epochs
learn_rate = tf.Variable(0.001)
train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)

# Accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Variable initialization
init_op = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init_op)

    # Decay the learning rate each epoch
    for epoch in range(50):
        sess.run(tf.assign(learn_rate, 0.001 * (0.95 ** epoch)))
        # 1000 mini-batches of 100 images per epoch
        for _ in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            # Inside the session, placeholders are fed through feed_dict
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 1.0})

        lr = sess.run(learn_rate)
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
        print(" Iter "+ str(epoch) + ",Test Accuracy= "+ str(acc) + ",Learning_Rate ="+str(lr)+" ")
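A note on the training loop above: keep_prob is fed as 1.0 at every training step, so the dropout layer is effectively a no-op in this run (the log below was produced that way). If you want dropout to actually regularize training, a common variant, shown here only as a sketch, is to feed a keep probability below 1.0 while training and 1.0 while evaluating:

# Sketch only: keep_prob=0.5 during training is an assumption,
# not the setting used for the results shown below.
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
# Always evaluate with the full network (no dropout):
acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})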

Output

As you can see, the accuracy reaches 99.4%. For a beginner like me, that is good enough for now.

Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz
 Iter 0,Test Accuracy= 0.9882,Learning_Rate =0.001 
 Iter 1,Test Accuracy= 0.9892,Learning_Rate =0.00095 
 Iter 2,Test Accuracy= 0.9898,Learning_Rate =0.0009025 
 Iter 3,Test Accuracy= 0.9919,Learning_Rate =0.000857375 
 Iter 4,Test Accuracy= 0.9889,Learning_Rate =0.000814506 
 Iter 5,Test Accuracy= 0.9908,Learning_Rate =0.000773781 
 Iter 6,Test Accuracy= 0.9913,Learning_Rate =0.000735092 
 Iter 7,Test Accuracy= 0.9907,Learning_Rate =0.000698337 
 Iter 8,Test Accuracy= 0.9919,Learning_Rate =0.00066342 
 Iter 9,Test Accuracy= 0.9914,Learning_Rate =0.000630249 
 Iter 10,Test Accuracy= 0.9926,Learning_Rate =0.000598737 
 Iter 11,Test Accuracy= 0.9838,Learning_Rate =0.0005688 
 Iter 12,Test Accuracy= 0.9933,Learning_Rate =0.00054036 
 Iter 13,Test Accuracy= 0.9931,Learning_Rate =0.000513342 
 Iter 14,Test Accuracy= 0.9928,Learning_Rate =0.000487675 
 Iter 15,Test Accuracy= 0.9931,Learning_Rate =0.000463291 
 Iter 16,Test Accuracy= 0.9934,Learning_Rate =0.000440127 
 Iter 17,Test Accuracy= 0.9936,Learning_Rate =0.00041812 
 Iter 18,Test Accuracy= 0.9938,Learning_Rate =0.000397214 
 Iter 19,Test Accuracy= 0.9939,Learning_Rate =0.000377354 
 Iter 20,Test Accuracy= 0.994,Learning_Rate =0.000358486 
 Iter 21,Test Accuracy= 0.9941,Learning_Rate =0.000340562 
 Iter 22,Test Accuracy= 0.9941,Learning_Rate =0.000323534 
 Iter 23,Test Accuracy= 0.994,Learning_Rate =0.000307357 
 Iter 24,Test Accuracy= 0.9941,Learning_Rate =0.000291989 
 Iter 25,Test Accuracy= 0.9942,Learning_Rate =0.00027739 
 Iter 26,Test Accuracy= 0.9941,Learning_Rate =0.00026352 
 Iter 27,Test Accuracy= 0.9944,Learning_Rate =0.000250344 
 Iter 28,Test Accuracy= 0.9942,Learning_Rate =0.000237827 
 Iter 29,Test Accuracy= 0.9942,Learning_Rate =0.000225936 
 Iter 30,Test Accuracy= 0.9943,Learning_Rate =0.000214639 
 Iter 31,Test Accuracy= 0.9943,Learning_Rate =0.000203907 
 Iter 32,Test Accuracy= 0.9942,Learning_Rate =0.000193711 
 Iter 33,Test Accuracy= 0.9941,Learning_Rate =0.000184026 
 Iter 34,Test Accuracy= 0.9941,Learning_Rate =0.000174825 
 Iter 35,Test Accuracy= 0.9942,Learning_Rate =0.000166083 
 Iter 36,Test Accuracy= 0.9942,Learning_Rate =0.000157779 
 Iter 37,Test Accuracy= 0.9943,Learning_Rate =0.00014989 
 Iter 38,Test Accuracy= 0.9943,Learning_Rate =0.000142396 
 Iter 39,Test Accuracy= 0.9943,Learning_Rate =0.000135276 
 Iter 40,Test Accuracy= 0.9944,Learning_Rate =0.000128512 
 Iter 41,Test Accuracy= 0.9944,Learning_Rate =0.000122087 
 Iter 42,Test Accuracy= 0.9944,Learning_Rate =0.000115982 
 Iter 43,Test Accuracy= 0.9943,Learning_Rate =0.000110183 
 Iter 44,Test Accuracy= 0.9944,Learning_Rate =0.000104674 
 Iter 45,Test Accuracy= 0.9943,Learning_Rate =9.94403e-05 
 Iter 46,Test Accuracy= 0.9942,Learning_Rate =9.44682e-05 
 Iter 47,Test Accuracy= 0.9942,Learning_Rate =8.97448e-05 
 Iter 48,Test Accuracy= 0.9942,Learning_Rate =8.52576e-05 
 Iter 49,Test Accuracy= 0.9943,Learning_Rate =8.09947e-05 

Key Points

The main thing to learn is how to compute the tensor sizes after each convolution and pooling step, and not to get them wrong. If you are unsure, you can use tf.shape to check the sizes at run time, as in the sketch below.
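With SAME padding, a stride-1 convolution keeps the spatial size, and a 2x2 max pool with stride 2 halves it (rounding up), so the feature maps go 28x28 -> 14x14 -> 7x7, which is why the first fully connected weight has shape [7*7*64, 1024]. A minimal check, using the num_* shape tensors defined in the program above and assuming the graph is built and mnist is loaded:

# Minimal sketch: evaluate the shape tensors on a small batch of 4 images.
batch_xs, _ = mnist.train.next_batch(4)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([num_conv1, num_pool1, num_conv2, num_pool2],
                   feed_dict={x: batch_xs}))
    # Expected: [ 4 28 28 32], [ 4 14 14 32], [ 4 14 14 64], [ 4  7  7 64]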

Reposted from blog.csdn.net/weixin_39142651/article/details/80693097