MNIST Handwritten Digit Recognition: A TensorFlow Example

I recently started learning TensorFlow and built a simple convolutional neural network for MNIST handwritten digit recognition, following a Pluralsight tutorial. The code is below:

Functions that define the filters (weights) and the biases:
import tensorflow as tf
def weightVariable(shape):  # generate a filter / weight tensor
    # note: stddev=0.1 is the usual choice in this tutorial family; the
    # original 1.0 makes the initial weights large and can slow convergence
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def biasVariable(shape):
    initial = tf.constant(0.1,shape=shape)
    return tf.Variable(initial)

def conv2d(x,W): # x is the input image batch: [batch, height, width, channels]
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding ='SAME')

def MaxPooling_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
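
As a quick sanity check on these helpers (my own addition, not part of the tutorial), feeding a dummy single-channel 28x28 image through them shows that SAME padding preserves the spatial size while the 2x2 pooling halves it:

# hedged sketch: static graph shapes can be printed without running a session
dummy = tf.zeros([1, 28, 28, 1])   # one fake grayscale image
w = weightVariable([5, 5, 1, 8])   # 5x5 filter mapping 1 channel to 8
print(conv2d(dummy, w).get_shape())                   # (1, 28, 28, 8): SAME keeps 28x28
print(MaxPooling_2x2(conv2d(dummy, w)).get_shape())   # (1, 14, 14, 8): pooling halves it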

Load the data and train the network on random mini-batches from the dataset:

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets(
    r"C:\Myfiles\deepTraining\Tensorflow\Course_tensorflow-understanding-foundations\Practice\mnist_data",one_hot=True)

training_digits, training_labels = mnist.train.next_batch(200)  # draw a random batch of samples

test_digits, test_labels = mnist.test.next_batch(100)

print("Data is ready")
print(test_digits.shape)
print(test_labels.shape)
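
To see what a sample actually looks like (a sketch of mine, assuming matplotlib is installed), note that each row of test_digits is a flattened 28x28 grayscale image and each label is a one-hot vector:

import numpy as np
import matplotlib.pyplot as plt

plt.imshow(test_digits[0].reshape(28, 28), cmap='gray')
plt.title("label: %d" % np.argmax(test_labels[0]))  # decode the one-hot label
plt.show()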
# Construct the network.

Xinput = tf.placeholder(tf.float32,shape=[None,784])
ylable = tf.placeholder(tf.float32,shape=[None,10])

x_image = tf.reshape(Xinput,[-1,28,28,1],name="image")
# x_image [batch, 28,28,1]
W_conv1 = weightVariable([5, 5, 1, 32])
# define a 5x5 filter; 1 is the number of input channels
# (for a color picture it would be 3);
# 32 is the number of output feature channels, chosen by us
b_conv1 = biasVariable([32])
FirstConLayerOutput = conv2d(x_image, W_conv1) + b_conv1 # FirstConLayerOutput size:  [batch,28,28,32]
# push the result through the ReLU activation;
# ReLU keeps the tensor dimensions unchanged
h_con1 = tf.nn.relu(FirstConLayerOutput)  # h_con1:  [batch,28,28,32]
h_pool1 = MaxPooling_2x2(h_con1) # h_pool1:  [batch,14,14,32]

W_conv2 = weightVariable([5,5,32,64]) # 32 matches the 32 output channels of h_pool1
b_conv2 = biasVariable([64])
SecondConLayerOutput = conv2d(h_pool1, W_conv2) + b_conv2 # SecondConLayerOutput:  [batch,14,14,64]
h_con2 = tf.nn.relu(SecondConLayerOutput) # h_con2:  [batch,14,14,64]
h_pool2 = MaxPooling_2x2(h_con2)  # h_pool2:  [batch,7,7,64]
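
# (added shape check, not in the original post) the static graph shapes can be
# printed without running a session, which catches dimension bugs early:
print(h_pool1.get_shape())  # (?, 14, 14, 32)
print(h_pool2.get_shape())  # (?, 7, 7, 64) -> 7*7*64 features once flattened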

# Then define the fully connected layer.

Wfc1 = weightVariable([7 * 7 * 64, 1024])
bfc1 = biasVariable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) # h_pool2_flat: [batch, 7*7*64]
hfc1 = tf.nn.relu(tf.matmul(h_pool2_flat, Wfc1) + bfc1)  # hfc1: [batch, 1024]

keep_prob = tf.placeholder(tf.float32)
h_fc_drop = tf.nn.dropout(hfc1, keep_prob) # dropout does not change the tensor dimensions
Wfc2 = weightVariable([1024,10])
bfc2 = biasVariable([10])

hfc2 = tf.matmul(h_fc_drop, Wfc2) + bfc2  # hfc2: [batch, 10]

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hfc2,labels=ylable))

trainStep = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
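
The post only prints raw predictions during training. A common addition (my own sketch, not part of the original tutorial; correctPrediction and accuracy are names I chose) is an accuracy op built on the same graph, which the session can evaluate alongside trainStep:

# hedged addition: fraction of samples where the predicted digit matches the label
correctPrediction = tf.equal(tf.argmax(hfc2, 1), tf.argmax(ylable, 1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction, tf.float32))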

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    import time

    num_steps = 1000
    display_every = 100
    predict_op = tf.argmax(hfc2, 1)  # build the argmax op once instead of re-creating it every step
    print("start:")
    start_time = time.time()
    for istep in range(num_steps):

        onebatch = mnist.train.next_batch(13)

        trainStep.run(feed_dict={Xinput: onebatch[0], ylable: onebatch[1], keep_prob: 0.5})

        if istep % display_every == 0:
            # evaluate with keep_prob=1.0: dropout is only for training
            Y_fit = sess.run(predict_op, {Xinput: onebatch[0], keep_prob: 1.0})
            print(str(istep) + str(Y_fit))
    print("#----------------------------------------------------------------#")
    # Check the training result: draw 14 random samples from the test set
    # and compare the predicted digits against the true labels.
    testbatch = mnist.test.next_batch(14)
    Y_predict = sess.run(predict_op, {Xinput: testbatch[0], keep_prob: 1.0})
    Y_test = testbatch[1].argmax(axis=1)  # decode the one-hot labels with numpy
    print("Predict: ")
    print(Y_predict)
    print("Test database")
    print(Y_test)
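
    # (added, not in the original post) evaluate on the 100 test samples loaded
    # earlier, using the accuracy op sketched above; keep_prob=1.0 disables
    # dropout at test time
    test_acc = sess.run(accuracy, {Xinput: test_digits, ylable: test_labels, keep_prob: 1.0})
    print("Accuracy on the held-out test batch: %.3f" % test_acc)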

Reposted from blog.csdn.net/chenxin0215/article/details/81660942