TensorFlow MNIST Hands-On Practice (Convolution)

Copyright notice: This is an original post by the author; reposting is welcome. https://blog.csdn.net/samylee/article/details/85049717

Hardware: NVIDIA GTX 1080

Software: Windows 7, Python 3.6.5, tensorflow-gpu 1.4.0

I. Basics

1. strides = [1, x_movement, y_movement, 1]: the batch and channel dimensions are never convolved over, so both of those strides stay at 1.

2. Data layout: Caffe stores tensors as n,c,h,w (NCHW), while TensorFlow defaults to n,h,w,c (NHWC); see the sketch after this list.

3. A fully connected layer is still just a matrix multiplication.
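
If data arrives in Caffe's NCHW order, a single transpose converts it to the NHWC layout that tf.nn.conv2d expects by default. A minimal sketch (the placeholder names and the 8x3x32x32 shape are made up for illustration):

import tensorflow as tf

# Hypothetical batch in Caffe's NCHW order: 8 images, 3 channels, 32x32
nchw = tf.placeholder(tf.float32, [8, 3, 32, 32])

# Permute axes n,c,h,w -> n,h,w,c for TensorFlow's default data_format
nhwc = tf.transpose(nchw, perm=[0, 2, 3, 1])
print(nhwc.shape)  # (8, 32, 32, 3)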

II. Code

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True) # downloads MNIST if absent; one_hot gives 10-dim label vectors

def compute_acc(v_xs, v_ys):
    global prediction
    # forward pass with dropout disabled (keep_prob = 1)
    y_pred = sess.run(prediction, feed_dict = {xs: v_xs, keep_prob: 1})
    # compare predicted and true class indices, then average the matches
    compare_bool = tf.equal(tf.argmax(y_pred, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(compare_bool, tf.float32))
    # note: this adds new ops to the graph on every call, which works but wastes memory
    outputs = sess.run(accuracy)
    return outputs

def weight_variable(shape):
    # small truncated-normal init breaks symmetry between filters
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # slightly positive bias helps avoid dead ReLUs at the start
    initial = tf.constant(0.1, shape = shape)
    return tf.Variable(initial)

def conv2d(inputs, Weights):
    # strides = [1, x_movement, y_movement, 1]; the batch and channel dims are not convolved, so their strides stay 1
    return tf.nn.conv2d(inputs, Weights, strides = [1, 1, 1, 1], padding = 'SAME')
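# Note: with padding = 'SAME' and stride 1, conv2d preserves the spatial size
# (output = ceil(input / stride)), so only the pooling layers shrink H and W.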

def max_pool_2x2(inputs):
    # strides = [1, x_movement, y_movement, 1]; the batch and channel dims are not pooled, so their strides stay 1
    return tf.nn.max_pool(inputs, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
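# Note: each 2x2 max pool with stride 2 halves H and W: 28x28 -> 14x14 -> 7x7,
# which is where the 7*7*64 flatten size further down comes from.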

xs = tf.placeholder(tf.float32, [None, 28*28]) # None means any batch size
ys = tf.placeholder(tf.float32, [None, 10]) # None means any batch size
keep_prob = tf.placeholder(tf.float32) # dropout keep probability

x_image = tf.reshape(xs, [-1, 28, 28, 1]) # -1 infers the batch size; layout is n,h,w,c (Caffe would use n,c,h,w)

#conv1
conv1_W = weight_variable([5, 5, 1, 32]) # kernel 5x5, in channels 1, out channels 32
conv1_b = bias_variable([32])
conv1_layer = tf.nn.relu(conv2d(x_image, conv1_W) + conv1_b)

#pool1
pool1_layer = max_pool_2x2(conv1_layer)

#conv2
conv2_W = weight_variable([5, 5, 32, 64]) # kernel 5x5, in channels 32, out channels 64
conv2_b = bias_variable([64])
conv2_layer = tf.nn.relu(conv2d(pool1_layer, conv2_W) + conv2_b)

#pool2
pool2_layer = max_pool_2x2(conv2_layer)

#layer flat for innerproduct
layer_flat = tf.reshape(pool2_layer, [-1, 7*7*64]) # -1 infers the batch size; 7*7*64 is pool2's flattened size

#fc1
fc1_W = weight_variable([7*7*64, 128])
fc1_b = bias_variable([128])
fc1_layer = tf.nn.relu(tf.matmul(layer_flat, fc1_W) + fc1_b)

#dropout
dropout_layer = tf.nn.dropout(fc1_layer, keep_prob)

#fc2
fc2_W = weight_variable([128, 10])
fc2_b = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(dropout_layer, fc2_W) + fc2_b)

#loss
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), axis = [1]))
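# Caution: tf.log(prediction) can produce NaN if a softmax probability underflows
# to 0; tf.nn.softmax_cross_entropy_with_logits on the pre-softmax logits is the
# numerically stable alternative.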
optimizer = tf.train.AdamOptimizer(1e-4)
train_step = optimizer.minimize(cross_entropy)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for step in range(2000):
        batch_xs, batch_ys = mnist.train.next_batch(100) # mini-batch of 100 images
        sess.run(train_step, feed_dict = {xs: batch_xs, ys: batch_ys, keep_prob: 0.5}) # drop half the fc1 units during training
        if step % 100 == 0:
            print(compute_acc(mnist.test.images, mnist.test.labels)) # test accuracy every 100 steps
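
As a quick sanity check of the 7*7*64 flatten size, the static shapes can be printed right after the graph is built; the values in the comments follow from the layer definitions above:

print(conv1_layer.shape) # (?, 28, 28, 32) SAME padding keeps 28x28
print(pool1_layer.shape) # (?, 14, 14, 32) 2x2 pool halves it
print(conv2_layer.shape) # (?, 14, 14, 64)
print(pool2_layer.shape) # (?, 7, 7, 64) -> flattened to 7*7*64 = 3136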

III. Results

Extracting MNIST_data\train-images-idx3-ubyte.gz
Extracting MNIST_data\train-labels-idx1-ubyte.gz
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
0.1303
0.7358
0.8439
0.8905
0.9062
0.9212
0.9265
0.9346
0.9383
0.943
0.9467
0.9524
0.9524
0.9548
0.9558
0.9597
0.9639
0.9628
0.9654
0.9686

For any questions, contact me via my only QQ, 2258205918 (screen name: samylee)!
