TensorFlow Learning Notes (4)

I. Topics:

1. Building a convolutional neural network
2. Saving and restoring with Saver


II. Code with comments:

1. Building a convolutional neural network

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def compute_accuracy(v_xs, v_ys):
    global prediction  # prediction is defined later at module scope
    # run the network on v_xs to get predicted probabilities (dropout disabled)
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    # compare against the true labels; tf.argmax returns the index of the
    # largest entry: axis 0 compares along columns, axis 1 along rows,
    # so axis 1 picks each sample's predicted class
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result  # a fraction between 0 and 1
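
To make the accuracy math concrete, here is a minimal NumPy sketch with made-up numbers (a demo, not part of the network code):

import numpy as np
y_pre = np.array([[0.1, 0.9], [0.8, 0.2]])  # predicted probabilities (toy data)
v_ys = np.array([[0, 1], [0, 1]])           # one-hot labels (toy data)
correct = np.argmax(y_pre, axis=1) == np.argmax(v_ys, axis=1)  # [True, False]
print(correct.astype(np.float32).mean())    # 0.5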

# weights: small truncated-normal initial values
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# biases: small positive constant so ReLU units start active
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
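
As a side note, tf.truncated_normal re-draws any sample that falls more than two standard deviations from the mean, so with stddev=0.1 every initial weight lands inside (-0.2, 0.2). A quick standalone sketch (my own demo, not part of the network):

init_demo = tf.truncated_normal([3, 3], stddev=0.1)
with tf.Session() as demo_sess:
    vals = demo_sess.run(init_demo)
    print(vals.min(), vals.max())  # both inside +/-0.2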

# 2-D convolution: x is the input tensor (the image), W the layer's kernel weights
def conv2d(x, W):
    # strides = [1, x_movement, y_movement, 1];
    # strides[0] and strides[3] must both be 1
    # the middle two 1s move the window one step in x and one step in y;
    # padding='SAME' pads the borders so the output keeps the input's spatial size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 window with stride 2: width and height are halved,
    # the channel depth stays the same
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
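
As a sanity check on the sizes in the comments below: with padding='SAME', the output spatial size is ceil(input_size / stride), independent of the kernel size. A small sketch (the helper name is my own):

import math

def same_output_size(in_size, stride):
    # padding='SAME': output = ceil(input / stride)
    return math.ceil(in_size / stride)

print(same_output_size(28, 1))  # 28: stride-1 conv keeps 28x28
print(same_output_size(28, 2))  # 14: 2x2 pool with stride 2 gives 14x14
print(same_output_size(14, 2))  # 7:  the second pool gives 7x7
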
# define placeholders for input to network
xs = tf.placeholder(tf.float32, [None, 784])   # 28*28 flattened pixels
ys = tf.placeholder(tf.float32, [None, 10])    # one-hot digit labels
keep_prob = tf.placeholder(tf.float32)         # dropout keep probability
x_image = tf.reshape(xs, [-1, 28, 28, 1])  # -1 leaves the batch size unspecified; 28x28 pixels; 1 channel (grayscale)
# print(x_image.shape)   # [n_samples, 28, 28, 1]
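
The -1 in the reshape lets TensorFlow infer the batch dimension from the data itself. A tiny sketch with a made-up batch of 100 images:

demo = tf.reshape(tf.zeros([100, 784]), [-1, 28, 28, 1])
print(demo.shape)  # (100, 28, 28, 1): the -1 was inferred as 100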

## conv1 layer ##
W_conv1 = weight_variable([5, 5, 1, 32])  # 5x5 kernel patch; in size 1 (image depth); out size 32 feature maps
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # ReLU non-linearity; output size: 28x28x32
h_pool1 = max_pool_2x2(h_conv1)   # output size: 14x14x32

## conv2 layer ##
W_conv2 = weight_variable([5, 5, 32, 64])  # 5x5 patch; in size 32; out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # ReLU; output size: 14x14x64
h_pool2 = max_pool_2x2(h_conv2)   # output size: 7x7x64

## fc1 layer (fully connected) ##
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
# flatten the 3-D feature maps to 1-D: [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64];
# -1 again leaves the batch dimension to be inferred
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)   # dropout to reduce overfitting
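
Note that tf.nn.dropout scales the surviving activations by 1/keep_prob, so feeding keep_prob=1 at test time needs no extra correction. A standalone sketch (demo only, not part of the network):

drop_demo = tf.nn.dropout(tf.ones([4]), keep_prob=0.5)
with tf.Session() as demo_sess:
    print(demo_sess.run(drop_demo))  # e.g. [2. 0. 2. 2.]: kept units are doubled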

## fc2 layer ##
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)  # softmax turns the scores into class probabilities

## the error between prediction and real data ##
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
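
A caveat: computing log(prediction) by hand can hit log(0) when a softmax output underflows. A numerically safer variant (an alternative, not what this note's code does) keeps the raw logits and uses TensorFlow's fused op:

# variant that replaces the prediction / cross_entropy lines above
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
prediction = tf.nn.softmax(logits)  # compute_accuracy still uses this
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))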

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
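
One practical caveat: compute_accuracy pushes all 10,000 test images through the network at once, which can exhaust GPU memory. A hedged sketch that averages over chunks instead (the chunk size of 1000 is my own choice):

accs = []
for k in range(0, 10000, 1000):
    accs.append(compute_accuracy(mnist.test.images[k:k+1000],
                                 mnist.test.labels[k:k+1000]))
print(sum(accs) / len(accs))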

2. Saving and restoring with Saver

from __future__ import print_function
import tensorflow as tf
import numpy as np

# # Save to file
# W = tf.Variable([[1,2,3],[3,4,5]], dtype=tf.float32, name='weights')
# b = tf.Variable([[1,2,3]], dtype=tf.float32, name='biases')
#
# init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
# saver = tf.train.Saver()  # a Saver stores (and later restores) variables
#
# with tf.Session() as sess:
#    sess.run(init)
#    save_path = saver.save(sess, "my_net/save_net.ckpt")
#    print("Save to path: ", save_path)

###################################################################################

# restore variables
# redefine variables with the same shape and the same dtype as the saved ones;
# restoring only works when shape and type match exactly
W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name="weights")
b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name="biases")

# no init step is needed: saver.restore() loads the saved values directly

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "my_net/save_net.ckpt")
    print("weights:", sess.run(W))
    print("biases:", sess.run(b))

