#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Hand-written digit recognition (MNIST) using a simple one-layer network.
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
# Download/load the MNIST dataset into ./MNIST with one-hot encoded labels.
mnist=input_data.read_data_sets('MNIST',one_hot=True)
# Helper that adds one fully connected layer to the graph.
def add_layer(inputs, in_size, out_size, actiavation_function=None):
    """Build one fully connected layer and return its output tensor.

    inputs: 2-D tensor of shape [batch, in_size].
    in_size / out_size: input and output feature dimensions.
    actiavation_function: optional activation applied to the affine
        result; when None the raw linear output is returned.
    """
    # Weights drawn from a standard normal; biases start at a small 0.1.
    W = tf.Variable(tf.random_normal([in_size, out_size]))
    b = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    z = tf.matmul(inputs, W) + b
    return z if actiavation_function is None else actiavation_function(z)
# Accuracy metric (a helper I had not written before).
def compute_accuary(v_xs, v_ys):
    """Evaluate classification accuracy of `prediction` on (v_xs, v_ys).

    v_xs: batch of images, shape [batch, 784].
    v_ys: matching one-hot labels, shape [batch, 10].
    Returns the fraction of samples whose argmax prediction equals the
    argmax of the label, as a float.

    NOTE(review): each call adds new equal/reduce_mean ops to the TF1
    graph, so the graph grows over repeated evaluations — acceptable in a
    tutorial, but these ops should be built once for real use.
    """
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    # tf.argmax replaces the deprecated tf.arg_max alias (removed in newer TF).
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuary = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuary, feed_dict={xs: v_xs, ys: v_ys})
    return result
# Network: a single softmax layer mapping 784 pixel values to 10 class scores.
xs = tf.placeholder(tf.float32, [None, 784])  # each image is 28*28 = 784 pixels
ys = tf.placeholder(tf.float32, [None, 10])   # 10 digit classes, one-hot
prediction = add_layer(xs, 784, 10, actiavation_function=tf.nn.softmax)  # softmax for classification

# Mean per-example cross-entropy.
# NOTE(review): tf.log of a raw softmax output can hit -inf when a
# probability underflows to 0; tf.nn.softmax_cross_entropy_with_logits
# is the numerically stable form — confirm before production use.
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

for i in range(5000):
    batch_xs, batch_ys = mnist.train.next_batch(3000)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 500 == 0:
        # Report held-out test-set accuracy every 500 training steps.
        print(compute_accuary(mnist.test.images, mnist.test.labels))
# NOTE: accuracy lands a bit above 90% — possibly not enough data, or the
# learning rate; lowering it to 0.1 dropped accuracy to ~87%. Revisit the
# accuracy function in a later tutorial video.