Handwritten digit recognition based on TensorFlow

import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()         #fixes the tf.placeholder error under TF 2.x
import matplotlib.pyplot as plt
import input_data    #MNIST loader from the TensorFlow tutorials; the dataset is downloaded locally

mnist = input_data.read_data_sets('data/',one_hot=True)
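#note (assumption, not from the original post): if the legacy input_data helper
#is unavailable, MNIST can also be loaded with tf.keras and reshaped to match
#the shapes used below, e.g.:
#   (train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
#   train_x = train_x.reshape(-1, 784).astype("float32") / 255.0
#   train_y = np.eye(10)[train_y]    #one-hot labels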

#network topology
n_hidden_1 = 256
n_hidden_2 = 128
n_input = 784
n_classes = 10

#inputs and outputs
x = tf.placeholder("float",[None,n_input])
y = tf.placeholder("float",[None,n_classes])

#network parameters
stddev = 0.1
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
print("NETWORK READY")

def multilayer_perceptron(_X, _weights, _biases):
    #two sigmoid hidden layers followed by a linear output layer (logits)
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    return tf.matmul(layer_2, _weights['out']) + _biases['out']

#prediction
pred = multilayer_perceptron(x,weights,biases)
#loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y))
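#note (assumption, not from the original post): under compat.v1, tf.nn.softmax_cross_entropy_with_logits_v2 behaves the same here and avoids the deprecation warning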
optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
corr = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accr = tf.reduce_mean(tf.cast(corr,"float"))
#initializer
init = tf.global_variables_initializer()
print("FUNCTIONS READY")
#training iterations
training_epochs = 20
batch_size = 100
display_step = 4

#launch the graph
sess = tf.Session()
sess.run(init)

#optimize
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(mnist.train.num_examples/batch_size)
    #iteration
    for i in range(total_batch):
        batch_xs,batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        avg_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = avg_cost/total_batch
    #display
    if (epoch+1)%display_step==0:
        print("Epoch:%03d/%03d cost:%.9f"%(epoch,training_epochs,avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        training_acc = sess.run(accr,feed_dict=feeds)
        print("Train Accuracy:%.3f"%(training_acc))
        feeds = {x: mnist.test.images, y: mnist.test.labels}
        test_acc = sess.run(accr,feed_dict=feeds)
        print("Test Accuracy:%.3f"%(test_acc))
print("Optimization Finished")

Original post: blog.csdn.net/gets_s/article/details/109390986