[TF Advanced] Complete MNIST Handwritten Digit Recognition Code & a Single Neural Network

Example 21: Recognizing blurry handwritten digits in an image

1. Download the dataset

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print('Input data:', mnist.train.images)
print('Input data shape:', mnist.train.images.shape)
import pylab
im = mnist.train.images[1]
im = im.reshape(-1, 28)
pylab.imshow(im)
pylab.show()
print ('Test data shape:', mnist.test.images.shape)
print ('Validation data shape:', mnist.validation.images.shape)
Input data: [[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
Input data shape: (55000, 784)

Test data shape: (10000, 784)
Validation data shape: (5000, 784)
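
With one_hot=True the labels come back as 10-dimensional one-hot vectors rather than integers, which is what the softmax model below expects. A minimal sketch of that encoding (NumPy only, with a hypothetical label value for illustration):

import numpy as np

label = 3                        # hypothetical integer label
one_hot = np.eye(10)[label]      # -> [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
print(one_hot)
print(np.argmax(one_hot))        # recover the integer label: 3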

2. Analyze the characteristics of the images

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import pylab

tf.reset_default_graph()
# Define placeholders
x = tf.placeholder(tf.float32, [None, 784]) # MNIST images are 28x28 = 784 dimensions
y = tf.placeholder(tf.float32, [None, 10]) # digits 0-9, 10 classes in total
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

3. Build the model

# Initialize the model weights
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))

# softmax classification
pred = tf.nn.softmax(tf.matmul(x, W) + b)

# Loss function: cross-entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))

# Parameter settings
learning_rate = 0.01
# Use a gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
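
The model is a single fully connected layer followed by softmax, pred = softmax(xW + b), and the cost is the mean cross-entropy between the one-hot labels and the predictions. A minimal NumPy sketch of the same computation, with made-up values for a 3-class case:

import numpy as np

z = np.array([[2.0, 1.0, 0.1]])                              # logits for one sample
softmax = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)   # probabilities summing to 1
label = np.array([[1.0, 0.0, 0.0]])                          # one-hot label
cross_entropy = -np.sum(label * np.log(softmax), axis=1)     # = -log(p of the true class)
print(softmax, cross_entropy)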

4. Train the model

training_epochs = 25
batch_size = 100
display_step = 1
saver = tf.train.Saver()
model_path = "H:/tensorflow_projects/chap5/mnist_model.ckpt"

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all variables

    # Start the training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over the entire training set
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_cost += c / total_batch
        # Display training progress
        if (epoch+1) % display_step == 0:
            print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print( " Finished!")

# Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print ("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
    
# Save model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)
Epoch: 0001 cost= 8.528780973
Epoch: 0002 cost= 4.351987058
Epoch: 0003 cost= 3.044533993
Epoch: 0004 cost= 2.405865938
Epoch: 0005 cost= 2.023756936
Epoch: 0006 cost= 1.771609710
Epoch: 0007 cost= 1.594264874
Epoch: 0008 cost= 1.463273387
Epoch: 0009 cost= 1.362599298
Epoch: 0010 cost= 1.283132398
Epoch: 0011 cost= 1.218332462
Epoch: 0012 cost= 1.164574228
Epoch: 0013 cost= 1.118905594
Epoch: 0014 cost= 1.079640089
Epoch: 0015 cost= 1.045503370
Epoch: 0016 cost= 1.015250035
Epoch: 0017 cost= 0.988325027
Epoch: 0018 cost= 0.963962568
Epoch: 0019 cost= 0.942083137
Epoch: 0020 cost= 0.922068430
Epoch: 0021 cost= 0.903581946
Epoch: 0022 cost= 0.886608397
Epoch: 0023 cost= 0.870939313
Epoch: 0024 cost= 0.856314616
Epoch: 0025 cost= 0.842578177
 Finished!
Accuracy: 0.825
Model saved in file: H:/tensorflow_projects/chap5/mnist_model.ckpt

5. Test the model

# Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print ("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

6. Save the model

    # Save the model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)
# Note: the Saver must be created before the session is launched (as in step 4):
saver = tf.train.Saver()

7. Restore the model

# Restore the model
print("Starting 2nd session...")
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    # Restore model weights from previously saved model
    saver.restore(sess, model_path)
    
    # Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print ("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
    
    output = tf.argmax(pred, 1)
    batch_xs, batch_ys = mnist.train.next_batch(2)
    outputval,predv = sess.run([output,pred], feed_dict={x: batch_xs})
    print(outputval,predv,batch_ys)

    im = batch_xs[0]
    im = im.reshape(-1,28)
    pylab.imshow(im)
    pylab.show()
    
    im = batch_xs[1]
    im = im.reshape(-1,28)
    pylab.imshow(im)
    pylab.show()
Accuracy: 0.825
[0 8] [[9.9999976e-01 4.6237684e-18 2.0244670e-08 4.7625484e-08 7.0704164e-18
  2.7070349e-10 9.5091435e-12 6.9175507e-17 9.4598128e-08 7.1266972e-15]
 [5.7434350e-05 3.0411970e-02 1.3331110e-02 1.6055863e-01 1.1928177e-03
  2.4296941e-02 9.0290455e-04 1.7760798e-05 7.6825178e-01 9.7868522e-04]] [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]]
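
The printed values are, in order: the predicted class for each of the two samples (output), the full probability distribution for each sample (pred), and the one-hot ground truth (batch_ys). A quick way to compare predictions with the labels, runnable right after the block above since batch_ys is still in scope:

import numpy as np
print(np.argmax(batch_ys, axis=1))   # true classes of the two samples, e.g. [0 8] here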


Example 22: Cross-entropy experiment

  • softmax_cross_entropy_with_logits
  • -tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), 1)
# -*- coding: utf-8 -*-
import tensorflow as tf


labels = [[0,0,1],[0,1,0]]
logits = [[2,  0.5,6],
          [0.1,0,  3]]
logits_scaled = tf.nn.softmax(logits)
logits_scaled2 = tf.nn.softmax(logits_scaled)


result1 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
result2 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits_scaled)
result3 = -tf.reduce_sum(labels*tf.log(logits_scaled),1)


with tf.Session() as sess:
    print ("scaled=",sess.run(logits_scaled))
    print ("scaled2=",sess.run(logits_scaled2)) # after a second softmax the probability distribution changes


    print ("rel1=",sess.run(result1),"\n") # the correct way
    print ("rel2=",sess.run(result2),"\n") # feeding already-softmaxed values is equivalent to taking the loss of a second softmax, so the result is wrong
    print ("rel3=",sess.run(result3))
scaled= [[0.01791432 0.00399722 0.97808844]
 [0.04980332 0.04506391 0.90513283]]
scaled2= [[0.21747023 0.21446465 0.56806517]
 [0.2300214  0.22893383 0.5410447 ]]
rel1= [0.02215516 3.0996735 ] 

rel2= [0.56551915 1.4743223 ] 

rel3= [0.02215518 3.0996735 ]
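
Since the softmax probability of the true class is 0.97808844 for the first sample and 0.04506391 for the second, each cross-entropy value is just the negative log of that probability, which is exactly what rel1 and rel3 show. A quick check with the standard library:

import math

print(-math.log(0.97808844))   # ≈ 0.0221552, matches rel1[0] and rel3[0]
print(-math.log(0.04506391))   # ≈ 3.0996737, matches rel1[1] and rel3[1]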

Example 23: one_hot experiment

# The label probabilities sum to 1, but the labels are no longer one-hot
labels = [[0.4,0.1,0.5],[0.3,0.6,0.1]]
result4 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print ("rel4=",sess.run(result4),"\n") 
rel4= [2.1721554 2.7696736] 
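
With non-one-hot labels the cross-entropy is the weighted sum -Σ labels·log(softmax(logits)), so the loss stays high even when the model is confident, because the label's probability mass is spread over several classes. A quick check against the scaled values printed in Example 22:

import math

p = [0.01791432, 0.00399722, 0.97808844]   # scaled[0] from Example 22
soft_labels = [0.4, 0.1, 0.5]
print(-sum(l * math.log(q) for l, q in zip(soft_labels, p)))   # ≈ 2.172, matches rel4[0]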

Example 24: Using the sparse cross-entropy function

# sparse labels
labels = [2,1] # there are three classes 0, 1, 2; equivalent to the one-hot rows [0,0,1] and [0,1,0]
result5 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print ("rel5=",sess.run(result5),"\n")
rel5= [0.02215516 3.0996735 ] 
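
sparse_softmax_cross_entropy_with_logits takes the integer class indices directly and produces the same values as the dense version fed with one-hot labels. A minimal sketch of that equivalence, reusing the logits defined above:

labels_sparse = [2, 1]
labels_dense = tf.one_hot(labels_sparse, depth=3)   # [[0,0,1],[0,1,0]]
result_dense = tf.nn.softmax_cross_entropy_with_logits(labels=labels_dense, logits=logits)
with tf.Session() as sess:
    print(sess.run(labels_dense))
    print(sess.run(result_dense))   # same values as rel5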

Example 25: Computing the loss value

tf.reduce_mean(-tf.reduce_sum(labels * tf.log(logits_scaled), 1)) is equivalent to tf.reduce_mean(result1)

# Note: the cross-entropy functions above return a vector (one value per sample), not a scalar.
# To obtain the cross-entropy loss we still have to average that vector,
# i.e. apply one more tf.reduce_mean.
loss=tf.reduce_mean(result1)
with tf.Session() as sess:
    print ("loss=",sess.run(loss))
loss= 1.5609143
labels = [[0,0,1],[0,1,0]]    
loss2 = tf.reduce_mean(-tf.reduce_sum(labels * tf.log(logits_scaled),1) )
with tf.Session() as sess:
    print ("loss2=",sess.run(loss2))
loss2= 1.5609144
The complete script below rebuilds the MNIST model with integer labels and tf.nn.sparse_softmax_cross_entropy_with_logits:

# -*- coding: utf-8 -*-
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")

print ('Input data:',mnist.train.images)
print ('Input data shape:',mnist.train.images.shape)

import pylab 
im = mnist.train.images[1]
im = im.reshape(-1,28)
pylab.imshow(im)
pylab.show()


print ('Test data shape:',mnist.test.images.shape)
print ('Validation data shape:',mnist.validation.images.shape)


import tensorflow as tf  # import the TensorFlow library

tf.reset_default_graph()
# tf Graph input
x = tf.placeholder(tf.float32, [None, 784]) # MNIST images are 28*28 = 784 dimensions
y = tf.placeholder(tf.int32, [None]) # digits 0-9 => 10 classes, given as integer labels

# Set model weights
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))

z = tf.matmul(x, W) + b
# Build the model
pred = tf.nn.softmax(z) # softmax classification

# Minimize the error using cross-entropy
#cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=z))
# Parameter settings
learning_rate = 0.01
# Use a gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

training_epochs = 25
batch_size = 100
display_step = 1


# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all variables

    # Start the training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over the entire training set
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
                                                          y: batch_ys})
            # Compute average loss
            avg_cost += c / total_batch
        # Display training progress
        if (epoch+1) % display_step == 0:
            print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print( " Finished!")
Input data: [[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
Input data shape: (55000, 784)

Test data shape: (10000, 784)
Validation data shape: (5000, 784)
Epoch: 0001 cost= 8.143192529
Epoch: 0002 cost= 4.322669148
Epoch: 0003 cost= 2.981214518
Epoch: 0004 cost= 2.356783852
Epoch: 0005 cost= 1.998906395
Epoch: 0006 cost= 1.765469893
Epoch: 0007 cost= 1.600443805
Epoch: 0008 cost= 1.477601487
Epoch: 0009 cost= 1.381630285
Epoch: 0010 cost= 1.305191407
Epoch: 0011 cost= 1.241832566
Epoch: 0012 cost= 1.188988984
Epoch: 0013 cost= 1.143483993
Epoch: 0014 cost= 1.104311068
Epoch: 0015 cost= 1.069696186
Epoch: 0016 cost= 1.039322816
Epoch: 0017 cost= 1.012039655
Epoch: 0018 cost= 0.987467080
Epoch: 0019 cost= 0.965332884
Epoch: 0020 cost= 0.945004881
Epoch: 0021 cost= 0.926361536
Epoch: 0022 cost= 0.909262278
Epoch: 0023 cost= 0.893383189
Epoch: 0024 cost= 0.878501318
Epoch: 0025 cost= 0.864673607
 Finished!
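
This script stops after training. To evaluate it, the accuracy computation from Example 21 needs a small change, because y now holds integer labels rather than one-hot vectors. A possible sketch, to be run inside the same session after the training loop:

    # Test the model with integer labels: compare argmax of the prediction with y directly
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.cast(y, tf.int64))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))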

Example 26: Learning-rate decay

# -*- coding: utf-8 -*-
import tensorflow as tf

global_step = tf.Variable(0, trainable=False)

initial_learning_rate = 0.1  # initial learning rate

learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step,
                                           decay_steps=10,decay_rate=0.9)
opt = tf.train.GradientDescentOptimizer(learning_rate)

add_global = global_step.assign_add(1)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    print(sess.run(learning_rate))
    for i in range(20):
        g, rate = sess.run([add_global, learning_rate])
        print(g,rate)
0.1
1 0.1
2 0.09791484
3 0.09688862
4 0.095873155
5 0.094868325
6 0.09387404
7 0.092890166
8 0.09191661
9 0.09095325
10 0.089999996
11 0.08905673
12 0.088123344
13 0.08719975
14 0.08628584
15 0.0853815
16 0.084486626
17 0.08360115
18 0.08272495
19 0.08185792
20 0.08099999
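
exponential_decay computes initial_learning_rate * decay_rate ** (global_step / decay_steps); with staircase left at its default of False the exponent is a real number, so the rate shrinks a little at every step and reaches 0.09 after exactly 10 steps. (The very first printed value is still 0.1 because add_global and learning_rate are fetched in the same sess.run, so the increment and the read are not guaranteed to be ordered.) A quick check of the formula in plain Python:

for step in [2, 10, 20]:
    print(step, 0.1 * 0.9 ** (step / 10.0))   # ≈ 0.09791, 0.09, 0.081 — matches the output above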


Example 27: MNIST classification with a Maxout network

# -*- coding: utf-8 -*-
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("H:/tensorflow_projects/chap6/MNIST_data/")

print ('Input data:',mnist.train.images)
print ('Input data shape:',mnist.train.images.shape)

import pylab 
im = mnist.train.images[1]
im = im.reshape(-1,28)
pylab.imshow(im)
pylab.show()


print ('Test data shape:',mnist.test.images.shape)
print ('Validation data shape:',mnist.validation.images.shape)


import tensorflow as tf  # import the TensorFlow library

def max_out(inputs, num_units, axis=None):
    # Maxout: split the given axis into num_units groups and keep the maximum of each group
    shape = inputs.get_shape().as_list()
    if shape[0] is None:
        shape[0] = -1
    if axis is None:  # Assume that channel is the last dimension
        axis = -1
    num_channels = shape[axis]
    if num_channels % num_units:
        raise ValueError('number of features({}) is not '
                         'a multiple of num_units({})'.format(num_channels, num_units))
    shape[axis] = num_units
    shape += [num_channels // num_units]
    outputs = tf.reduce_max(tf.reshape(inputs, shape), -1, keep_dims=False)
    return outputs


tf.reset_default_graph()
# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # MNIST images are 28*28 = 784 dimensions
y = tf.placeholder(tf.int32, [None]) # digits 0-9 => 10 classes, given as integer labels

# Set model weights
W = tf.Variable(tf.random_normal([784, 100]))
b = tf.Variable(tf.zeros([100]))


z= tf.matmul(x, W) + b
#maxout = tf.reduce_max(z,axis= 1,keep_dims=True)

maxout= max_out(z, 50)

# Set model weights
W2 = tf.Variable(tf.truncated_normal([50, 10], stddev=0.1))
b2 = tf.Variable(tf.zeros([10]))
# Build the model: the maxout layer feeds a second fully connected layer that produces the logits
#pred = tf.nn.softmax(tf.matmul(maxout, W2) + b2)
pred = tf.matmul(maxout, W2) + b2
#pred = tf.nn.softmax(z) # softmax classification

# Minimize error using cross entropy
#cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=pred))

# Parameter settings
learning_rate = 0.04
# Use a gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)


training_epochs = 200
batch_size = 100
display_step = 1


# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all variables

    # Start the training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over the entire training set
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
                                                          y: batch_ys})
            # Compute average loss
            avg_cost += c / total_batch
        # Display training progress
        if (epoch+1) % display_step == 0:
            print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print( " Finished!")
Extracting H:/tensorflow_projects/chap6/MNIST_data/train-images-idx3-ubyte.gz
Extracting H:/tensorflow_projects/chap6/MNIST_data/train-labels-idx1-ubyte.gz
Extracting H:/tensorflow_projects/chap6/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting H:/tensorflow_projects/chap6/MNIST_data/t10k-labels-idx1-ubyte.gz
Input data: [[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
Input data shape: (55000, 784)

Test data shape: (10000, 784)
Validation data shape: (5000, 784)
Epoch: 0001 cost= 1.669748494
Epoch: 0002 cost= 0.819802765
Epoch: 0003 cost= 0.668256996
Epoch: 0004 cost= 0.599882030
Epoch: 0005 cost= 0.551539327
Epoch: 0006 cost= 0.519114701
Epoch: 0007 cost= 0.501650673
Epoch: 0008 cost= 0.480439953
Epoch: 0009 cost= 0.465431287
Epoch: 0010 cost= 0.454214447
Epoch: 0011 cost= 0.442614048
Epoch: 0012 cost= 0.429748516
Epoch: 0013 cost= 0.419512733
Epoch: 0014 cost= 0.412809217
Epoch: 0015 cost= 0.403128482
Epoch: 0016 cost= 0.395945490
Epoch: 0017 cost= 0.387481769
Epoch: 0018 cost= 0.382592868
Epoch: 0019 cost= 0.376352434
Epoch: 0020 cost= 0.371442565
Epoch: 0021 cost= 0.366640467
Epoch: 0022 cost= 0.360618622
Epoch: 0023 cost= 0.357322852
Epoch: 0024 cost= 0.353282172
Epoch: 0025 cost= 0.348204653
Epoch: 0026 cost= 0.344141857
Epoch: 0027 cost= 0.340688343
Epoch: 0028 cost= 0.336875352
Epoch: 0029 cost= 0.332229141
Epoch: 0030 cost= 0.329368933
Epoch: 0031 cost= 0.324990445
Epoch: 0032 cost= 0.323535117
Epoch: 0033 cost= 0.319696042
Epoch: 0034 cost= 0.316543529
Epoch: 0035 cost= 0.314367712
Epoch: 0036 cost= 0.309627955
Epoch: 0037 cost= 0.308954497
Epoch: 0038 cost= 0.305743327
Epoch: 0039 cost= 0.303948994
Epoch: 0040 cost= 0.300707549
Epoch: 0041 cost= 0.298111228
Epoch: 0042 cost= 0.295571287
Epoch: 0043 cost= 0.293599232
Epoch: 0044 cost= 0.292371846
Epoch: 0045 cost= 0.290433042
Epoch: 0046 cost= 0.286466155
Epoch: 0047 cost= 0.284913121
Epoch: 0048 cost= 0.282463599
Epoch: 0049 cost= 0.282443535
Epoch: 0050 cost= 0.278840295
Epoch: 0051 cost= 0.277910688
Epoch: 0052 cost= 0.275044623
Epoch: 0053 cost= 0.274304534
Epoch: 0054 cost= 0.271387891
Epoch: 0055 cost= 0.270530891
Epoch: 0056 cost= 0.269293524
Epoch: 0057 cost= 0.267875358
Epoch: 0058 cost= 0.265286128
Epoch: 0059 cost= 0.263074537
Epoch: 0060 cost= 0.261540208
Epoch: 0061 cost= 0.261259574
Epoch: 0062 cost= 0.259737343
Epoch: 0063 cost= 0.258162930
Epoch: 0064 cost= 0.256089119
Epoch: 0065 cost= 0.254655639
Epoch: 0066 cost= 0.253505012
Epoch: 0067 cost= 0.252484518
Epoch: 0068 cost= 0.249667299
Epoch: 0069 cost= 0.249462925
Epoch: 0070 cost= 0.249046204
Epoch: 0071 cost= 0.247562397
Epoch: 0072 cost= 0.245829041
Epoch: 0073 cost= 0.244501937
Epoch: 0074 cost= 0.243986385
Epoch: 0075 cost= 0.242621479
Epoch: 0076 cost= 0.241314949
Epoch: 0077 cost= 0.238647706
Epoch: 0078 cost= 0.238957213
Epoch: 0079 cost= 0.237347329
Epoch: 0080 cost= 0.234964659
Epoch: 0081 cost= 0.236123101
Epoch: 0082 cost= 0.233973439
Epoch: 0083 cost= 0.232953551
Epoch: 0084 cost= 0.232046905
Epoch: 0085 cost= 0.229982579
Epoch: 0086 cost= 0.229070544
Epoch: 0087 cost= 0.228393014
Epoch: 0088 cost= 0.227479590
Epoch: 0089 cost= 0.227268234
Epoch: 0090 cost= 0.225049027
Epoch: 0091 cost= 0.224516309
Epoch: 0092 cost= 0.223888728
Epoch: 0093 cost= 0.223191615
Epoch: 0094 cost= 0.221796969
Epoch: 0095 cost= 0.221250222
Epoch: 0096 cost= 0.220323073
Epoch: 0097 cost= 0.218742449
Epoch: 0098 cost= 0.218513060
Epoch: 0099 cost= 0.217564493
Epoch: 0100 cost= 0.215474659
Epoch: 0101 cost= 0.214555269
Epoch: 0102 cost= 0.213661779
Epoch: 0103 cost= 0.214191178
Epoch: 0104 cost= 0.213189474
Epoch: 0105 cost= 0.212041208
Epoch: 0106 cost= 0.211847621
Epoch: 0107 cost= 0.210278228
Epoch: 0108 cost= 0.208721001
Epoch: 0109 cost= 0.209450811
Epoch: 0110 cost= 0.207888889
Epoch: 0111 cost= 0.206186019
Epoch: 0112 cost= 0.205807320
Epoch: 0113 cost= 0.205915253
Epoch: 0114 cost= 0.204875258
Epoch: 0115 cost= 0.204274523
Epoch: 0116 cost= 0.204331738
Epoch: 0117 cost= 0.201808658
Epoch: 0118 cost= 0.201525647
Epoch: 0119 cost= 0.199703673
Epoch: 0120 cost= 0.200700889
Epoch: 0121 cost= 0.199350320
Epoch: 0122 cost= 0.198106946
Epoch: 0123 cost= 0.198094789
Epoch: 0124 cost= 0.196696438
Epoch: 0125 cost= 0.196361274
Epoch: 0126 cost= 0.196492676
Epoch: 0127 cost= 0.194797525
Epoch: 0128 cost= 0.194349858
Epoch: 0129 cost= 0.193110045
Epoch: 0130 cost= 0.192708968
Epoch: 0131 cost= 0.192399970
Epoch: 0132 cost= 0.190516700
Epoch: 0133 cost= 0.190331284
Epoch: 0134 cost= 0.190980941
Epoch: 0135 cost= 0.189532741
Epoch: 0136 cost= 0.188812766
Epoch: 0137 cost= 0.187239818
Epoch: 0138 cost= 0.187442517
Epoch: 0139 cost= 0.186436391
Epoch: 0140 cost= 0.185879297
Epoch: 0141 cost= 0.184914501
Epoch: 0142 cost= 0.185321765
Epoch: 0143 cost= 0.183773249
Epoch: 0144 cost= 0.183931502
Epoch: 0145 cost= 0.183287879
Epoch: 0146 cost= 0.182621817
Epoch: 0147 cost= 0.181577222
Epoch: 0148 cost= 0.180124871
Epoch: 0149 cost= 0.181275859
Epoch: 0150 cost= 0.180238542
Epoch: 0151 cost= 0.178712672
Epoch: 0152 cost= 0.178188846
Epoch: 0153 cost= 0.177580589
Epoch: 0154 cost= 0.177027715
Epoch: 0155 cost= 0.177836312
Epoch: 0156 cost= 0.176792373
Epoch: 0157 cost= 0.175756311
Epoch: 0158 cost= 0.174947099
Epoch: 0159 cost= 0.174266882
Epoch: 0160 cost= 0.174342527
Epoch: 0161 cost= 0.172602550
Epoch: 0162 cost= 0.172811079
Epoch: 0163 cost= 0.172335094
Epoch: 0164 cost= 0.171968882
Epoch: 0165 cost= 0.171027398
Epoch: 0166 cost= 0.169943000
Epoch: 0167 cost= 0.170124644
Epoch: 0168 cost= 0.168496490
Epoch: 0169 cost= 0.169623626
Epoch: 0170 cost= 0.168593532
Epoch: 0171 cost= 0.167650817
Epoch: 0172 cost= 0.167899388
Epoch: 0173 cost= 0.166965650
Epoch: 0174 cost= 0.166645279
Epoch: 0175 cost= 0.166120962
Epoch: 0176 cost= 0.165155771
Epoch: 0177 cost= 0.165017686
Epoch: 0178 cost= 0.163808241
Epoch: 0179 cost= 0.163797412
Epoch: 0180 cost= 0.162719157
Epoch: 0181 cost= 0.163193959
Epoch: 0182 cost= 0.161633140
Epoch: 0183 cost= 0.162454181
Epoch: 0184 cost= 0.161832177
Epoch: 0185 cost= 0.161416251
Epoch: 0186 cost= 0.159936835
Epoch: 0187 cost= 0.160258861
Epoch: 0188 cost= 0.159245104
Epoch: 0189 cost= 0.158908117
Epoch: 0190 cost= 0.157777246
Epoch: 0191 cost= 0.157958048
Epoch: 0192 cost= 0.157402902
Epoch: 0193 cost= 0.157361584
Epoch: 0194 cost= 0.156321988
Epoch: 0195 cost= 0.156084833
Epoch: 0196 cost= 0.155017134
Epoch: 0197 cost= 0.155896032
Epoch: 0198 cost= 0.154472644
Epoch: 0199 cost= 0.154645715
Epoch: 0200 cost= 0.153077820
 Finished!
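
With a 100-unit hidden layer condensed into 50 maxout units, the cost falls far below what the plain softmax regression reached above. The max_out helper simply reshapes the last dimension into num_units groups of num_channels // num_units values and keeps the maximum of each group. A minimal NumPy sketch of that operation, with made-up values:

import numpy as np

z = np.array([[1.0, 5.0, -2.0, 3.0]])   # batch of 1 sample, 4 features
groups = z.reshape(-1, 2, 4 // 2)       # 2 maxout units, 2 channels per unit
print(groups.max(axis=-1))              # -> [[5. 3.]]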
Reposted from blog.csdn.net/weixin_37993251/article/details/89501449