版权声明:希望尊重辛苦的自学之旅 https://blog.csdn.net/sinat_42378539/article/details/83048184
使用tensorflow训练自己的数据集—计算测试准确率
上一篇使用tensorflow训练自己的数据集(三)中已经介绍了定义反向传播过程来训练神经网络,训练完神经网络后应对神经网络进行准确率的计算。
import time
import forward
import backward
import genertateds
import tensorflow as tf
# 等待时间
TEST_INTERVAL_SECS = 5
# 总测试集样本数量
test_num_examples = 128
def test():
    """Evaluate the trained network on the test set.

    Builds the inference graph, restores the moving-average (shadow)
    weights from the latest checkpoint under ``backward.MODEL_SAVE_PATH``,
    and prints the test accuracy. If no checkpoint exists yet, sleeps
    ``TEST_INTERVAL_SECS`` seconds and retries forever.
    """
    with tf.Graph().as_default():
        # Batch dimension is None so any batch size fits: the input
        # pipeline below yields batches of 20, so hard-coding
        # test_num_examples (128) here would fail at feed time with a
        # shape mismatch.
        x = tf.placeholder(tf.float32, [None,
                                        forward.IMAGE_SIZE,
                                        forward.IMAGE_SIZE,
                                        forward.NUM_CHANNELS])
        y_ = tf.placeholder(tf.int64, [None])
        # Inference only: no regularizer and no dropout at test time.
        y = forward.inference(x, False, None)
        # Restore the exponential-moving-average shadow variables in
        # place of the raw trained weights.
        variable_average = tf.train.ExponentialMovingAverage(
            backward.MOVING_AVERAGE_DECAY)
        variable_average_restore = variable_average.variables_to_restore()
        saver = tf.train.Saver(variable_average_restore)
        # Accuracy = fraction of samples whose arg-max prediction equals
        # the integer label.
        correct_prediction = tf.equal(tf.argmax(y, 1), y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        image_batch, label_batch = genertateds.get_batch_record(
            genertateds.test_record_path, 20)
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Queue runners feed the input pipeline; always stop
                    # and join them before the session closes, even if
                    # evaluation raises.
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess, coord)
                    try:
                        image, label = sess.run([image_batch, label_batch])
                        saver.restore(sess, ckpt.model_checkpoint_path)
                        # The global step is encoded in the checkpoint
                        # file name, e.g. "model.ckpt-1000" -> "1000".
                        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                        accuracy_score = sess.run(
                            accuracy, feed_dict={x: image, y_: label})
                    finally:
                        coord.request_stop()
                        coord.join(threads)
                    print("After %s training step(s),test accuracy = %g"
                          % (global_step, accuracy_score))
                else:
                    # No checkpoint yet: wait for training to produce one.
                    time.sleep(TEST_INTERVAL_SECS)
def main():
    """Entry point: run the checkpoint-polling evaluation loop."""
    test()
# Standard script guard: evaluate only when this file is executed
# directly, not when it is imported as a module.
if __name__ == '__main__':
    main()
到此就完成一个用tensorflow进行图像分类的简单的神经网络了。
如有错误望多多指教~~