Saving and loading TensorFlow models
8-1 saver_save (train model and save)
Add two lines of code to the previous program:
saver = tf.train.Saver()
saver.save(sess,'net/my_net.ckpt')
The first argument is the session to save; the second is the path where the model will be written.
The Saver is created before the session is opened, and save() is called once training has finished, as in the full program below.
# Train the model and save it
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# 100 images per batch
batch_size = 100
# Calculate how many batches there are
n_batch = mnist.train.num_examples // batch_size

# Define two placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Create a simple neural network: 784 neurons in the input layer, 10 in the output layer
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost function
# loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy cost; softmax_cross_entropy_with_logits expects the raw
# logits, not the softmax output, so pass logits here
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Use gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()

# The results are stored in a boolean list
# argmax returns the index of the largest value along an axis
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()  # added line: create the Saver

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(acc))
    # Save the model (added line: save the session)
    saver.save(sess, 'net/my_net.ckpt')
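A side note on longer runs: saver.save() can also be called inside the training loop to write periodic checkpoints. The sketch below is an illustration, not part of the original program; max_to_keep and global_step are standard tf.train.Saver options.

# Keep at most the 5 most recent checkpoints
saver = tf.train.Saver(max_to_keep=5)

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        # Appending global_step yields files such as net/my_net.ckpt-10
        saver.save(sess, 'net/my_net.ckpt', global_step=epoch)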
After running the training program, you will find four new files in the net directory: checkpoint (which records the most recent checkpoint path), my_net.ckpt.data-00000-of-00001 (the variable values), my_net.ckpt.index, and my_net.ckpt.meta (the graph structure).
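If you want to verify what a checkpoint actually contains, TensorFlow 1.x provides a checkpoint reader. A minimal sketch, assuming the save path used above:

import tensorflow as tf

# Open the checkpoint written by saver.save()
reader = tf.train.NewCheckpointReader('net/my_net.ckpt')
# List every saved variable and its shape (here W and b)
for name, shape in reader.get_variable_to_shape_map().items():
    print(name, shape)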
8-2 saver_restore (load model and use)
Loading also takes just two lines of code:
saver = tf.train.Saver()
saver.restore(sess,'net/my_net.ckpt')
The first argument is the session to restore into; the second is the path of the trained model.
Again, the Saver is created before the session, and restore() is called inside it, as in the full program below.
# Load the model and use it
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# 100 images per batch
batch_size = 100
# Calculate how many batches there are
n_batch = mnist.train.num_examples // batch_size

# Define two placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Create the same network as in training: 784 input neurons, 10 output neurons
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost function
# loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy cost (pass the raw logits, as in the training script)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Use gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()

# The results are stored in a boolean list
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    # Accuracy before loading: the variables still hold their untrained initial values
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    # Restore the trained model
    saver.restore(sess, 'net/my_net.ckpt')
    # Accuracy after loading the trained model
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
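Note that this script has to rebuild exactly the same graph before calling restore(). If you do not want to repeat the graph definition, the .meta file saved alongside the checkpoint can rebuild it for you. A minimal sketch using tf.train.import_meta_graph; the variable names printed depend on how the graph was defined when it was saved:

import tensorflow as tf

with tf.Session() as sess:
    # import_meta_graph rebuilds the saved graph structure,
    # then restore() fills in the trained variable values
    saver = tf.train.import_meta_graph('net/my_net.ckpt.meta')
    saver.restore(sess, 'net/my_net.ckpt')
    # The restored variables are registered in the default graph
    for v in tf.trainable_variables():
        print(v.name, v.shape)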