1. Saving the model
TensorFlow example:
from __future__ import print_function
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(".\\Data\\MNIST_data\\", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
batch_size = 100
display_step = 1
model_path = "./saved_model/model"
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input], 'x')
y = tf.placeholder("float", [None, n_classes], 'y')
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'], name='fc1')
    layer_1 = tf.nn.relu(layer_1, name='relu1')
    # Hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'], name='fc2')
    layer_2 = tf.nn.relu(layer_2, name='relu2')
    # Output layer with linear activation
    out_layer = tf.add(tf.matmul(layer_2, weights['h_out']), biases['b_out'], name='fc_out')
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='h1'),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='h2'),
    'h_out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='h_out')
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
    'b_out': tf.Variable(tf.random_normal([n_classes]), name='b_out')
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()
# Running first session
print("Starting 1st session...")
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    # Training cycle
    for epoch in range(20):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                  "{:.9f}".format(avg_cost))
    print("First Optimization Finished!")
    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
    # Save model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)
Here, saver = tf.train.Saver() creates a saver object; called with no arguments, it covers every variable in the graph, i.e. the complete model. saver = tf.train.Saver(var_list) instead saves and restores only the variables in the list var_list.
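For instance, a minimal sketch of saving just a subset of variables (reusing the names from the example above; the dict keys become the names stored in the checkpoint, and ./saved_model/h1_only is a hypothetical path):

partial_saver = tf.train.Saver({'h1': weights['h1'], 'b1': biases['b1']})
with tf.Session() as sess:
    sess.run(init)
    # ... training ...
    # Only h1 and b1 are written to this checkpoint
    partial_saver.save(sess, "./saved_model/h1_only")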
2. Restoring the model
- Restore the graph directly from the saved model:
saver = tf.train.import_meta_graph('./saved_model/model.meta')
- Restore only the stored variables (each one is matched by its variable name):
saver = tf.train.Saver(var_list)
saver.restore(sess, check_point_path)
- Fetch a variable's value from the graph by name:
graph = tf.get_default_graph()
a = graph.get_tensor_by_name('a:0')
Note: when restoring a model with saver.restore(sess, saved_path), every variable in the current graph (or, equivalently, every variable in the Saver's var_list) must exist in the checkpoint under an identical name. Only variable values are restored; the graph structure itself is untouched.
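Putting these pieces together, a minimal end-to-end restore sketch (paths and tensor names follow the saving example in section 1):

tf.reset_default_graph()
# Rebuild the graph structure from the stored meta file
saver = tf.train.import_meta_graph('./saved_model/model.meta')
graph = tf.get_default_graph()
x = graph.get_tensor_by_name('x:0')
pred = graph.get_tensor_by_name('fc_out:0')
with tf.Session() as sess:
    # Restore the variable values from the checkpoint
    saver.restore(sess, './saved_model/model')
    output = sess.run(pred, feed_dict={x: mnist.test.images})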
3. Pruning and fine-tuning
tf.reset_default_graph()
var_list = []  # all parameter variables to restore from the saved model (to be filled in)
saver = tf.train.Saver(var_list)
graph = tf.get_default_graph()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, 'check_point')
    w = graph.get_tensor_by_name('w:0')  # the weight matrix to be pruned
    p_w, mask = pruning(w)  # the pruning result and its mask
    weights['w'] = tf.Variable(p_w, name='p_w')
    pred, cost, optimizer = new_model_build(weights, biases)  # build the modified model (omitted)
    grads = optimizer.compute_gradients(cost)
    delete_none_grads(grads)
    # Use the mask to block gradient updates on the pruned entries
    count = 0
    for grad, var in grads:
        if var.name == "p_w:0":
            idx_in1 = tf.cast(mask, tf.float32)
            grads[count] = (tf.multiply(idx_in1, grad), var)
            break
        count += 1
    train_step = optimizer.apply_gradients(grads)
    # Initialize the newly added variables
    for var in tf.global_variables():
        if not sess.run(tf.is_variable_initialized(var)):
            sess.run(tf.variables_initializer([var]))
    # Training and testing (omitted)
    train()
    test()
    new_var_list = tf.trainable_variables()
    new_saver = tf.train.Saver(new_var_list)
    new_saver.save(sess, 'saved_path')
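pruning and delete_none_grads above are user-defined helpers, not TensorFlow APIs. A possible sketch of the two, assuming simple magnitude-based pruning with a hypothetical threshold:

def pruning(w, threshold=0.1):
    # Magnitude-based pruning (a sketch): zero out entries whose
    # absolute value is below the threshold, and return the boolean mask
    mask = tf.greater_equal(tf.abs(w), threshold)
    p_w = tf.multiply(w, tf.cast(mask, tf.float32))
    return p_w, mask

def delete_none_grads(grads):
    # Remove (grad, var) pairs whose gradient is None, in place
    for pair in list(grads):
        if pair[0] is None:
            grads.remove(pair)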
An alternative approach: instead of creating a new variable p_w, overwrite the existing variable w in place with assign(), so the graph structure and variable names stay unchanged:
tf.reset_default_graph()
pred, cost, optimizer = model_build(weights, biases)  # rebuild the original model (omitted)
var_list = []  # all parameter variables to restore from the saved model (to be filled in)
saver = tf.train.Saver(var_list)
graph = tf.get_default_graph()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, 'check_point')
    w = graph.get_tensor_by_name('w:0')  # the weight matrix to be pruned
    p_w, mask = pruning(w)  # the pruning result and its mask
    for var in tf.trainable_variables():
        if var.name == 'w:0':
            prune_op = var.assign(p_w)
            sess.run(prune_op)
    grads = optimizer.compute_gradients(cost)
    delete_none_grads(grads)
    # Use the mask to block gradient updates on the pruned entries
    count = 0
    for grad, var in grads:
        if var.name == "w:0":  # assign() keeps the original variable, so its name is still 'w:0'
            idx_in1 = tf.cast(mask, tf.float32)
            grads[count] = (tf.multiply(idx_in1, grad), var)
            break
        count += 1
    train_step = optimizer.apply_gradients(grads)
    # Training and testing (omitted)
    train()
    test()
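With the masked gradients in place, the omitted fine-tuning loop just runs train_step repeatedly; a sketch, reusing the MNIST feed from section 1 and meant to run inside the with tf.Session() block above (num_finetune_epochs is a hypothetical parameter):

for epoch in range(num_finetune_epochs):
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Masked entries receive zero gradient, so pruned weights stay at zero
        sess.run(train_step, feed_dict={x: batch_x, y: batch_y})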