Saving and Loading TensorFlow Models

Saving the model

The script below builds a small fully connected network, trains it with gradient descent, and writes the result to a checkpoint with tf.train.Saver.

import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
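    """Fully connected layer: activation(inputs @ Weights + biases),
    with TensorBoard histogram summaries for its parameters."""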
    layer_name = 'layer%s'%n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]),name="w")
            tf.summary.histogram(layer_name+'/weights',Weights)
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1,name="b")
            tf.summary.histogram(layer_name+'/biases',biases)
        with tf.name_scope("wx_plus_b"):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        # record the layer output whether or not an activation is applied
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs

# X1 = input dimension, X2 = hidden-layer size, Y1 = output dimension;
# set these and point "path" at your own data files
x_data = np.loadtxt("path", dtype='float32')
y_data = np.loadtxt("path", dtype='float32')

# define placeholders for the network inputs
with tf.name_scope("inputs"):
    xs = tf.placeholder(tf.float32, [None, X1], name="x_input")
    ys = tf.placeholder(tf.float32, [None, Y1], name="y_output")
# add hidden layer
l1 = add_layer(xs, X1, X2, n_layer=1, activation_function=None)
# add output layer (an activation such as tf.nn.relu could be passed instead of None)
prediction = add_layer(l1, X2, Y1, n_layer=3, activation_function=None)

# per-output weighting matrix used in the loss below
my_weights = np.loadtxt("path", dtype='float32')

# set up a decaying learning rate
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.0000015
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.96, staircase=True)
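# with staircase=True the rate decays in discrete jumps:
#     learning_rate = starter_learning_rate * 0.96 ** (global_step // 100000)
# i.e. it is multiplied by 0.96 once every 100000 training steps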

with tf.name_scope("loss"):
    loss = tf.reduce_mean(tf.reduce_sum(tf.matmul(tf.square(ys-prediction),my_weights), reduction_indices=[1]))
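    # tf.square(ys - prediction) has shape [batch, Y1], so my_weights must be
    # a [Y1, k] matrix; each row's weighted squared errors are summed, then
    # averaged over the batch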

with tf.name_scope("training"):
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
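    # passing global_step lets minimize() increment it on every training step,
    # which in turn advances the exponential-decay schedule above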
# important step: initialize variables and create the session
init = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

# Saver() with no arguments checkpoints every variable in the graph
saver = tf.train.Saver()
sess.run(init)
for i in range(1000):
    # training on the first 1300 samples
    sess.run(train_step, feed_dict={xs: x_data[0:1300, :], ys: y_data[0:1300, :]})
    if i % 500 == 0:
        MSE = sess.run(loss, feed_dict={xs: x_data[0:1300, :], ys: y_data[0:1300, :]})
        print(MSE)

# save the trained model once training has finished
saver_path = saver.save(sess, "path\\model.ckpt")  # write the checkpoint to path\model.ckpt
print("Model saved in file:", saver_path)

Loading the model

To restore a checkpoint, the same graph is rebuilt first (same variables, same shapes); saver.restore then loads the trained values instead of running training again.

import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    layer_name = 'layer%s'%n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]),name="w")
            tf.summary.histogram(layer_name+'/weights',Weights)
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1,name="b")
            tf.summary.histogram(layer_name+'/biases',biases)
        with tf.name_scope("wx_plus_b"):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        # record the layer output whether or not an activation is applied
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs

x_data = np.loadtxt("path", dtype='float32')
y_data = np.loadtxt("path", dtype='float32')

# define placeholders for the network inputs
with tf.name_scope("inputs"):
    xs = tf.placeholder(tf.float32, [None, X1], name="x_input")
    ys = tf.placeholder(tf.float32, [None, Y1], name="y_output")
# add hidden layer
l1 = add_layer(xs, X1, X2, n_layer=1, activation_function=None)
# add output layer (an activation such as tf.nn.relu could be passed instead of None)
prediction = add_layer(l1, X2, Y1, n_layer=3, activation_function=None)

my_weights = np.loadtxt("path", dtype='float32')

# set up a decaying learning rate
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.0000015
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.96, staircase=True)

with tf.name_scope("loss"):
    loss = tf.reduce_mean(tf.reduce_sum(tf.matmul(tf.square(ys-prediction),my_weights), reduction_indices=[1]))

with tf.name_scope("training"):
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
# important step
init = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

saver = tf.train.Saver()
sess.run(init)

# restore overwrites the freshly initialized values with the checkpointed ones
saver.restore(sess, "path\\model.ckpt")

# the restored model reproduces the training-time loss without any retraining
for i in range(2):
    MSE = sess.run(loss, feed_dict={xs: x_data[0:1300, :], ys: y_data[0:1300, :]})
    print(MSE)
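
If rebuilding the graph in Python is inconvenient, the graph structure itself can be recovered from the .meta file written at save time via tf.train.import_meta_graph. A sketch under the naming used above; "inputs/x_input:0" and "inputs/y_output:0" follow from the name_scope pattern in the scripts, while "loss/Mean:0" is an assumption about the auto-generated name of the reduce_mean op:

import tensorflow as tf

with tf.Session() as sess:
    # rebuild the graph from the .meta file, then load the weights
    saver = tf.train.import_meta_graph("path\\model.ckpt.meta")
    saver.restore(sess, "path\\model.ckpt")

    graph = tf.get_default_graph()
    xs = graph.get_tensor_by_name("inputs/x_input:0")
    ys = graph.get_tensor_by_name("inputs/y_output:0")
    loss = graph.get_tensor_by_name("loss/Mean:0")  # assumed auto-generated name

    # evaluate exactly as before, without redefining the network in code:
    # MSE = sess.run(loss, feed_dict={xs: ..., ys: ...})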


Reposted from blog.csdn.net/Xu_Haocan/article/details/78359634