TensorFlow: Saving and Using a Trained Model

After training a model in TensorFlow, you can save all of its parameters to disk, and later load and use them directly instead of retraining.

First, create a tf.train.Saver object.

To save all parameters: call the Saver object's save() function.

To use a saved model: call the Saver object's restore() function.

TensorFlow API:

save(
    sess,
    save_path,
    global_step=None,
    latest_filename=None,
    meta_graph_suffix='meta',
    write_meta_graph=True,
    write_state=True,
    strip_default_attrs=False
)

saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'

(The first argument is the session the model was trained in; the second is the filename (path prefix) the model is saved under; the third is optional and appends the given step number to the filename.)
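
As a quick sketch of what save() produces on disk (the 'myModel' path and step value here are made up for illustration), a single call with global_step=1000 writes several files under the chosen prefix:

import os
import tensorflow as tf

w = tf.Variable(tf.zeros([1]), name='w')
saver = tf.train.Saver()

os.makedirs('myModel', exist_ok=True)  # save() does not create the directory itself
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    save_path = saver.save(sess, os.path.join('myModel', 'model.ckpt'), global_step=1000)
    print(save_path)  # myModel/model.ckpt-1000
    # On disk (TF 1.x) this writes model.ckpt-1000.meta, model.ckpt-1000.index,
    # model.ckpt-1000.data-00000-of-00001, and updates the 'checkpoint' state file.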

restore(
    sess,
    save_path
)

(The first argument is the session; the second is the path the model was saved under.)
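
Since restore() needs the exact checkpoint prefix, a common pattern (a sketch, reusing the graph, saver, and 'myModel' directory from the sketch above) is to look it up with tf.train.latest_checkpoint:

ckpt_path = tf.train.latest_checkpoint('myModel')  # e.g. 'myModel/model.ckpt-1000'
with tf.Session() as sess:
    if ckpt_path is not None:
        # restore() assigns the saved values, so no initializer is needed first
        saver.restore(sess, ckpt_path)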

Code example 1:

import os

import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 1])
y = 4 * x + 4  # ground-truth target: the model should learn w = 4, b = 4

w = tf.Variable(tf.random_normal([1], -1, 1))  # weight, randomly initialized
b = tf.Variable(tf.zeros([1]))                 # bias, initialized to zero
y_predict = w * x + b


loss = tf.reduce_mean(tf.square(y - y_predict))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

isTrain = False          # True: train and save; False: restore and predict
train_steps = 100
checkpoint_steps = 50    # save a checkpoint every 50 steps
checkpoint_dir = 'myModelG'

saver = tf.train.Saver()  # defaults to saving all variables - in this case w and b
x_data = np.reshape(np.random.rand(10).astype(np.float32), (10, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
    if isTrain:
        for i in range(train_steps):
            sess.run(train, feed_dict={x: x_data})
            if (i + 1) % checkpoint_steps == 0:
                saver.save(sess, os.path.join(checkpoint_dir, 'model.ckpt'), global_step=i + 1)
    else:
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint found in %s' % checkpoint_dir)
        print(sess.run(w))  # should be close to 4 after training
        print(sess.run(b))  # should be close to 4 after training
        value = 3 * w + b   # predict y at x = 3; expect roughly 4*3 + 4 = 16
        rrr = sess.run(value)
        print(rrr)
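
To double-check what was actually written to the checkpoint, tf.train.list_variables (available in recent TF 1.x releases; a sketch, assuming the 'myModelG' checkpoint from above exists) prints every stored variable name and shape:

import tensorflow as tf

# lists (name, shape) pairs for all variables in the latest checkpoint
for name, shape in tf.train.list_variables('myModelG'):
    print(name, shape)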


Code example 2:

import os

import tensorflow as tf
import numpy as np

# training data
Train_dataPath = 'F:\\A_Noise_20180130\\CNN\\trainingdata\\data.log'
Train_labelPath = 'F:\\A_Noise_20180130\\CNN\\trainingdata\\labels.log'

# test data
Test_dataPath = 'F:\\A_Noise_20180130\\CNN\\testingdata1\\data.log'
Test_labelPath = 'F:\\A_Noise_20180130\\CNN\\testingdata1\\labels.log'

isTrain = False          # True: train and save; False: restore and predict
train_steps = 300        # total number of training steps
checkpoint_steps = 100   # save a checkpoint every 100 steps
checkpoint_dir = 'myModelC'

tData = []
tLable = []
with open(Train_dataPath) as file:
    for line in file:
        lineVal = line.strip().split(',')
        val = [float(u) for u in lineVal]
        tData.append(val)
with open(Train_labelPath) as file:
    for line in file:
        lineVal = line.strip().split(',')
        val = [int(u) for u in lineVal]
        tLable.append(val)

testData = []
testLable = []
with open(Test_dataPath) as file:
    for line in file:
        lineVal = line.strip().split(',')
        val = [float(u) for u in lineVal]
        testData.append(val)
with open(Test_labelPath) as file:
    for line in file:
        lineVal = line.strip().split(',')
        val = [int(u) for u in lineVal]
        testLable.append(val)

tData = np.array(tData)
tLable = np.array(tLable)
testData = np.array(testData)
testLable = np.array(testLable)

# one-hot label encoding:
#   noise     : 10
#   non-noise : 01
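
# If the labels on disk were plain integers instead (say 0 = noise, 1 = non-noise,
# an assumption for illustration), np.eye converts them to this one-hot layout:
example_int_labels = np.array([0, 1, 1, 0])                # hypothetical integer labels
example_one_hot = np.eye(2, dtype=np.int32)[example_int_labels]
# example_one_hot == [[1 0], [0 1], [0 1], [1 0]]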

# None means the first dimension (the batch) can have any length
input_x = tf.placeholder(tf.float32, [None, 200])  # input: 200 features per sample
output_y = tf.placeholder(tf.int32, [None, 2])     # output: one-hot labels
input_x_images = tf.reshape(input_x, [-1, 200, 1]) # reshaped input for conv1d: [batch, 200, 1]

# test samples and their labels, loaded above
test_x = testData  # samples
test_y = testLable # labels

# Build the convolutional neural network:
# 1st convolutional layer
conv1 = tf.layers.conv1d(
    inputs=input_x_images,  # shape [batch, 200, 1]
    filters=32,             # 32 filters, so the output depth is 32
    kernel_size=[5],        # 1-D kernel of width 5
    strides=1,              # stride 1
    padding='same',         # 'same' zero-pads so the output length stays 200
    activation=tf.nn.relu   # ReLU activation
)  # shape [batch, 200, 32]

# 1st pooling layer (subsampling)
pool1 = tf.layers.max_pooling1d(
    inputs=conv1,   # shape [batch, 200, 32]
    pool_size=[2],  # pooling window of width 2
    strides=2       # stride 2
)  # shape [batch, 100, 32]

# 2nd convolutional layer
conv2 = tf.layers.conv1d(
    inputs=pool1,           # shape [batch, 100, 32]
    filters=64,             # 64 filters, so the output depth is 64
    kernel_size=[5],        # 1-D kernel of width 5
    strides=1,              # stride 1
    padding='same',         # 'same' zero-pads so the output length stays 100
    activation=tf.nn.relu   # ReLU activation
)  # shape [batch, 100, 64]

# 2nd pooling layer (subsampling)
pool2 = tf.layers.max_pooling1d(
    inputs=conv2,   # shape [batch, 100, 64]
    pool_size=[2],  # pooling window of width 2
    strides=2       # stride 2
)  # shape [batch, 50, 64]

# flatten
flat = tf.reshape(pool2, [-1, 50 * 64])  # shape [batch, 50 * 64]

# fully connected layer with 1024 units
dense = tf.layers.dense(inputs=flat, units=1024, activation=tf.nn.relu)

# Dropout: rate=0.5 drops 50% of units, but only when training=True is passed;
# with the default training=False this layer is a no-op at both train and test time
dropout = tf.layers.dropout(inputs=dense, rate=0.5)

# fully connected layer with 2 units; no activation here, these are the raw logits
logits = tf.layers.dense(inputs=dropout, units=2)  # output, shape [batch, 2]

# compute the loss: softmax cross entropy between the one-hot labels and the logits
loss = tf.losses.softmax_cross_entropy(onehot_labels=output_y, logits=logits)
# Adam optimizer minimizing the loss, learning rate 0.001
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

# accuracy: how well the predictions match the true labels.
# tf.metrics.accuracy returns (accuracy, update_op) and creates two local variables;
# [1] selects the update_op, which updates the counts and returns the running accuracy
accuracy = tf.metrics.accuracy(
    labels=tf.argmax(output_y, axis=1),
    predictions=tf.argmax(logits, axis=1))[1]
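
# A standalone sketch of how the metric behaves (assumed, not from the original post):
# each run of the update_op folds the current batch into a running accuracy, and the
# local variables it accumulates into must be initialized first.
#   acc, acc_update = tf.metrics.accuracy(labels=tf.constant([1, 0, 1]),
#                                         predictions=tf.constant([1, 1, 1]))
#   sess.run(tf.local_variables_initializer())
#   sess.run(acc_update)  # -> 0.6666..., two of three predictions correct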

saver = tf.train.Saver()  # defaults to saving all variables

with tf.Session() as sess:
    # initialize variables: global (model weights) and local (metric counters)
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess.run(init)
    if isTrain:
        for i in range(train_steps):
            train_loss, _ = sess.run([loss, train_op], {input_x: tData, output_y: tLable})
            if i % 100 == 0:
                test_accuracy = sess.run(accuracy, {input_x: test_x, output_y: test_y})
                print("Step=%d, Train loss=%.4f, [Test accuracy=%.2f]" % (i, train_loss, test_accuracy))
            # save the model parameters periodically
            if (i + 1) % checkpoint_steps == 0:
                saver.save(sess, os.path.join(checkpoint_dir, 'model.ckpt'), global_step=i + 1)
    else:
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint found in %s' % checkpoint_dir)
        # test: print predicted vs. real labels for 40 test samples (indices 1980:2020)
        test_output = sess.run(logits, {input_x: test_x[1980:2020]})
        inferenced_y = np.argmax(test_output, 1)
        print(inferenced_y, 'Inferenced labels')
        print(np.argmax(test_y[1980:2020], 1), 'Real labels')
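
A few Saver options are worth knowing (a sketch, not from the original post; the names 'weight' and 'bias' are made up): max_to_keep limits how many recent checkpoints are retained, and passing an explicit dict saves only the listed variables under the given names:

import tensorflow as tf

w = tf.Variable(tf.zeros([1]), name='w')
b = tf.Variable(tf.zeros([1]), name='b')

saver = tf.train.Saver(max_to_keep=5)                      # keep only the 5 most recent checkpoints (the default)
partial_saver = tf.train.Saver({'weight': w, 'bias': b})   # save/restore only w and b, under chosen names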

Reposted from blog.csdn.net/purple_lumpy/article/details/80415264