Training on the CIFAR-10 dataset with TensorFlow (based on the official tutorial code)


1. CIFAR-10 dataset overview

The dataset was compiled by Geoffrey Hinton's students Alex Krizhevsky and Ilya Sutskever and is used for recognizing common objects.

Classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck

Image Size: 32 * 32 * 3
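
Each record in the binary version of the dataset is 3073 bytes: a 1-byte label followed by 3072 pixel bytes stored channel-first (all red values, then all green, then all blue). As a quick sanity check, here is a small NumPy sketch of my own (not part of the official code, and assuming the archive has already been extracted to data_cifar10/cifar-10-batches-bin/ as done below) that reads the first record of data_batch_1.bin:

# Standalone sketch for inspecting one record of the CIFAR-10 binary format.
import numpy as np

record_bytes = 1 + 32 * 32 * 3  # 1 label byte + 3072 pixel bytes
with open('data_cifar10/cifar-10-batches-bin/data_batch_1.bin', 'rb') as f:
    record = np.frombuffer(f.read(record_bytes), dtype=np.uint8)

label = int(record[0])                 # class index in the range 0..9
image = record[1:].reshape(3, 32, 32)  # stored as [depth, height, width]
image = image.transpose(1, 2, 0)       # convert to [height, width, depth] for display
print(label, image.shape)              # e.g. 6 (32, 32, 3)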

2. Code

TensorFlow already provides official example code for this task.

Official code: https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10

The code below follows the official implementation and is split into two files: data_train.py (trains the model) and data_test.py (evaluates the model).

data_train.py (training the model)

# coding:utf-8
import os
import sys
import time
import math
import urllib.request
import tarfile
from datetime import datetime

import numpy as np
import tensorflow as tf

# Download the CIFAR-10 data
# The binary version is hosted on Alex Krizhevsky's website
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'

# Directory where the downloaded data is stored
data_dir = 'data_cifar10'
batch_size = 100
# Checkpoint directories
train_dir = 'data_train_checkpoint'
checkpoint_dir = 'data_train_checkpoint'
eval_dir = 'data_eval'

data_batch = 1  # flag for the training set
test_batch = 2  # flag for the test (evaluation) set

# Maximum number of training steps
max_step = 1000000
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
MOVING_AVERAGE_DECAY = 0.9999
NUM_EPOCHS_PER_DECAY = 350.0
LEARNING_RATE_DECAY_FACTOR = 0.1
INITIAL_LEARNING_RATE = 0.1


def download_data():
    # Destination directory for the downloaded data
    data_destdir = data_dir
    # Create the directory if it does not exist yet
    if not os.path.exists(data_destdir):
        os.makedirs(data_destdir)
    # Extract the file name from the URL
    data_filename = DATA_URL.split('/')[-1]  # last element after splitting on '/', i.e. the file name
    data_filepath = os.path.join(data_destdir, data_filename)
    # print(data_filepath)
    # data_cifar10/cifar-10-binary.tar.gz
    # Download the file only if it is not already present
    if not os.path.exists(data_filepath):
        # Print the download progress
        def _progress(count, block_size, total_size):
            # count: number of blocks downloaded so far; block_size: block size; total_size: size of the remote file
            # '\r' moves the cursor back to the start of the line
            sys.stdout.write('\r>>Downloading %s %.3f%%' %
                             (data_filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        # urlretrieve downloads the remote data to a local file
        # urlretrieve(url, filename=None, reporthook=None, data=None)
        # returns (filename, headers): the local path and the server's response headers
        # url: remote server path       filename: local download path
        # reporthook: callback used here to display the current download progress
        data_filepath, _ = urllib.request.urlretrieve(DATA_URL, data_filepath, _progress)
        print()
        # Download finished; os.stat returns the file's status information
        statinfo = os.stat(data_filepath)
        print('Successfully downloaded', data_filename, statinfo.st_size, 'bytes.')
    # Extract the downloaded archive
    data_exact_filepath = os.path.join(data_destdir, 'cifar-10-batches-bin')
    if not os.path.exists(data_exact_filepath):
        tarfile.open(data_filepath, 'r:gz').extractall(data_destdir)
    # Contents of the extracted data:
    # cifar-10-batches-bin  directory
    # batches.meta.txt  class names of the dataset
    # data_batch_1.bin  10000 training images
    # data_batch_2.bin  10000 training images
    # data_batch_3.bin  10000 training images
    # data_batch_4.bin  10000 training images
    # data_batch_5.bin  10000 training images
    # test_batch.bin    10000 test images and their labels
    # readme.html       description of the dataset
    #
    # Each sample consists of 3073 bytes:
    # the first byte is the label, the remaining bytes are the image data
    # <1 x label> <3072 x pixel>
    # there are no separator bytes between samples

# Get images and labels from the dataset
def get_images_and_labels(data_type):
    # images: 4-D tensor [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3]
    # labels: 1-D tensor [batch_size]
    global data_dir
    data_dir = 'data_cifar10'
    data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
    # print("data_dir:", data_dir)  # data_cifar10/cifar-10-batches-bin
    # Files in the directory
    # Training set
    if data_type == data_batch:
        filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in range(1, 6)]
    # print(filenames)
    # ['data_cifar10/cifar-10-batches-bin/data_batch_1.bin',
    # 'data_cifar10/cifar-10-batches-bin/data_batch_2.bin',
    # 'data_cifar10/cifar-10-batches-bin/data_batch_3.bin',
    # 'data_cifar10/cifar-10-batches-bin/data_batch_4.bin',
    # 'data_cifar10/cifar-10-batches-bin/data_batch_5.bin']
    # Test (evaluation) set
    if data_type == test_batch:
        filenames = [os.path.join(data_dir, 'test_batch.bin')]

    for f in filenames:
        # print(f)
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    # tf.train.string_input_producer creates a queue of file names
    filename_queue = tf.train.string_input_producer(filenames)

    # ------------------------------------------------------------------------------------------------
    # Read records from the file name queue
    # Each image is 32*32*3; the label takes 1 byte
    image_width = 32
    image_height = 32
    image_depth = 3
    image_label_bytes = 1
    # Number of bytes per image
    image_bytes = image_width * image_height * image_depth
    image_size = image_label_bytes + image_bytes

    # Use a reader to read records from the file name queue
    # tf.FixedLengthRecordReader reads a fixed number of bytes per record
    # and continues from the previous position on the next call
    reader = tf.FixedLengthRecordReader(image_size)
    key, value = reader.read(filename_queue)
    # key   :Tensor("ReaderReadV2:0", shape=(), dtype=string)
    # value :Tensor("ReaderReadV2:1", shape=(), dtype=string)

    # value is a string; convert it to a vector of uint8
    image_tensor = tf.decode_raw(value, tf.uint8)
    # image_tensor  :Tensor("DecodeRaw:0", shape=(?,), dtype=uint8)

    # The first byte is the label; cast it to an int32 vector
    # def strided_slice(input_, # input tensor
    #                   begin,  # start index
    #                   end,    # end index (exclusive)
    #                   strides=None,
    #                   begin_mask=0,
    #                   end_mask=0,
    #                   ellipsis_mask=0,
    #                   new_axis_mask=0,
    #                   shrink_axis_mask=0,
    #                   var=None,
    #                   name=None):
    image_label = tf.cast(
        tf.strided_slice(image_tensor, [0], [image_label_bytes]), tf.int32)  # take the slice [0, 1)
    # print(image_label)  # Tensor("Cast:0", shape=(?,), dtype=int32)

    # The bytes after the label are the image data
    # reshape [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(image_tensor, [image_label_bytes],
                         [image_size]),
        [image_depth, image_height, image_width]
    )

    # Transpose [depth, height, width] to [height, width, depth]
    image_data = tf.transpose(depth_major, [1, 2, 0])
    # print(image_data)  # Tensor("transpose:0", shape=(32, 32, 3), dtype=uint8)

    # print(image_label) #Tensor("Cast:0", shape=(?,), dtype=int32)
    # print(image_data)  #Tensor("transpose:0", shape=(32, 32, 3), dtype=uint8)

    # ------------------------------------------------------------------------------------------------
    # Data augmentation: random cropping, flipping, etc.
    reshape_image = tf.cast(image_data, tf.float32)
    # print(reshape_image)  # Tensor("Cast_1:0", shape=(32, 32, 3), dtype=float32)
    # Crop the image to 24*24*3
    image_width = 24
    image_height = 24
    image_depth = 3

    # Randomly crop a region of the image
    distorted_image = tf.random_crop(reshape_image, [image_height, image_width, image_depth])
    # Randomly flip the image horizontally
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Randomly adjust brightness and contrast
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    # print(distorted_image)  # Tensor("adjust_contrast/Identity_1:0", shape=(24, 24, 3), dtype=float32)

    # Standardize the image
    # (x - mean) / adjusted_stddev, where x is a pixel value of the RGB channels and mean is the per-image mean
    # adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements())), where stddev is the per-image standard deviation
    float_image = tf.image.per_image_standardization(distorted_image)

    # Set the shape of float_image
    float_image.set_shape([image_height, image_width, image_depth])
    image_label.set_shape([1])

    # Ensure good shuffling
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)

    # Build batches of batch_size examples
    num_prepocess_threads = 16
    images, labels = tf.train.shuffle_batch(
        [float_image, image_label],
        batch_size=batch_size,
        num_threads=num_prepocess_threads,
        capacity=min_queue_examples + 3 * batch_size,  # maximum size of the queue
        min_after_dequeue=min_queue_examples)          # minimum number of elements left in the queue after a dequeue

    # print(images)  # Tensor("shuffle_batch:0", shape=(100, 24, 24, 3), dtype=float32)
    # print(labels)  # Tensor("shuffle_batch:1", shape=(100, 1), dtype=int32)
    labels = tf.reshape(labels, [batch_size])
    # print(labels)  # Tensor("Reshape_1:0", shape=(100,), dtype=int32)
    return images, labels

# Build the network
# Returns the logits (one unnormalized score per class)
def inference(images):
    # First convolutional layer
    with tf.variable_scope('conv1') as scope:
        kernel = tf.get_variable(
            name='weights',
            shape=[5, 5, 3, 64],
            initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32),
        )
        conv = tf.nn.conv2d(images, kernel, [1,1,1,1], padding='SAME')
        biases = tf.get_variable(name='biases', shape=[64], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # Pooling layer
    # strides: [batch, height, width, channels]
    # the first 1 means a stride of 1 along the batch dimension (no samples are skipped)
    # height is the vertical stride of the pooling window
    # width is the horizontal stride of the pooling window
    # the last 1 means a stride of 1 along the channel dimension (no channels are skipped)
    pool1 = tf.nn.max_pool(
        conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
    # Local response normalization (LRN) layer
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

    # Second convolutional layer
    with tf.variable_scope('conv2') as scope:
        kernel = tf.get_variable(
            name='weights',
            shape=[5, 5, 64, 64],
            initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32),
        )
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable(name='biases', shape=[64], initializer=tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
    # Local response normalization (LRN) layer
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
    # Pooling layer
    pool2 = tf.nn.max_pool(
        norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

    # Third layer: fully connected
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, [batch_size, -1])
        # Length of the flattened feature dimension
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                shape=[dim, 384],
                initializer=tf.truncated_normal_initializer(stddev=0.04, dtype=tf.float32))
        # Weight decay (L2 regularization) term
        weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.004, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
        biases = tf.get_variable(name='biases', shape=[384], initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # Fourth layer: fully connected
    with tf.variable_scope('local4') as scope:
        weights = tf.get_variable('weights',
                shape=[384, 192],
                initializer=tf.truncated_normal_initializer(stddev=0.04, dtype=tf.float32))
        weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.004, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
        biases = tf.get_variable(name='biases', shape=[192], initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)

    # Softmax linear layer (produces the logits)
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('weights',
                shape=[192, NUM_CLASSES],
                initializer=tf.truncated_normal_initializer(stddev=1/192.0, dtype=tf.float32))
        biases = tf.get_variable(name='biases', shape=[NUM_CLASSES], initializer=tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)  # linear output (no ReLU on the logits)

    return softmax_linear

# Compute the total loss: cross entropy plus the weight-decay terms
def calculate_loss(logits, labels):
    labels = tf.cast(labels, tf.int64)
    # Cross entropy between the logits and the labels
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')


# Train the model and update its parameters
def train(total_loss, global_step):
    # Number of batches per epoch
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size
    # Number of steps after which the learning rate decays
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,      # initial learning rate (0.1), decayed exponentially
                                    global_step,
                                    decay_steps,                # decay period in steps
                                    LEARNING_RATE_DECAY_FACTOR, # decay rate
                                    staircase=True)             # True: decay at discrete intervals (staircase schedule)
    tf.summary.scalar('learning_rate', lr)
    # Moving average of the losses
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')

    # Apply the moving average to the individual losses and the total loss
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    #print(loss_averages_op)
    # name: "avg"
    # op: "NoOp"
    # input: "^avg/AssignMovingAvg"
    # input: "^avg/AssignMovingAvg_1"
    # input: "^avg/AssignMovingAvg_2"
    # input: "^avg/AssignMovingAvg_3"

    # losses:
    # [<tf.Tensor 'local3/weight_loss:0' shape=() dtype=float32>,
    # <tf.Tensor 'local4/weight_loss:0' shape=() dtype=float32>,
    # <tf.Tensor 'cross_entropy:0' shape=() dtype=float32>]
    # [totalloss]:
    # [<tf.Tensor 'total_loss:0' shape=() dtype=float32>]

    for l in losses + [total_loss]:
        # print(l.op.name)
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))

    # Compute the gradients
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)

    # Apply the gradients
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms of the trainable variables so their distributions show up in TensorBoard
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op

# Run the training loop
def train_data():
    with tf.Graph().as_default():
        # Global training step counter
        global_step = tf.contrib.framework.get_or_create_global_step()  # returns the global step variable, creating it if it does not exist
        # Get the images and labels
        images, labels = get_images_and_labels(data_batch)

        # Build the network
        logits = inference(images)

        # Compute the loss
        loss = calculate_loss(logits, labels)

        # Train the network and update the parameters
        train_op = train(loss, global_step)

        # sess = tf.Session()
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False))

        # Saver for writing checkpoints
        saver = tf.train.Saver(tf.global_variables())
        init = tf.global_variables_initializer()
        sess.run(init)
        tf.train.start_queue_runners(sess=sess)

        for step in range(max_step):
            if step == 0:
                start_time = time.time()
            sess.run(train_op)
            if step % 10 == 0:
                current_time = time.time()
                duration = current_time - start_time
                start_time = current_time
                loss_value = sess.run(loss)
                sec_per_batch = float(duration / batch_size)
                format_str = '%s: step %d, loss = %.2f (%.3f sec/batch)'
                print(format_str % (datetime.now(), step + 1, loss_value, sec_per_batch))
            if step % 100 == 0:
                # Save a checkpoint every 100 steps
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)


def main(argv=None):
    # Download the data
    download_data()
    # Train the model and save checkpoints
    train_data()

if __name__ == "__main__":
    # tf.app.run() parses flags and calls main()
    tf.app.run()




'''
2020-01-12 00:14:03.929213: step 1, loss = 4.67
2020-01-12 00:14:09.714377: step 11, loss = 4.65 
2020-01-12 00:15:02.531324: step 101, loss = 4.41 
2020-01-12 00:15:07.186746: step 111, loss = 4.37
2020-01-12 00:15:11.605784: step 121, loss = 4.41 
2020-01-12 00:17:40.982859: step 371, loss = 3.70 
2020-01-12 00:17:50.101351: step 381, loss = 3.62 
2020-01-12 00:17:59.200679: step 391, loss = 3.54
2020-01-12 00:21:14.842655: step 721, loss = 3.06 
2020-01-12 00:21:19.576794: step 731, loss = 3.07 
2020-01-12 00:21:24.416611: step 741, loss = 3.04
2020-01-12 00:27:49.150547: step 1271, loss = 2.39 
2020-01-12 00:27:57.067001: step 1281, loss = 2.43 
2020-01-12 00:28:04.061937: step 1291, loss = 2.20 
2020-01-12 00:36:10.288841: step 1901, loss = 1.86
2020-01-12 00:36:18.764131: step 1911, loss = 1.87 
2020-01-12 00:36:26.248622: step 1921, loss = 1.90 
'''
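
One note on data_train.py: it defines tf.summary scalars and histograms (learning rate, losses, variables and gradients) but never creates a summary writer, so nothing is actually saved for TensorBoard. Below is a small sketch of my own (an assumption, not part of the original post) showing how the functions above could be reused together with a tf.summary.FileWriter; it assumes the data has been downloaded and writes the event files into train_dir:

# Sketch only: reuse the functions from data_train.py and also write the summaries to disk.
import tensorflow as tf
import data_train

data_train.download_data()  # make sure the CIFAR-10 binaries are present
with tf.Graph().as_default():
    global_step = tf.contrib.framework.get_or_create_global_step()
    images, labels = data_train.get_images_and_labels(data_train.data_batch)
    logits = data_train.inference(images)
    loss = data_train.calculate_loss(logits, labels)
    train_op = data_train.train(loss, global_step)

    summary_op = tf.summary.merge_all()  # gathers the scalar/histogram summaries defined in data_train.py
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess=sess)
        writer = tf.summary.FileWriter(data_train.train_dir, sess.graph)
        for step in range(200):          # short demo run; the real script trains far longer
            _, summary = sess.run([train_op, summary_op])
            if step % 100 == 0:
                writer.add_summary(summary, step)
        writer.close()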


data_test.py (evaluating the model)

# coding:utf-8

import tensorflow as tf
import data_train
import numpy as np
from datetime import datetime
import math

test_batch = 2  # 2 selects the test set; 1 selects the training set
checkpoint_dir = 'data_train_checkpoint'
num_examples = 10000
batch_size = 100
# Evaluate the model on the test set
def test_data():
    with tf.Graph().as_default() as g:
        images, labels = data_train.get_images_and_labels(test_batch)
        logits = data_train.inference(images)
        # top_k_op is True where the true label is the model's top-1 prediction
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        # print(top_k_op)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # Load the trained model
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            # Check whether a checkpoint exists
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            try:
                threads = []
                for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                    threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                     start=True))
                num_iter = int(math.ceil(num_examples / batch_size))
                # Number of correct predictions
                true_count = 0
                # Total number of evaluated samples
                total_sample_count = num_iter * batch_size
                step = 0
                while step < num_iter and not coord.should_stop():
                    predictions = sess.run([top_k_op])
                    true_count += np.sum(predictions)
                    step += 1

                precision = true_count / total_sample_count
                print('%s: step:%s precision: %.3f' % (datetime.now(), global_step, precision))
            except Exception as e:  # pylint: disable=broad-except
                coord.request_stop(e)
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)


def main(argv=None):
    # Load the saved model
    # Evaluation data: test_batch.bin
    while True:
        test_data()

if __name__ == "__main__":
    # tf.app.run() parses flags and calls main()
    tf.app.run()
'''
2020-01-12 00:26:36.471554: step:1100 precision: 0.475
2020-01-12 00:27:00.779778: step:1100 precision: 0.477
2020-01-12 00:27:27.050172: step:1200 precision: 0.498
2020-01-12 00:27:51.298032: step:1200 precision: 0.498
2020-01-12 00:28:12.242880: step:1200 precision: 0.498
2020-01-12 00:28:35.227023: step:1300 precision: 0.483
2020-01-12 00:28:57.797969: step:1300 precision: 0.490
2020-01-12 00:29:22.142905: step:1300 precision: 0.488
2020-01-12 00:29:42.970540: step:1300 precision: 0.490
2020-01-12 00:30:04.621254: step:1400 precision: 0.551
2020-01-12 00:30:25.379606: step:1400 precision: 0.552
2020-01-12 00:30:46.505547: step:1400 precision: 0.545
2020-01-12 00:31:08.243943: step:1400 precision: 0.559
2020-01-12 00:31:30.720794: step:1500 precision: 0.544
2020-01-12 00:31:51.662406: step:1500 precision: 0.550
2020-01-12 00:32:11.204215: step:1500 precision: 0.551
2020-01-12 00:32:34.243997: step:1600 precision: 0.543
2020-01-12 00:32:56.652219: step:1600 precision: 0.525
2020-01-12 00:33:20.620415: step:1600 precision: 0.539
2020-01-12 00:33:43.651637: step:1600 precision: 0.532
2020-01-12 00:34:06.404304: step:1700 precision: 0.580
2020-01-12 00:34:30.844736: step:1700 precision: 0.580
2020-01-12 00:34:54.849618: step:1700 precision: 0.579
2020-01-12 00:35:16.788894: step:1700 precision: 0.586
2020-01-12 00:35:36.770714: step:1800 precision: 0.570
2020-01-12 00:35:57.119815: step:1800 precision: 0.572
2020-01-12 00:36:17.850892: step:1800 precision: 0.574
2020-01-12 00:36:38.196545: step:1900 precision: 0.562
2020-01-12 00:36:58.430616: step:1900 precision: 0.559
2020-01-12 00:37:19.160773: step:1900 precision: 0.567
2020-01-12 00:37:39.523627: step:1900 precision: 0.573
2020-01-12 00:38:00.199354: step:2000 precision: 0.581
2020-01-12 00:38:20.349644: step:2000 precision: 0.582
2020-01-12 00:38:39.577608: step:2000 precision: 0.580
2020-01-12 00:39:00.874597: step:2100 precision: 0.581
2020-01-12 00:39:20.726392: step:2100 precision: 0.577
2020-01-12 00:39:40.988667: step:2100 precision: 0.581
'''


A quick plug:

I've started a WeChat public account where I record my daily deep learning and reinforcement learning notes.

I hope we can make progress together. Thanks for following!
