Cat vs. Dog Classification with TensorFlow (Kaggle practice exercise)

  1. Load the cat/dog images, label them 0 and 1, convert the data into a format TensorFlow can read, and arrange it into batches:
    1). get_files() reads the image files, assigns a cat/dog label based on each filename, puts the image paths and labels into arrays, shuffles them, and returns both lists.
    2). get_batch() converts the lists from step 1 into TensorFlow tensors, standardizes each image after cropping/padding it to a fixed size, and returns the data in batches.
    input_data.py
import tensorflow as tf
import os
import numpy as np

# Dataset files are named type.num.jpg, e.g. cat.0.jpg
def get_files(file_dir):
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):
        name = file.split(sep='.')
        if 'cat' in name[0]:
            cats.append(file_dir + file)
            label_cats.append(0)
        elif 'dog' in name[0]:
            dogs.append(file_dir + file)
            label_dogs.append(1)

    # Merge the two classes once the loop is done
    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))

    # Put the image paths and labels into one temp array, shuffle it, then split them back out
    temp = np.array([image_list, label_list])  # stack the two lists into a 2 x N matrix
    temp = temp.transpose()                    # transpose to N x 2 so each row is (path, label)
    np.random.shuffle(temp)                    # shuffle the rows

    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]
    return image_list, label_list

    # train_img = image_list[0:int(len(image_list)*0.7)]
    # train_label = label_list[0:int(len(image_list)*0.7)]
    # valid_img = image_list[int(len(image_list)*0.7):]
    # valid_label = label_list[int(len(image_list)*0.7):]
    # return train_img,train_label,valid_img,valid_label

def get_batch(image, label, image_w, image_h, batch_size, capacity):  # capacity: maximum number of images the queue can hold
    # Convert to dtypes TensorFlow can work with
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    input_queue = tf.train.slice_input_producer([image, label])  # tf.train.slice_input_producer is a tensor producer:
    # on each call it takes one slice from the tensor list, in order or at random, and puts it into the filename queue.
    label = input_queue[1]
    img_contents = tf.read_file(input_queue[0])  # raw file contents (a string tensor)
    image = tf.image.decode_jpeg(img_contents, channels=3)  # decode into a 3-D image array
    image = tf.image.resize_image_with_crop_or_pad(image, image_w, image_h)
    image = tf.image.per_image_standardization(image)

    # Build batches; set num_threads according to your machine's configuration
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=64, capacity=capacity)
    label_batch = tf.reshape(label_batch, [batch_size])  # flatten the labels into shape [batch_size]
    image_batch = tf.cast(image_batch, tf.float32)

    return image_batch, label_batch




Automatically splitting the dataset into training and validation sets: https://blog.csdn.net/Gewenfei_1/article/details/81214728
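
A minimal sketch of such a split (the 70/30 ratio and the helper name split_train_valid are my own choices, following the commented-out lines in get_files above):

def split_train_valid(image_list, label_list, train_ratio=0.7):
    # image_list / label_list are the shuffled outputs of get_files()
    n_train = int(len(image_list) * train_ratio)
    train_img, train_label = image_list[:n_train], label_list[:n_train]
    valid_img, valid_label = image_list[n_train:], label_list[n_train:]
    return train_img, train_label, valid_img, valid_label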

The code above can be simplified as follows:

import numpy as np
import tensorflow as tf
import os
import cv2
import matplotlib.pyplot as plt
from PIL import Image

def get_files(file_dir):
    cats = []
    dogs = []
    cats_label = []
    dogs_label = []
    img_dirs = os.listdir(file_dir)  # list every entry (file name) under file_dir
    for img_name in img_dirs:  # e.g. cat.0.jpg
        name = img_name.split(".")  # ['cat', '0', 'jpg']
        if name[0] == "cat":
            cats.append(file_dir + img_name)  # keep the full path: tf.train.slice_input_producer in the next function reads file paths, not bare names!
            cats_label.append(0)
        elif name[0] == "dog":
            dogs.append(file_dir + img_name)
            dogs_label.append(1)

    img_list = np.hstack((cats, dogs))  # array of path strings
    label_list = np.hstack((cats_label, dogs_label))  # array of integer labels
    return img_list, label_list

#############################################

def get_batch(image, label, image_w, image_h, batch_size, capacity):  # capacity: maximum number of images the queue can hold

    input_queue = tf.train.slice_input_producer([image, label])  # tf.train.slice_input_producer is a tensor producer:
    # on each call it takes one slice from the tensor list, in order or at random, and puts it into the filename queue.
    label = input_queue[1]
    img_contents = tf.read_file(input_queue[0])  # raw file contents (a string tensor)
    image = tf.image.decode_jpeg(img_contents, channels=3)  # decode into a 3-D image array
    image = tf.image.resize_image_with_crop_or_pad(image, image_w, image_h)
    image = tf.cast(image, tf.float32)
    image = tf.image.per_image_standardization(image)

    # Build batches; set num_threads according to your machine's configuration
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=64, capacity=capacity)

    return image_batch, label_batch



The code relies on TensorFlow's filename-queue mechanism. tf.train.string_input_producer takes a list of file names (paths) and the system automatically turns it into a filename queue; tf.train.slice_input_producer, used above, does the same but can take the labels alongside the file names.

tf.train.string_input_producer also has two important parameters: num_epochs, and shuffle, which controls whether the file order is shuffled within each epoch (shuffle=False keeps the original order).

Besides tf.train.string_input_producer, one more function needs introducing: tf.train.start_queue_runners.
After tf.train.string_input_producer creates the filename queue, the whole system is still "stalled": the file names have not actually been pushed into the queue yet. If we started computing at this point, the in-memory queue would be empty, the compute ops would wait forever, and the whole program would block. Once tf.train.start_queue_runners is called, the compute ops can fetch data and the program runs.
Reference: [https://zhuanlan.zhihu.com/p/27238630]
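
As a minimal sketch (not part of the original code; the paths and sizes are taken from the training script below), this is how the batches produced by get_batch are actually consumed. Without the start_queue_runners call, the sess.run would block forever:

import tensorflow as tf
import input_data

img_list, label_list = input_data.get_files("F:/mytest/2.cat_dog/train/train/")
image_batch, label_batch = input_data.get_batch(img_list, label_list, 208, 208, 32, 256)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start the queue-runner threads
    imgs, labels = sess.run([image_batch, label_batch])  # now this returns a real (32, 208, 208, 3) batch
    print(imgs.shape, labels)
    coord.request_stop()
    coord.join(threads)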

2. Feature extraction with a convolutional neural network: model.py


import tensorflow as tf

def inference(image, batch_size, n_classes):
    with tf.variable_scope("conv1") as scope:#课本108,variable_scope控制get_variable是获取(reuse=True)还是创建变量
        weights = tf.get_variable("weights", shape=[3,3,3,16], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases", shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(image, weights, strides=[1,1,1,1], padding="SAME")
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope("pooling1_lrn") as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1,3,3,1], strides=[1,2,2,1], padding="SAME", name="pooling1")
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001/9.0, beta=0.75, name="norm1")  # local response normalization (LRN)
    with tf.variable_scope("conv2") as scope:
        weights = tf.get_variable("weights", shape=[3,3,16,16], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases", shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1,1,1,1], padding="SAME")
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope("pooling2_lrn") as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001/9.0,beta=0.75, name="norm2")
        pool2 = tf.nn.max_pool(norm2, ksize=[1,3,3,1], strides=[1,2,2,1], padding="SAME", name="pooling2")

    with tf.variable_scope("local3") as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable("weights", shape=[dim, 128], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable("biases", shape=[128], dtype=tf.float32, initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    with tf.variable_scope("local4") as scope:
        weights = tf.get_variable("weights", shape=[128, 128], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable("biases", shape=[128], dtype=tf.float32, initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name="local4")

    with tf.variable_scope("softmax_linear") as scope:
        weights = tf.get_variable("weights", shape=[128, n_classes], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable("biases", shape=[n_classes], dtype=tf.float32, initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name="softmax_linear")  # raw logits: no ReLU here, the softmax cross-entropy loss expects unactivated outputs

    return softmax_linear

def loss(logits, labels):  # network outputs (logits) and ground-truth labels
    with tf.variable_scope("loss") as scope:
        cross_entropy= tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name="entropy_per_example")
        loss = tf.reduce_mean(cross_entropy)
        tf.summary.scalar(scope.name + "/loss", loss)  # tf.summary.scalar records a scalar value for TensorBoard
    return loss

def training(loss, learning_rate):
    with tf.name_scope("optimizer"):
        global_step = tf.Variable(0, name="global_step", trainable=False)  # counts the number of training steps; not trainable
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op= optimizer.minimize(loss, global_step=global_step)
        # The two lines above are equivalent to: train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)
    return train_op

def evalution(logits, labels):
    with tf.variable_scope("accuracy") as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)  # see the note below
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + "/accuracy", accuracy)  # record the accuracy scalar for TensorBoard
    return accuracy

"""
top_1_op取样本的最大预测概率的索引与实际标签对比,top_2_op取样本的最大和仅次最大的两个预测概率与实际标签对比,
如果实际标签在其中则为True,否则为False。
"""

3. Training: training.py

import tensorflow as tf
import numpy as np
import os
import input_data as _input_data  # the input_data.py module from step 1
import model

N_CLASSES = 2
IMG_W = 208
IMG_H = 208
BATCH_SIZE = 32
CAPACITY = 256
STEP = 15000   # number of training steps; should be more than 10000
LEARNING_RATE = 0.0001

x = tf.placeholder(tf.float32, shape=[None, 129792])  # unused leftover placeholders (129792 = 208*208*3)
y_ = tf.placeholder(tf.float32, shape=[None, 2])

def run_training():
    train_dir = "F:/mytest/2.cat_dog/train/train/"
    log_train_dir = "F:/mytest/2.cat_dog/train_savenet/"

    train,train_labels = _input_data.get_files(train_dir)
    train_batch, train_label_batch = _input_data.get_batch(train, train_labels, IMG_W,IMG_H,BATCH_SIZE,CAPACITY)

    train_logits= model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss= model.loss(train_logits, train_label_batch)
    train_op = model.training(train_loss, LEARNING_RATE)
    train_acc = model.evalution(train_logits, train_label_batch)
    summary_op = tf.summary.merge_all()  # merge_all collects every summary op so they can be written to disk and shown in TensorBoard;
    # this one line is usually enough to record all the training information.

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(log_train_dir, sess.graph)  # writer that saves the graph and summaries to log_train_dir
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    # Coordinator and start_queue_runners monitor the queue state, keeping it enqueuing and dequeuing
    coord = tf.train.Coordinator()  # https://blog.csdn.net/weixin_42052460/article/details/80714539
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:  # %.2f prints a float with two decimals; %% prints a literal %
                print("step %d, train loss = %.2f, train accuracy  = %.2f%%" %(step, tra_loss, tra_acc*100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)  # write this step's summaries to the event file


            if step % 2000 == 0 or (step + 1) == STEP:
                # Save the model every 2000 steps (and at the last step); checkpoints are written to checkpoint_path
                checkpoint_path = os.path.join(log_train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')

    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()

run_training()
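
Since the loss and accuracy are recorded with tf.summary.scalar and written by the FileWriter, the training curves can be inspected afterwards by pointing TensorBoard at the log directory used above, e.g. tensorboard --logdir F:/mytest/2.cat_dog/train_savenet/.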

4. Testing on a single image

import tensorflow as tf
from PIL import Image
import numpy as np
import os
import model
import matplotlib.pyplot as plt
import input_data

def get_one_img(test):  # pick one image at random from the given directory
    file = os.listdir(test)  # os.listdir() returns all file and directory names under the given path
    n = len(file)
    ind = np.random.randint(0, n)
    img_dir = os.path.join(test, file[ind])  # full path of the chosen image
    image= Image.open(img_dir)
    plt.imshow(image)
    plt.show()
    image = image.resize([208, 208])
    image = np.array(image)
    return image

def evaluate_one_img():
    test = "F:/mytest/2.cat_dog/test/test/"
    test_array = get_one_img(test)

    with tf.Graph().as_default():  # https://www.cnblogs.com/studylyn/p/9105818.html
        BATCH_SIZE = 1
        N_CLASSES = 2

        # Build the graph from a placeholder so that feed_dict below actually feeds the test image
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        log_test_dir = "F:/mytest/2.cat_dog/train_savenet/"  # the directory where training.py saved its checkpoints
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("从指定路径中加载模型。。。")
            #将模型加载到sess中
            ckpt = tf.train.get_checkpoint_state(log_test_dir)
            if ckpt and ckpt.model_checkpoint_path:  # https://blog.csdn.net/u011500062/article/details/51728830/
                global_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model restored; it was trained for %s steps." % global_step)
            else:
                print("Failed to load the model: no checkpoint file found.")

            # Feed the image into the model and compute the prediction
            prediction = sess.run(logit, feed_dict={x: test_array})
            max_index = np.argmax(prediction)  # index of the most probable class (np, not tf: prediction is already a numpy array)
            if max_index == 0:
                print('Probability of cat: %.6f' % prediction[0, 0])
            else:
                print('Probability of dog: %.6f' % prediction[0, 1])
# run the test
evaluate_one_img()





Original source: [https://blog.csdn.net/u012373815/article/details/78768727]


Reposted from blog.csdn.net/qq_42219077/article/details/82345405