A Deep Learning Network for Large Amounts of Data

input_data.py

import tensorflow as tf
import numpy as np
import os


# image dimensions
img_width = 208
img_height = 208


def get_files(file_dir):
    """
    Args : file directory
    Returns:
        list of images and labels
    函数说明: 生成标签文件
    """
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):
        name = file.split(sep='.')
        if name[0] == 'cat':
            cats.append(file_dir + file)
            label_cats.append(0)
        else:
            dogs.append(file_dir + file)
            label_dogs.append(1)
    print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))

    image_list = np.hstack((dogs, cats))
    label_list = np.hstack((label_dogs, label_cats))

    # pair paths with labels, shuffle them together, then split back into two lists
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]  # np.array stored the labels as strings; cast back to int

    return image_list, label_list

def get_batch(image, label, image_W, image_H, batch_size, capacity):
    """
    Args:
        image, label: lists of image paths and labels from get_files()
        image_W, image_H: target width and height after crop/pad
        batch_size: number of samples per batch
        capacity: maximum number of elements held in the queue
    Returns:
        image_batch: 4D tensor [batch_size, width, height, 3], dtype tf.float32
        label_batch: 1D tensor [batch_size], dtype tf.int32
    """
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    #make an input queue
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)
    """
    resize image
    """
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    image = tf.image.per_image_standardization(image)


    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size,
                                              num_threads=32, capacity=capacity)

    label_batch = tf.reshape(label_batch, [batch_size])

    return image_batch, label_batch

# quick visual check of the input pipeline
import matplotlib.pyplot as plt

BATCH_SIZE = 8
CAPACITY = 512
IMG_W = 208
IMG_H = 208
train_dir = 'G:/record/tensorflow/tensor_dog/data/train/'

image_list, label_list = get_files(train_dir)
image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, batch_size=BATCH_SIZE, capacity=CAPACITY)

with tf.Session() as sess:
    i = 0
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while not coord.should_stop() and i < 1:
            img, label = sess.run([image_batch, label_batch])

            for j in np.arange(BATCH_SIZE):
                print('label: %d' % label[j])
                # per_image_standardization shifts pixel values, so colors will look distorted
                plt.imshow(img[j, :, :, :])
                plt.show()
            i += 1

    except tf.errors.OutOfRangeError:
        print('Done')
    finally:
        coord.request_stop()
    coord.join(threads)
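
The queue-runner pipeline above (tf.train.slice_input_producer plus tf.train.start_queue_runners) is the classic TF 1.x approach and was later deprecated in favor of tf.data. For reference, here is a rough tf.data sketch of an equivalent get_batch; it is only a sketch under the same assumptions (JPEG files, fixed crop size), not the pipeline used in this post:

# rough tf.data equivalent of get_batch (a sketch, not the original pipeline)
def get_batch_dataset(image_list, label_list, image_W, image_H, batch_size):
    def _parse(filename, label):
        contents = tf.read_file(filename)
        image = tf.image.decode_jpeg(contents, channels=3)
        image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
        image = tf.image.per_image_standardization(image)
        return image, tf.cast(label, tf.int32)

    dataset = tf.data.Dataset.from_tensor_slices((image_list, label_list))
    dataset = dataset.shuffle(len(image_list)).map(_parse).batch(batch_size).repeat()
    return dataset.make_one_shot_iterator().get_next()

The tensors returned by get_batch_dataset can replace image_batch and label_batch above, and no Coordinator or queue runners are needed.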

model.py

import tensorflow as tf

def inference(images, batch_size, n_classes):
    """
    Build the model.
    Args:
        images: image batch, 4D tensor, tf.float32, [batch_size, width, height, channels]
        batch_size: number of images per batch
        n_classes: number of output classes
    Returns:
        softmax_linear: tensor with the computed logits, float, [batch_size, n_classes]
    """
    #conv1
    with tf.variable_scope('Conv1') as scope:
        weights1 = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))

        biases1 = tf.get_variable('biases', shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights1, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases1)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # pool1 and norm1
    with tf.variable_scope('Pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001/9.0,
                          beta=0.75, name='norm1')
    #conv2
    with tf.variable_scope('Conv2') as scope:
        weights2 = tf.get_variable('weights',
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))

        biases2 = tf.get_variable('biases', shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights2, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases2)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)

    # norm2 and pool2
    with tf.variable_scope('Pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001/9.0,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                               padding='SAME')

    #local3
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])  # flatten each image to a 1-D vector
        dim = reshape.get_shape()[1].value
        weights3 = tf.get_variable('weights',
                                   shape=[dim, 128],
                                   dtype=tf.float32,
                                   initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases3 = tf.get_variable('biases',
                                  shape=[128],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights3) + biases3, name=scope.name)

    #local4
    with tf.variable_scope('local4') as scope:
        weights4 = tf.get_variable('weights',
                                   shape=[128, 128],
                                   dtype=tf.float32,
                                   initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases4 = tf.get_variable('biases',
                                  shape=[128],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights4) + biases4, name=scope.name)
    #softmax
    with tf.variable_scope('softmax_linear') as scope:
        weight5 = tf.get_variable('softmax_linear',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases5 = tf.get_variable('biases',
                                  shape=[n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weight5), biases5)

    return softmax_linear
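
For a 208 x 208 x 3 input, the shapes flowing through inference work out as follows; this is where the flattened dimension read by reshape.get_shape()[1].value comes from:

# shape trace for a 208 x 208 x 3 input (derived from the layer definitions above)
#   conv1 (3x3, SAME, stride 1)  -> 208 x 208 x 16
#   pool1 (3x3, SAME, stride 2)  -> 104 x 104 x 16
#   conv2 (3x3, SAME, stride 1)  -> 104 x 104 x 16
#   pool2 (3x3, SAME, stride 1)  -> 104 x 104 x 16
#   flatten                      -> dim = 104 * 104 * 16 = 173056
# so weights3 has shape [173056, 128] and holds the vast majority of the parameters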


def losses(logits, labels):
    """
    count loss
    """

    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels,
                                                                       name = 'cross_entropy')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)  # track the loss in TensorBoard
    return loss

def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

def evaluation(logits, labels):
    """
    评估精度
    """
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
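
evaluation relies on tf.nn.in_top_k, which marks a row of logits as correct when the true label is among its top-k (here k = 1) entries. A tiny sanity check with made-up values:

# hypothetical sanity check for evaluation() with hand-picked logits
logits = tf.constant([[2.0, 0.5],   # argmax = 0, matches label 0
                      [0.1, 1.2]])  # argmax = 1, misses label 0
labels = tf.constant([0, 0])
acc = evaluation(logits, labels)
with tf.Session() as sess:
    print(sess.run(acc))            # 0.5: one of the two rows is correct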

training.py
import os
import numpy as np
import tensorflow as tf
import input_data
import model
import matplotlib.pyplot as plt
from PIL import Image


N_CLASSES = 2
IMG_W = 208
IMG_H = 208
BATCH_SIZE = 16
CAPACITY = 2000
MAX_STEP = 15000
learning_rate = 0.0001

def run_training():
    train_dir = 'G:/record/tensorflow/cat_vs_dog/data/train/'
    logs_train_dir ='G:/record/tensorflow/cat_vs_dog/log/train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)


    summary_op = tf.summary.merge_all()  # merge the loss/accuracy summaries defined in model.py

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess = sess, coord =coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
            if step % 50 == 0:
                print('Step %d, train_loss = %.2f, train_accuracy = %.2f' % (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)  # the step suffix is what evaluate_one_image() parses back
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()

def get_one_image(train):
    """
    Pick one image path at random, display it, and return it as a 208 x 208 ndarray.
    """
    n = len(train)
    ind = np.random.randint(0, n)
    img_dir = train[ind]

    image = Image.open(img_dir)
    plt.imshow(image)
    plt.show()
    image = image.resize([208, 208])
    image = np.array(image)

    return image

def evaluate_one_image():
    train_dir = 'G:/record/tensorflow/cat_vs_dog/data/train/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        # feed the image through a placeholder so the graph can score any 208 x 208 x 3 array
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])
        image = tf.cast(x, tf.float32)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)
        logs_train_model = 'G:/record/tensorflow/cat_vs_dog/log/train/'  # the directory run_training() writes checkpoints to

        saver = tf.train.Saver()
        with tf.Session() as sess:
            print('Reading Checkpoints')
            ckpt = tf.train.get_checkpoint_state(logs_train_model)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a cat with probability %.6f' % prediction[0, 0])
            else:
                print('This is a dog with probability %.6f' % prediction[0, 1])


if __name__ == '__main__':
    evaluate_one_image()
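
Note that the __main__ block above only calls evaluate_one_image(); run_training() must be executed first so that checkpoints exist under the log directory. A typical sequence would be:

if __name__ == '__main__':
    run_training()        # train for MAX_STEP steps and write model.ckpt-* checkpoints
    evaluate_one_image()  # restore the latest checkpoint and classify one random image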

Reposted from blog.csdn.net/qq_38737845/article/details/79684453