TensorFlow hands-on mini-project: facial expression recognition (3)

With the data preparation complete, we now build the CNN used in this project. The network consists of two convolutional layers, two pooling layers, two fully connected layers, and one softmax layer. For convenience during training, the model file also includes functions that compute the loss, the accuracy, and the training op, so they can be called directly later. The code is as follows:

import tensorflow as tf


def inference(images, batch_size, n_classes, regularizer, reuse):
    # input 48x48x1
    # output 48x48x16
    with tf.variable_scope('conv1', reuse=reuse) as scope:
        conv1_weights = tf.get_variable("weights", shape=[3, 3, 1, 16], dtype=tf.float32,
                                        initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))

        conv1_biases = tf.get_variable("biases", shape=[16], dtype=tf.float32,
                                       initializer=tf.constant_initializer(0.1))

        conv1 = tf.nn.conv2d(images, conv1_weights, strides=[1, 1, 1, 1], padding="SAME")
        pre_activation = tf.nn.bias_add(conv1, conv1_biases)
        activation = tf.nn.relu(pre_activation, name=scope.name)

    # input 48x48x16
    # output 24x24x16
    with tf.variable_scope('pool1') as scope:
        pool1 = tf.nn.max_pool(activation, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", name=scope.name)

    # input 24x24x16
    # output 24x24x32
    with tf.variable_scope('conv2', reuse=reuse) as scope:
        conv2_weights = tf.get_variable("weights", shape=[3, 3, 16, 32], dtype=tf.float32,
                                        initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))

        conv2_biases = tf.get_variable("biases", shape=[32], dtype=tf.float32,
                                       initializer=tf.constant_initializer(0.1))

        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding="SAME", name=scope.name)
        pre_activation = tf.nn.bias_add(conv2, conv2_biases)
        activation = tf.nn.relu(pre_activation)
    # input 24x24x32
    # output 12x12x32
    with tf.variable_scope('pool2') as scope:
        pool2 = tf.nn.max_pool(activation, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", name=scope.name)

    # fc1: flatten the 12x12x32 feature map (4608 values) and map it to 2048 units
    with tf.variable_scope('fc1', reuse=reuse) as scope:
        reshaped = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshaped.get_shape()[1].value
        fc1_weights = tf.get_variable("weights", shape=[dim, 2048], dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(fc1_weights))

        fc1_biases = tf.get_variable("biases", shape=[2048], dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))

        fc1 = tf.matmul(reshaped, fc1_weights) + fc1_biases
        activation = tf.nn.relu(fc1, name=scope.name)
        # dropout is applied only on the training graph (reuse=False);
        # the validation graph (reuse=True) skips it
        if not reuse:
            activation = tf.nn.dropout(activation, keep_prob=0.5)

    # fc2: 2048 -> 512
    with tf.variable_scope('fc2', reuse=reuse) as scope:
        fc2_weights = tf.get_variable("weights", shape=[2048, 512], dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(fc2_weights))

        fc2_biases = tf.get_variable("biases", shape=[512], dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.0))

        fc2 = tf.matmul(activation, fc2_weights) + fc2_biases
        activation = tf.nn.relu(fc2, name=scope.name)
        if not reuse:
            activation = tf.nn.dropout(activation, keep_prob=0.5)

    # output layer: 512 -> n_classes logits (the softmax itself is applied later, inside the loss)
    with tf.variable_scope('softmax', reuse=reuse) as scope:
        softmax_weights = tf.get_variable("weights", shape=[512, n_classes], dtype=tf.float32,
                                          initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))

        softmax_biases = tf.get_variable("biases", shape=[n_classes], dtype=tf.float32,
                                         initializer=tf.constant_initializer(0.1))

        softmax_linear = tf.add(tf.matmul(activation, softmax_weights), softmax_biases, name=scope.name)

    return softmax_linear


def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels,
                                                                       name='entropy_per_example')

        cross_entropy_mean = tf.reduce_mean(cross_entropy, name=scope.name)
        # total loss = mean cross entropy + the L2 terms collected in inference()
        loss = tf.add_n(tf.get_collection("losses")) + cross_entropy_mean
        tf.summary.scalar(scope.name + '/loss', cross_entropy_mean)
    return loss


def training(loss, learning_rate):
    with tf.variable_scope('optimizer') as scope:
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, trainable=False, name='global_step')
        train_op = optimizer.minimize(loss, global_step=global_step, name=scope.name)
    return train_op


def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
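
Before starting training, it is worth a quick sanity check that the graph wires up and the logits come out at the expected shape. The snippet below is a minimal sketch, not part of the original project; it assumes the model code above is saved as model/cnn_model.py, the same import path the training script uses:

import tensorflow as tf
import model.cnn_model as model

# build the graph once on a dummy placeholder and inspect the static shape
images = tf.placeholder(tf.float32, shape=[32, 48, 48, 1])
logits = model.inference(images, batch_size=32, n_classes=7,
                         regularizer=None, reuse=False)
print(logits.get_shape())  # expect (32, 7)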

With the model in place, we can start training right away. Set the number of training steps to 50000 and the learning rate to 0.0001, then start training:

# Train the model
import os
import numpy as np
import tensorflow as tf
import dataset.make_batch as input_data
import model.cnn_model as model
from tensorflow.contrib.layers import l2_regularizer


N_CLASSES = 7
IMG_W = 48
IMG_H = 48
TRAIN_BATCH_SIZE = 32
VALIDATION_BATCH_SIZE = 100
CAPACITY = 256
MAX_STEP = 50000
LEARNING_RATE = 0.0001
REGULARIZATION_RATE = 0.0001

train_dir = "D:/fer2013/train/"
logs_train_dir = "C:/Users/Desktop/train/log/train/"
# note: this directory holds the validation images and also receives the validation summaries
logs_validation_dir = "D:/fer2013/val/"

train, train_label = input_data.get_file(file_dir=train_dir)
validation, validation_label = input_data.get_file(file_dir=logs_validation_dir)

train_batch, train_label_batch = input_data.get_batch(train, train_label,
                                                      IMG_W, IMG_H,
                                                      TRAIN_BATCH_SIZE, CAPACITY)
validation_batch, validation_label_batch = input_data.get_batch(validation, validation_label,
                                                                IMG_W, IMG_H,
                                                                VALIDATION_BATCH_SIZE, CAPACITY)

regularizer = l2_regularizer(REGULARIZATION_RATE)

train_logits_op = model.inference(images=train_batch, batch_size=TRAIN_BATCH_SIZE, n_classes=N_CLASSES,
                                  regularizer=regularizer, reuse=False)

validation_logits_op = model.inference(images=validation_batch, batch_size=VALIDATION_BATCH_SIZE, n_classes=N_CLASSES,
                                       regularizer=None, reuse=True)

train_losses_op = model.losses(logits=train_logits_op, labels=train_label_batch)

validation_losses_op = model.losses(logits=validation_logits_op, labels=validation_label_batch)

train_op = model.training(train_losses_op, learning_rate=LEARNING_RATE)

train_accuracy_op = model.evaluation(logits=train_logits_op, labels=train_label_batch)

validation_accuracy_op = model.evaluation(logits=validation_logits_op, labels=validation_label_batch)

summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph, max_queue=3)
    val_writer = tf.summary.FileWriter(logs_validation_dir, sess.graph, max_queue=3)
    Saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, train_loss, train_accuracy = sess.run([train_op, train_losses_op, train_accuracy_op])
            if step % 100 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f' % (step, train_loss, train_accuracy * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                Saver.save(sess, checkpoint_path, global_step=step)
            if step % 500 == 0 or (step + 1) == MAX_STEP:
                val_loss, val_accuracy = sess.run([validation_losses_op, validation_accuracy_op])
                print('** step %d, val loss = %.2f, val accuracy = %.2f' % (step, val_loss, val_accuracy * 100.0))
                summary_str = sess.run(summary_op)
                val_writer.add_summary(summary_str, step)

    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached")
    finally:
        coord.request_stop()
    # wait for the queue-runner threads to finish before the session closes
    coord.join(threads)

After 50000 training steps, the training accuracy reaches around 95% and the validation accuracy around 93%, which is an acceptable result.
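
With the checkpoints saved above, the trained weights can also be restored later, for example to classify a single face. What follows is only a rough sketch under assumed paths; note that inference() applies dropout when reuse=False, so we build a throwaway training graph first and then a reuse=True evaluation graph on the same variables:

import numpy as np
import tensorflow as tf
import model.cnn_model as model

image = tf.placeholder(tf.float32, shape=[1, 48, 48, 1])
# first call creates the variables (training graph, with dropout)...
_ = model.inference(tf.zeros([1, 48, 48, 1]), batch_size=1, n_classes=7,
                    regularizer=None, reuse=False)
# ...second call reuses them to build a dropout-free evaluation graph
logits = model.inference(image, batch_size=1, n_classes=7,
                         regularizer=None, reuse=True)
prediction = tf.argmax(logits, 1)

with tf.Session() as sess:
    saver = tf.train.Saver()
    # restore the newest checkpoint written to logs_train_dir by the training script
    ckpt = tf.train.latest_checkpoint("C:/Users/Desktop/train/log/train/")
    saver.restore(sess, ckpt)
    # replace the zeros with a real 48x48 grayscale face, preprocessed
    # the same way make_batch preprocesses the training images
    face = np.zeros([1, 48, 48, 1], dtype=np.float32)
    print(sess.run(prediction, feed_dict={image: face}))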

Reposted from blog.csdn.net/labPqsdr/article/details/80715464