[Hands-on Tutorial] Face Recognition and Keypoint Detection with TensorFlow and OpenCV (Part 2)

1. Introduction

In the previous part we wrote the face-detection program and downloaded the dataset.

Extract the dataset into the project folder so that ./training/training.csv is available.

In this part we will build a CNN model and train it for keypoint detection.
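Before going through the reader code, it helps to confirm what training.csv actually contains. The snippet below is a quick sketch, assuming the dataset downloaded in Part 1 follows the Kaggle Facial Keypoints Detection layout: 30 coordinate columns (15 keypoints, each with an x and a y) plus an Image column that stores every 96x96 grayscale image as a space-separated pixel string.

import pandas as pd
import numpy as np

# Quick look at the layout of the training file before writing the Reader class.
df = pd.read_csv('./training/training.csv')

# 31 columns in total: 30 keypoint coordinates plus the 'Image' column.
print(df.shape)
print(list(df.columns[:4]))

# Each image is a space-separated string of 96 * 96 = 9216 pixel values.
pixels = np.fromstring(df['Image'][0], sep=' ')
print(pixels.shape)  # (9216,), i.e. a flattened 96x96 grayscale image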

2. Reading the Data

2.1 Code: read_data.py

from random import shuffle
import pandas as pd
import numpy as np
import pickle
import cv2
import os
from random import randint


class Reader(object):

    def __init__(self):

        self.train_csv = './training/training.csv'

        self.test_csv = './test/test.csv'

        self.cursor = 0

        self.names_path = './names.txt'

        self.data_path = './data.pkl'

        self.train_image_path = './train_image'

        self.size = 96

        if not os.path.exists(self.train_image_path):

            os.makedirs(self.train_image_path)

            self.data = self.pre_process()

        else:

            with open(self.data_path, 'rb') as f:

                self.data = pickle.load(f)

        print('There are {} samples in total.'.format(len(self.data)))

        shuffle(self.data)

        with open(self.names_path, 'r') as f:

            self.names = f.read().splitlines()

        self.data_num = len(self.data)

        self.label_num = len(self.names)

    def pre_process(self):

        data = pd.read_csv(self.train_csv)
        data = data.dropna()

        cols = data.columns[:-1]

        data = data.to_dict()

        for key, value in data['Image'].items():

            data['Image'][key] = np.fromstring(value, sep=' ')

        data_names = list(data.keys())
        data_names.remove('Image')

        with open(self.names_path, 'w') as f:

            for value in data_names:
                f.writelines(value+'\n')

        labels = []

        for index in data['Image'].keys():

            label = {}

            image = data['Image'][index].reshape((96, 96))
            image_name = 'image_{}.jpg'.format(index)
            image_path = os.path.join(self.train_image_path, image_name)

            cv2.imwrite(image_path, image)

            label['image_path'] = image_path

            for point_name in data_names:
                label[point_name] = data[point_name][index]

            labels.append(label)

        with open(self.data_path, 'wb') as f:
            pickle.dump(labels, f)

        return labels

    def random_flip(self, image, points):

        # Randomly flip a single 96x96 image upside down and mirror its
        # normalized y coordinates (points are ordered x1, y1, x2, y2, ...).
        if randint(0, 1):

            image = np.flip(image, axis=0)
            points[1::2] = 1 - points[1::2]

        return image, points

    def generate(self, batch_size=1):

        images = []
        points = []

        for _ in range(batch_size):

            path = self.data[self.cursor]['image_path']
            image = cv2.imread(path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            images.append(image)

            tmp = []
            for key in self.names:

                value = self.data[self.cursor][key]
                tmp.append(value)

            points.append(tmp)

            self.cursor += 1

            if self.cursor >= self.data_num:

                self.cursor = 0
                shuffle(self.data)

        images = np.array(images).reshape(
            (batch_size, self.size, self.size, 1))
        images = images - 127.5

        points = np.array(points)
        points = points/self.size

        # images, points = self.random_flip(images, points)

        return images, points


if __name__ == "__main__":

    import matplotlib.pyplot as plt

    reader = Reader()

    for _ in range(10):

        image, point = reader.generate(1)

        image = np.squeeze(image)
        point = np.squeeze(point)

        image = (image + 127.5).astype(np.int)
        point = (point * 96).astype(np.int)

        result = image.copy()

        y_axis = point[1::2]
        x_axis = point[::2]

        color = (0, 0, 255)

        for y, x in zip(y_axis, x_axis):

            cv2.circle(result, (x, y), 1, color)

        plt.imshow(result)
        plt.show()

2.2 Results

The first run creates the data files used later on: the train_image folder with the dumped 96x96 images, names.txt with the keypoint column names, and data.pkl with the image paths and labels.

Training batches are then produced by calling generate.
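As a quick sanity check (a minimal sketch built on the Reader class above), pulling one small batch shows the shapes the network will expect:

from read_data import Reader

reader = Reader()

# Draw a batch of 4 samples from the shuffled data.
images, points = reader.generate(batch_size=4)

# Images are zero-centered (pixel value minus 127.5) 96x96 grayscale patches,
# and the 30 keypoint coordinates are normalized by the image size.
print(images.shape)  # (4, 96, 96, 1)
print(points.shape)  # (4, 30)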

3. Training and Saving the Model

3.1 Code: network.py

import tensorflow as tf
from read_data import Reader
import os
import numpy as np

slim = tf.contrib.slim


class Net(object):

    def __init__(self, is_training=True):

        self.is_training = is_training

        if self.is_training:

            self.reader = Reader()

        self.batch_size = 16

        self.lr = 2e-4

        self.wd = 5e-3

        self.epoches = 100

        self.batches = 256

        self.size = 96

        self.label_num = 30

        self.x = tf.placeholder(tf.float32, [None, self.size, self.size, 1])

        self.y = tf.placeholder(tf.float32, [None, self.label_num])

        self.y_hat = self.network(self.x)

        self.model_path = './model'

        self.ckpt_path = os.path.join(self.model_path, 'model.ckpt')

        self.saver = tf.train.Saver()

    def loss_layer(self, y, y_hat):

        loss = tf.reduce_sum(tf.square(y - y_hat))

        return loss

    def network(self, inputs):

        with tf.variable_scope('net'):

            with slim.arg_scope([slim.conv2d],
                                activation_fn=tf.nn.relu,
                                weights_regularizer=slim.l2_regularizer(self.wd)):

                # Block init
                net = slim.conv2d(inputs, 1024, [3, 3],
                                  2, scope='conv_init', padding='SAME')

                # Block 1
                net = slim.repeat(net, 2, slim.conv2d,
                                  64, [3, 3], scope='conv1', padding='SAME')
                net = slim.max_pool2d(
                    net, [2, 2], scope='pool1', padding='SAME')

                net = tf.layers.batch_normalization(
                    net, trainable=self.is_training, name='BN_block1')

                # Block 2
                net = slim.repeat(net, 2, slim.conv2d,
                                  128, [3, 3], scope='conv2')
                net = slim.max_pool2d(
                    net, [2, 2], scope='pool2', padding='SAME')

                net = tf.layers.batch_normalization(
                    net, trainable=self.is_training, name='BN_block2')

                # Block 3
                net = slim.repeat(net, 3, slim.conv2d,
                                  256, [3, 3], scope='conv3')
                net = slim.max_pool2d(
                    net, [2, 2], scope='pool3', padding='SAME')

                net = tf.layers.batch_normalization(
                    net, trainable=self.is_training, name='BN_block3')

                # Block 4
                net = slim.repeat(net, 3, slim.conv2d,
                                  512, [3, 3], scope='conv4')
                net = slim.max_pool2d(
                    net, [2, 2], scope='pool4', padding='SAME')

                net = tf.layers.batch_normalization(
                    net, trainable=self.is_training, name='BN_block4')

                # Block 5
                net = slim.repeat(net, 3, slim.conv2d,
                                  512, [3, 3], scope='conv5')

                net = tf.layers.batch_normalization(
                    net, trainable=self.is_training, name='BN_block5')

                # Block 6
                net = slim.conv2d(net, 1024, [3, 3],
                                  2, scope='conv6')

                net = tf.layers.batch_normalization(
                    net, trainable=self.is_training, name='BN_block6')

                net = tf.layers.flatten(net)

                logits = tf.layers.dense(net, self.label_num)

                if self.is_training:

                    logits = tf.layers.dropout(logits)

                # logits = tf.nn.tanh(logits)

                return logits

    def train_net(self):

        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

        self.loss = self.loss_layer(self.y, self.y_hat)

        self.optimizer = tf.compat.v1.train.AdamOptimizer(self.lr)

        self.train_step = self.optimizer.minimize(self.loss)

        with tf.Session() as sess:

            sess.run(tf.compat.v1.global_variables_initializer())

            ckpt = tf.train.get_checkpoint_state(self.model_path)

            if ckpt and ckpt.model_checkpoint_path:
                # If a checkpoint exists, resume training from the saved model.
                self.saver.restore(sess, ckpt.model_checkpoint_path)
                print('Model Reload Successfully!')

            for epoch in range(self.epoches):

                loss_list = []

                # Iterate self.batches mini-batches per epoch.
                for batch in range(self.batches):

                    images, labels = self.reader.generate(self.batch_size)

                    feed_dict = {
                        self.x: images,
                        self.y: labels
                    }

                    loss_value, _ = sess.run(
                        [self.loss, self.train_step], feed_dict)

                    loss_list.append(loss_value)

                loss = np.mean(np.array(loss_list))

                print('epoch:{} loss:{}'.format(epoch, loss))

                with open('./losses.txt', 'a') as f:
                    f.write(str(loss)+'\n')

            self.saver.save(sess, self.ckpt_path)

    def test_net(self, image, sess):

        image = image.reshape((1, self.size, self.size, 1)) - 127.5

        points = sess.run(self.y_hat, feed_dict={self.x: image})

        points = (points * self.size).astype(np.int)

        return np.squeeze(points)


if __name__ == '__main__':

    import cv2
    import matplotlib.pyplot as plt

    net = Net()

    net.train_net()

    with open('./losses.txt', 'r') as f:

        losses = f.read().splitlines()

    losses = [float(v) for v in losses]

    plt.plot(losses)
    plt.title('loss')
    plt.show()

3.2 Training the Model and Plotting the Loss Curve

Run this script and training starts.

During training the corresponding files are created: the model folder stores the trained checkpoint, and losses.txt records the mean loss of each epoch.

The loss curve shows the loss dropping steadily over training.

The results look pretty good~
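Once a checkpoint has been saved, test_net can already be used for a quick standalone prediction. The following is only a minimal sketch: the file name is illustrative (any 96x96 grayscale face crop will do, here one of the images dumped into train_image is reused), and wiring the model into the face detector is the topic of the next part.

import cv2
import tensorflow as tf
from network import Net

net = Net(is_training=False)

# Any 96x96 grayscale face crop works as input.
face = cv2.imread('./train_image/image_0.jpg', cv2.IMREAD_GRAYSCALE)

with tf.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())

    ckpt = tf.train.get_checkpoint_state('./model')
    if ckpt and ckpt.model_checkpoint_path:
        net.saver.restore(sess, ckpt.model_checkpoint_path)

    # test_net returns 30 pixel coordinates ordered x1, y1, x2, y2, ...
    points = net.test_net(face, sess)

for x, y in zip(points[::2], points[1::2]):
    cv2.circle(face, (int(x), int(y)), 1, 255)

cv2.imwrite('./prediction.jpg', face)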

4. Progress

  • Preparation
  • Face detection
  • Model training
  • Calling the model on video

In the next part we will look at how to plug the model we just trained into our face detector:
[Hands-on Tutorial] Face Recognition and Keypoint Detection (Part 3): Calling the Model

If this post helped you, remember to like and follow~



Reposted from blog.csdn.net/weixin_44936889/article/details/103759798