Day02 - Hand Gesture Recognition with Baidu Paddle

Baidu's PaddlePaddle (飞桨) framework has been open source since 2016, four years now, and after those years of polishing it has shown real strength. In this article I'll walk through how to use Paddle for hand gesture recognition. Why not the MNIST starter dataset? Because MNIST ships built into every framework and takes only a simple import to load, whereas real projects deal with actual image files and labels, so teaching Paddle through gesture recognition is more practical and more useful.

Deep learning boils down to four steps:

1. Data and label processing
2. Building the network model
3. Setting the network hyperparameters
4. Training and evaluating the model

I will walk through these four steps one by one. First, import the libraries:

import os
import time
import random
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from multiprocessing import cpu_count
from paddle.fluid.dygraph import Pool2D,Conv2D
from paddle.fluid.dygraph import Linear

1. Data and Label Processing

The dataset Paddle provides contains hand gestures for the digits 0-9, with 200+ color images per gesture at a resolution of 100x100. Dataset link
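Before generating the lists, it is worth sanity-checking the extracted folder layout. The snippet below just counts the images per class; it assumes the archive was extracted into a folder named Dataset with one sub-folder per digit, which is the same layout the list-generation code below relies on.

# Optional sanity check: count images per class (assumes Dataset/0 ... Dataset/9)
data_path = 'Dataset'
for folder in sorted(os.listdir(data_path)):
    if folder == '.DS_Store':  # skip macOS metadata files
        continue
    imgs = [f for f in os.listdir(os.path.join(data_path, folder)) if f != '.DS_Store']
    print('class {}: {} images'.format(folder, len(imgs)))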

Now, the data processing itself:

# Generate the image lists
data_path = 'Dataset'  # path to your dataset
character_folders = os.listdir(data_path)
# print(character_folders)
if os.path.exists('./train_data.list'):
    os.remove('./train_data.list')
if os.path.exists('./test_data.list'):
    os.remove('./test_data.list')

for character_folder in character_folders:

    with open('./train_data.list', 'a') as f_train:
        with open('./test_data.list', 'a') as f_test:
            if character_folder == '.DS_Store':
                continue
            character_imgs = os.listdir(os.path.join(data_path, character_folder))
            count = 0
            for img in character_imgs:
                if img == '.DS_Store':
                    continue
                # every 10th image goes to the test set, the rest to the training set
                if count % 10 == 0:
                    f_test.write(os.path.join(data_path, character_folder, img) + '\t' + character_folder + '\n')
                else:
                    f_train.write(os.path.join(data_path, character_folder, img) + '\t' + character_folder + '\n')
                count += 1
print('Image lists generated')
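To make the list format concrete, here is roughly what a couple of lines of train_data.list look like (the file names are illustrative; the actual names depend on the dataset):

Dataset/5/IMG_1548.JPG	5
Dataset/7/IMG_0923.JPG	7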

With each image path and its folder label written to the list files, loading the data is straightforward. Next we use Paddle's reader module to build the training and test sets.

# Define the readers for the training and test sets
def data_mapper(sample):
    img, label = sample
    img = Image.open(img)
    img = img.resize((100, 100), Image.ANTIALIAS)  # resize to 100x100
    img = np.array(img).astype('float32')
    img = img.transpose((2, 0, 1))  # HWC -> CHW
    img = img / 255.0               # normalize to [0, 1]
    return img, label

def data_reader(data_list_path):
    def reader():
        with open(data_list_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                img, label = line.split('\t')
                yield img, int(label)
    # map data_mapper over the raw reader in parallel, buffering up to 512 samples
    return paddle.reader.xmap_readers(data_mapper, reader, cpu_count(), 512)

# Data provider for training
# buf_size controls the shuffle buffer: the larger it is, the more thoroughly the image order is mixed
train_reader = paddle.batch(reader=paddle.reader.shuffle(reader=data_reader('./train_data.list'), buf_size=1024), batch_size=32)
# Data provider for testing
test_reader = paddle.batch(reader=data_reader('./test_data.list'), batch_size=32)
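A quick way to confirm the readers work is to pull a single batch and inspect its shapes; this check is optional and assumes the readers defined above:

# Optional: fetch one batch and check its shapes
sample_batch = next(train_reader())
print(len(sample_batch))         # batch_size, i.e. 32
print(sample_batch[0][0].shape)  # (3, 100, 100) image array
print(sample_batch[0][1])        # integer label in 0-9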

2. Building the Neural Network

Here we build our network following the classic AlexNet architecture.
The code is as follows:

# Define the network (an AlexNet-style CNN)
class MyDNN(fluid.dygraph.Layer):
    def __init__(self, name_scope, num_classes=10):
        super(MyDNN, self).__init__(name_scope)
        name_scope = self.full_name()

        self.conv1 = Conv2D(num_channels=3, num_filters=96, filter_size=11, stride=4, padding=5, act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=96, num_filters=256, filter_size=5, stride=1, padding=2, act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = Conv2D(num_channels=256, num_filters=384, filter_size=3, stride=1, padding=1, act='relu')
        self.conv4 = Conv2D(num_channels=384, num_filters=384, filter_size=3, stride=1, padding=1, act='relu')
        self.conv5 = Conv2D(num_channels=384, num_filters=256, filter_size=3, stride=1, padding=1, act='relu')
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # after the conv/pool stack, a 100x100 input is reduced to 256 x 3 x 3 = 2304 features
        self.fc1 = Linear(input_dim=2304, output_dim=4096, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=4096, output_dim=num_classes)

        
    def forward(self, x):

        x = self.conv1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.pool5(x)
        x = fluid.layers.reshape(x, [x.shape[0], -1])  # flatten to [batch, 2304]
        x = self.fc1(x)
        # dropout after the fully connected layer to reduce overfitting
        x = fluid.layers.dropout(x, self.drop_ratio1)
        x = self.fc2(x)
        # dropout after the fully connected layer to reduce overfitting
        x = fluid.layers.dropout(x, self.drop_ratio2)
        x = self.fc3(x)
        return x

Thanks to Paddle's high-level wrapping, our AlexNet structure reads clean and easy to follow.
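If you want to verify the flattened feature size that fc1 expects (256 channels x 3 x 3 = 2304 for a 100x100 input), a quick sketch is to push a dummy tensor through the network inside a dygraph guard:

# Optional shape check with a dummy input
with fluid.dygraph.guard():
    net = MyDNN('Alexnet')
    dummy = fluid.dygraph.to_variable(np.zeros((1, 3, 100, 100), dtype='float32'))
    print(net(dummy).shape)  # expect [1, 10]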

3. Setting the Network Hyperparameters

Here we train with Paddle's dynamic graph (dygraph) mode, which is more flexible.

# Train with the dynamic graph

with fluid.dygraph.guard():
    model = MyDNN('Alexnet')  # instantiate the model
    model.train()  # switch to training mode
    opt = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9, parameter_list=model.parameters())
    epochs_num = 50  # number of epochs

    for pass_num in range(epochs_num):

        for batch_id, data in enumerate(train_reader()):

            images = np.array([x[0].reshape(3, 100, 100) for x in data], np.float32)

            labels = np.array([x[1] for x in data]).astype('int64')
            labels = labels[:, np.newaxis]
            image = fluid.dygraph.to_variable(images)
            label = fluid.dygraph.to_variable(labels)
            predict = model(image)  # forward pass
            loss = fluid.layers.softmax_with_cross_entropy(predict, label)
            avg_loss = fluid.layers.mean(loss)  # average loss over the batch

            acc = fluid.layers.accuracy(predict, label)  # batch accuracy
            if batch_id != 0 and batch_id % 50 == 0:
                print("train_pass:{},batch_id:{},train_loss:{},train_acc:{}".format(pass_num, batch_id, avg_loss.numpy(), acc.numpy()))

            avg_loss.backward()
            opt.minimize(avg_loss)
            model.clear_gradients()

    fluid.save_dygraph(model.state_dict(), 'MyDNN')  # save the model

Simple, isn't it? Paddle models have two modes, train and eval: dropout is enabled in the former and disabled in the latter. Next up is evaluating the model.

with fluid.dygraph.guard():
    accs = []
    model_dict, _ = fluid.load_dygraph('MyDNN')
    model = MyDNN('Alexnet')
    model.load_dict(model_dict)  # load the trained parameters
    model.eval()  # evaluation mode
    for batch_id, data in enumerate(test_reader()):  # iterate over the test set
        images = np.array([x[0].reshape(3, 100, 100) for x in data], np.float32)
        labels = np.array([x[1] for x in data]).astype('int64')
        labels = labels[:, np.newaxis]

        image = fluid.dygraph.to_variable(images)
        label = fluid.dygraph.to_variable(labels)

        predict = model(image)
        acc = fluid.layers.accuracy(predict, label)
        accs.append(acc.numpy()[0])
        avg_acc = np.mean(accs)
    print(avg_acc)
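With the trained model restored, predicting a single image follows the same preprocessing as data_mapper; a minimal sketch (the image path is hypothetical, replace it with one of your own):

with fluid.dygraph.guard():
    model_dict, _ = fluid.load_dygraph('MyDNN')
    model = MyDNN('Alexnet')
    model.load_dict(model_dict)
    model.eval()
    img = Image.open('Dataset/3/example.JPG')  # hypothetical path
    img = img.resize((100, 100), Image.ANTIALIAS)
    img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255.0
    logits = model(fluid.dygraph.to_variable(img[np.newaxis, :]))
    print('predicted gesture:', np.argmax(logits.numpy()))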

That wraps up the whole training process.
Training for fifty epochs gave me 96% accuracy. You can try pushing it higher, for example by switching optimizers (see the sketch below), training for more epochs, or increasing the image resolution.
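For instance, swapping the Momentum optimizer for Adam only changes the optimizer line in the training loop above (just a sketch; the learning rate is a starting point, not a tuned value):

opt = fluid.optimizer.Adam(learning_rate=0.0001, parameter_list=model.parameters())

Note that if you increase the input resolution, the input_dim of fc1 must be recomputed to match the new flattened feature size.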

Takeaways

The Paddle deep learning framework is a solid choice for beginners. As China's first open-source deep learning framework, Paddle has a strong domestic ecosystem: models, code, and the framework itself can all be downloaded conveniently within China, and the AI Studio cloud platform spares newcomers the pain of setting up an environment, even offering GPU resources. No wonder Baidu is one of the IT big three; they are being extravagantly generous.
There are downsides, though: the framework's error messages are unfriendly and sometimes cryptic, and some APIs are wrapped at such a "high" level that it is hard to see what a given step is actually doing.
I hope Paddle keeps strengthening itself and continues polishing this home-grown framework!

