[Original] Python implementation of a BP neural network to recognize the MNIST dataset

Copyright notice: this article is the original work of the blogger ExcelMann and may not be reproduced without the blogger's permission.

Author: ExcelMann; reprints must credit the source.

Without further ado, here is the code; it is commented throughout.

# Author: Xuangan, Xu
# Date: 2020-10-28

"""
BP神经网络
-----------------
利用梯度下降法,实现MNIST手写体数字识别
数据集:Mnist数据集
"""

import os
import struct
import math
import numpy as np
import tensorflow as tf

def load_mnist(path, kind='train'):  # kind defaults to 'train'
    """
    Read the MNIST data from the given path.
    :param path: directory containing the IDX files
    :param kind: file prefix ('train' or 't10k')
    :return: images, an n x m array where n is the number of samples and m is
             the number of features per sample (i.e. the number of pixels);
             labels, the label corresponding to each row of images
    """
    labels_path = os.path.join(path,
                               '%s-labels.idx1-ubyte'
                               % kind)
    images_path = os.path.join(path,
                               '%s-images.idx3-ubyte'
                               % kind)
    with open(labels_path, 'rb') as lbpath:
        magic, n = struct.unpack('>II',
                                 lbpath.read(8))
        labels = np.fromfile(lbpath,
                             dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack('>IIII',
                                               imgpath.read(16))
        images = np.fromfile(imgpath,
                             dtype=np.uint8).reshape(len(labels), 784)

    return images, labels
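
# Example usage of load_mnist (a sketch; './mnist' is a hypothetical directory
# holding the four IDX files). Note that the main block below loads the data
# via tf.keras instead, so this function is kept as a dependency-free option:
#   X_train, y_train = load_mnist('./mnist', kind='train')
#   X_test, y_test = load_mnist('./mnist', kind='t10k')
#   print(X_train.shape)  # (60000, 784)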

def sigmoid(x):
    """
    Sigmoid function.
    :param x: input value
    :return: the value of the activation function
    """
    return 1.0/(1.0+np.exp(-x))
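
# Note: np.exp(-x) overflows for large negative x (a RuntimeWarning, though the
# result still saturates correctly to 0). If that matters, a numerically stable
# variant (a sketch, not used below) splits on the sign of the input:
#   def stable_sigmoid(x):
#       x = np.asarray(x, dtype=float)
#       out = np.empty_like(x)
#       pos = x >= 0
#       out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
#       ex = np.exp(x[~pos])
#       out[~pos] = ex / (1.0 + ex)
#       return out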

# Neural network class
class neuralNetwork:

    def __init__(self, inputNodes, hiddenNodes, outputNodes, learningRate):
        """
        :param inputNodes: number of input-layer nodes
        :param hiddenNodes: number of hidden-layer nodes
        :param outputNodes: number of output-layer nodes
        :param learningRate: learning rate
        """
        self.iNodes = inputNodes
        self.hNodes = hiddenNodes
        self.oNodes = outputNodes
        self.lr = learningRate
        # Initialize the network weights
        self.w_1 = np.random.uniform(-0.5, 0.5, (inputNodes, hiddenNodes))
        self.w_2 = np.random.uniform(-0.5, 0.5, (hiddenNodes, outputNodes))
        # Initialize the thresholds (subtracted before each activation)
        self.thod_1 = np.random.uniform(-0.5, 0.5, hiddenNodes)
        self.thod_2 = np.random.uniform(-0.5, 0.5, outputNodes)

    def culMse(self, pre_y, y):
        """
        Compute the half sum-of-squares error.
        :param pre_y: predicted values
        :param y: expected values
        """
        totalError = 0
        for i in range(len(y)):
            totalError += (y[i] - pre_y[i]) ** 2
        return totalError / 2.0

    def culCrossEntropyLoss(self, pre_y, y):
        """
        Compute the cross-entropy loss.
        :param pre_y: predicted values
        :param y: expected values
        """
        total_error = 0
        for j in range(len(y)):
            total_error += y[j] * math.log(pre_y[j])
        return (-1) * total_error
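
    # Note: culCrossEntropyLoss is defined but never called during training
    # below. Also, math.log raises a ValueError when pre_y[j] == 0, so a
    # common guard (an assumption, not in the original) is to clip first:
    #   total_error += y[j] * math.log(max(pre_y[j], 1e-12))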

    def forward(self, input_data):
        """
        Forward pass.
        :param input_data: input data (a 1x784 one-dimensional array)
        :return: the output-layer activations and the hidden-layer activations
        """
        # Hidden-layer input and output (sigmoid activation); an array of size hNodes
        hidden_input = input_data.dot(self.w_1)
        hidden_output = sigmoid(hidden_input - self.thod_1)

        # Output-layer input and output (sigmoid activation); an array of size oNodes
        final_input = hidden_output.dot(self.w_2)
        final_output = sigmoid(final_input - self.thod_2)
        return final_output, hidden_output
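
    # Quick shape check for forward (a sketch; 'net' is hypothetical):
    #   net = neuralNetwork(784, 15, 10, 0.15)
    #   out, hid = net.forward(np.random.rand(784))
    #   assert out.shape == (10,) and hid.shape == (15,)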

    def backward(self, target, input_data, hidden_output, final_output):
        """
        Backpropagation: compute the generalized errors, then update all
        weights and thresholds by gradient descent.
        """
        g = np.zeros(self.oNodes)  # generalized error of output node j
        e = np.zeros(self.hNodes)  # generalized error of hidden node h

        # Generalized error of each output node (computed once, not per h)
        for j in range(self.oNodes):
            g[j] = (target[j] - final_output[j]) * final_output[j] * (1 - final_output[j])

        # Generalized error of each hidden node; this must use w_2 as it was
        # before the update below, so it is computed first
        for h in range(self.hNodes):
            totalBackValue = 0
            for j in range(self.oNodes):
                totalBackValue += self.w_2[h][j] * g[j]
            e[h] = hidden_output[h] * (1 - hidden_output[h]) * totalBackValue

        # Update the weights w_2 between the hidden and output layers
        for h in range(self.hNodes):
            for j in range(self.oNodes):
                # Gradient-descent step for w_hj
                self.w_2[h][j] += self.lr * g[j] * hidden_output[h]

        # Update the output-layer thresholds
        for j in range(self.oNodes):
            # Gradient-descent step for the threshold of output node j
            self.thod_2[j] += (-1) * self.lr * g[j]

        # Update the weights w_1 between the input and hidden layers
        for i in range(self.iNodes):
            for h in range(self.hNodes):
                # Gradient-descent step for w_ih
                self.w_1[i][h] += self.lr * e[h] * input_data[i]

        # Update the hidden-layer thresholds
        for h in range(self.hNodes):
            # Gradient-descent step for the threshold of hidden node h
            self.thod_1[h] += (-1) * self.lr * e[h]
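
    # Numerical sanity check for the w_2 update (a sketch; 'net', 'x', 't',
    # 'h', and 'j' are hypothetical, and the tolerance is illustrative). The
    # analytic gradient of the half-SSE loss w.r.t. w_2[h][j] is
    # -g[j] * hidden_output[h], so a centered finite difference should agree:
    #   eps = 1e-5
    #   out, hid = net.forward(x)
    #   g_j = (t[j] - out[j]) * out[j] * (1 - out[j])
    #   net.w_2[h][j] += eps; lp = net.culMse(net.forward(x)[0], t)
    #   net.w_2[h][j] -= 2 * eps; lm = net.culMse(net.forward(x)[0], t)
    #   net.w_2[h][j] += eps  # restore the original weight
    #   assert abs((lp - lm) / (2 * eps) - (-g_j * hid[h])) < 1e-6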

    def train(self, input_data, target):
        """
        Train the network parameters on one sample: a forward pass followed
        by backpropagation.
        :param input_data: input data (a 1x784 one-dimensional array)
        :param target: label array (a 1x10 one-hot array)
        """
        final_output, hidden_output = self.forward(input_data)

        self.backward(target, input_data, hidden_output, final_output)

        return final_output


    def estimate(self, test_data, test_label):
        """
        Evaluate the predictions.
        :param test_data: input data, n x 784, where n is the number of samples
        :param test_label: labels of the test data
        :return: the accuracy
        """
        correct_num = 0  # number of correct predictions
        for i in range(test_data.shape[0]):
            # preV is the network's output vector for this sample
            preV, hiddenV = self.forward(test_data[i])
            pre_y = np.argmax(preV)  # the most likely class is the prediction
            label = np.argmax(test_label[i])
            # Compare the prediction with the label to accumulate the accuracy
            if pre_y == label:
                correct_num += 1
        return correct_num / test_data.shape[0]

    def SGD(self, train_data, train_label):
        # Number of iterations (epochs)
        epochs = 200
        # Batch size
        batch_size = 200
        for e in range(epochs):
            # Randomly draw batch_size samples (with replacement) as this
            # iteration's training batch
            batch_mask = np.random.choice(train_data.shape[0], batch_size)
            batch_data = train_data[batch_mask]
            batch_label = train_label[batch_mask]
            # Iterate over the batch, one sample at a time
            for i, data in enumerate(batch_data):
                # Run one training step
                final_output = self.train(data, batch_label[i])
                if i % 40 == 0:
                    # Report the loss on the current sample
                    mse = self.culMse(final_output, batch_label[i])
                    print(f'epoch:{e},i:{i},loss:{mse}')
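
    # Note: this "SGD" draws one random batch of 200 per epoch and then updates
    # per sample, so 200 epochs touch at most 40000 of the 60000 images. A
    # sketch of a variant (an assumption, not the author's code) that visits
    # every sample each epoch:
    #   def SGD_full(self, train_data, train_label, epochs=5):
    #       for e in range(epochs):
    #           for i in np.random.permutation(train_data.shape[0]):
    #               self.train(train_data[i], train_label[i])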

if __name__ == "__main__":
    # Load the MNIST data via tensorflow and preprocess it
    mnist = tf.keras.datasets.mnist
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    # Arrays to hold the preprocessed training and test data
    train_data = np.zeros((60000, 784))
    train_label = np.zeros((60000, 10))
    test_data = np.zeros((10000, 784))
    test_label = np.zeros((10000, 10))
    # Scale the pixel values into the range 0-1 and one-hot encode the labels
    for i in range(60000):  # training data
        train_data[i] = (np.array(train_x[i]).flatten()) / 255
        temp = np.zeros(10)
        temp[train_y[i]] = 1
        train_label[i] = temp
    for i in range(10000):  # test data
        test_data[i] = (np.array(test_x[i]).flatten()) / 255
        temp = np.zeros(10)
        temp[test_y[i]] = 1
        test_label[i] = temp
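
    # The two loops above can be written in vectorized form (a sketch that
    # produces the same arrays):
    #   train_data = train_x.reshape(60000, 784) / 255.0
    #   train_label = np.eye(10)[train_y]
    #   test_data = test_x.reshape(10000, 784) / 255.0
    #   test_label = np.eye(10)[test_y]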

    # Set the layer sizes and the learning rate
    input_nodes = 784
    hidden_nodes = 15
    output_nodes = 10
    learningRate = 0.15
    # Create the neural network object
    network = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learningRate)
    # Run stochastic gradient descent
    network.SGD(train_data, train_label)

    # Test phase: print the accuracy
    accuracy = network.estimate(test_data, test_label)
    print(f'test_data_Accuracy:{accuracy}')

Origin: blog.csdn.net/a602389093/article/details/109613565