SoftMax and Cross-Entropy Loss

SoftMax is the most commonly used activation function in classification tasks. How is it computed, and how is the corresponding cross-entropy loss computed?

Let's start with the full code, which can be stepped through with breakpoints in an IDE.

# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time
import argparse
import math

import tensorflow as tf
from tensorflow.keras.utils import Sequence
import numpy as np

"""两种方式加载的数据集不同图像部分数据是不同的,
official.mnist: 加载的图像是uint8数据类型编码, /255. 需要归一化
tensorflow.examples.tutorials.mnist 是float类型编码, 无需归一化操作
"""
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.layers import MaxPooling2D, Conv2D, Input, Flatten, Dense, Dropout, Layer

# Enable eager execution mode
tf.enable_eager_execution()

"""
Sequence生成器的方法
__iter__()
__getitem__(index)
支持batch
"""


class DataGenerator(Sequence):
    # Custom dataset loading. The input can be a list of files or any other format;
    # just implement the corresponding loading and preprocessing methods.
    def __init__(self, x, y, batch_size=32, shuffle=True):
        self.batch_size = batch_size
        self.x, self.y = x, y
        # Index array, reshuffled between epochs
        self.indexes = np.arange(len(self.x))
        self.shuffle = shuffle

    def __len__(self):
        # Number of batches per epoch
        return math.ceil(len(self.x) / float(self.batch_size))

    def __getitem__(self, index):
        # Generate one batch of data; adapt this to however your data is read
        # Pick batch_size indices
        batch_indexs = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # Fetch the corresponding samples from the dataset
        x, y = self.data_generation(batch_indexs)
        return x, y

    def on_epoch_end(self):
        # Reshuffle the indices at the end of each epoch if requested
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def data_generation(self, batch_indexs):
        x = self.x[batch_indexs]
        y = self.y[batch_indexs]
        return x, y


class MySoftMaxLayer(Layer):
    def __init__(self, **kwargs):
        super(MySoftMaxLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # The SoftMax layer has no parameters of its own,
        # so no trainable weights are created here.

        # Be sure to call the parent class method -- at the end!
        super(MySoftMaxLayer, self).build(input_shape)

    def call(self, logits, **kwargs):
        # Forward pass
        # (200, 10) / (200, 1)
        # tf.expand_dims adds a dimension
        # tf.squeeze removes a dimension
        # return tf.exp(logits) / tf.expand_dims(tf.reduce_sum(tf.exp(logits), axis=-1), axis=-1)
        # equivalent to:
        return tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True)

    def compute_output_shape(self, input_shape):
        # Output shape for the given input shape
        # A TensorShape
        return input_shape


class LeNet(Model):
    def __init__(self, input_shape=(28, 28, 1), num_classes=10):
        # super(LeNet, self).__init__(name="LeNet")
        self.num_classes = num_classes
        ''' 定义要用到的层 layers '''
        # 输入层
        img_input = Input(shape=input_shape)

        # Conv => ReLu => Pool
        x = Conv2D(filters=20, kernel_size=5, padding="same", activation="relu", name='block1_conv1')(img_input)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool')(x)
        # Conv => ReLu => Pool
        x = Conv2D(filters=50, kernel_size=5, padding="same", activation="relu", name='block1_conv2')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool2')(x)
        # Flatten to 1-D
        x = Flatten(name='flatten')(x)
        # Fully connected layer
        x = Dense(units=500, activation="relu", name="f1")(x)
        # softmax classifier
        # x = Dense(units=num_classes, activation="softmax", name="prediction")(x)
        x = Dense(units=num_classes)(x)
        x = MySoftMaxLayer(name="my_softmax_layer")(x)
        # 调用Model类的Model(input, output, name="***")构造方法
        super(LeNet, self).__init__(img_input, x, name="LeNet")

def my_softmax_cross_entropy_with_logits(labels, logits):
    # L = -sum(Yj * ln(Pj)) = -ln(Pj)
    # Yj takes values in {0, 1}
    # The standard cross-entropy loss is E = -sum(Yi * ln(Pi))
    return -tf.log(tf.reduce_sum(labels * logits, axis=1))


# Custom loss function
def loss(logits, labels):
    return tf.reduce_mean(my_softmax_cross_entropy_with_logits(labels, logits))
    # softmax_cross_entropy_with_logits computes one loss per input sample, returning an array of length N = batch size
    # reduce_mean then averages them
    # return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))


# Custom evaluation metric
def compute_accuracy(logits, labels):
    predictions = tf.argmax(input=logits, axis=1, output_type=tf.int64)
    labels = tf.argmax(input=labels, axis=1, output_type=tf.int64)  # tf.cast(x=labels, dtype=tf.int64)
    batch_size = int(logits.shape[0])
    return tf.reduce_sum(tf.cast(tf.equal(predictions, labels), dtype=tf.float32)) / batch_size


def run_mnist_eager(cfg):
    # Automatically choose a device
    (device, data_format) = ('/gpu:0', 'channels_last')
    if not tf.test.is_gpu_available():
        (device, data_format) = ('/cpu:0', 'channels_first')

    print('Using device %s, and data format %s.' % (device, data_format))
    # 载入数据集
    train_ds, test_ds = load_mnist()  # shape = (?, 768) / (?)

    # Create the model and optimizer
    model = LeNet()
    optimizer = tf.train.MomentumOptimizer(cfg.lr, cfg.momentum)
    model.compile(optimizer=optimizer,
                  loss=loss,  # 'categorical_crossentropy',
                  metrics=[compute_accuracy])  # ['accuracy']


    # Feed the data via generators; this works in eager mode
    train_ds = DataGenerator(train_ds[0], train_ds[1], batch_size=200)
    test_ds = DataGenerator(test_ds[0], test_ds[1], batch_size=200)
    # Train and evaluate for a set number of epochs.
    with tf.device(device):  # this line is required when running on the GPU
        for _ in range(cfg.train_epochs):
            start = time.time()

            model.fit_generator(generator=train_ds, epochs=1)
            # model.fit(train_ds[0], train_ds[1], batch_size=200, epochs=1)
            # verbose=0 suppresses the progress output
            # Generator-based evaluation:
            # _loss, _acc = model.evaluate(test_ds[0], test_ds[1], batch_size=100, verbose=0)
            # _loss, _acc = model.evaluate_generator(generator=test_ds, steps=10000)
            #
            _loss, _acc = model.evaluate_generator(generator=test_ds, verbose=1)
            print("test dataset loss: %f acc: %f" % (_loss, _acc))
            # train(model, optimizer, train_ds, step_counter, cfg.log_interval)
            end = time.time()
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                  (_, len(train_ds), end - start))



def arg_parse():
    """参数定义"""
    parser = argparse.ArgumentParser(description="Lenet-5 MNIST 模型")
    parser.add_argument("--lr", dest="lr", help="学习率", default=0.01, type=float)
    parser.add_argument("--momentum", dest="momentum", help="SGD momentum.", default=0.5)

    parser.add_argument("--data_dir", dest="data_dir", help="数据集下载/保存目录", default="data/mnist/input_data/")
    parser.add_argument("--model_dir", dest="model_dir", help="模型保存目录", default="data/mnist/checkpoints/")
    parser.add_argument("--batch_size", dest="batch_size", help="训练或测试时 Batch Size", default=100, type=int)
    parser.add_argument("--train_epochs", dest="train_epochs", help="训练时epoch迭代次数", default=4, type=int)
    parser.add_argument("--log_interval", dest="log_interval", help="日志打印间隔", default=10, type=int)

    # 返回转换好的结果
    return parser.parse_args()


def load_mnist():
    # Load the data, convert the encoding, and normalize
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype("float32") / 255.
    x_test = x_test.astype("float32") / 255.

    # Add a channel dimension (equivalent to np.expand_dims(..., -1))
    x_train = x_train[:, :, :, None]
    x_test = x_test[:, :, :, None]

    print(x_train.shape, "train shape")
    print(x_test.shape, "test shape")

    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    train_ds = (x_train, y_train)
    test_ds = (x_test, y_test)

    return train_ds, test_ds


if __name__ == '__main__':
    args = arg_parse()
    run_mnist_eager(args)


The pieces to focus on here are the my_softmax_cross_entropy_with_logits function and MySoftMaxLayer.


The SoftMax formula

For an input of dimension (T, 1), SoftMax computes the probability of the j-th class (a float between 0 and 1) as

$$S_j = \frac{e^{a_j}}{\sum\limits_{k=1}^{T} e^{a_k}}$$

In MySoftMaxLayer, the call method computes softmax over a batch of inputs; the core line is:

return tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True)
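
One thing worth noting: this direct implementation exponentiates the raw logits and can overflow for large values. A common remedy (not used in the code above) is to subtract the per-row maximum before exponentiating, which leaves the result unchanged. A minimal NumPy sketch:

import numpy as np

def stable_softmax(logits):
    # exp(a - c) / sum(exp(a - c)) == exp(a) / sum(exp(a)), so subtracting the
    # row-wise maximum changes nothing mathematically but avoids overflow.
    shifted = logits - np.max(logits, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)

print(stable_softmax(np.array([[2.0, 1.0, 0.1],
                               [1000.0, 1000.0, 1000.0]])))
# each row sums to 1; the naive version would overflow on the second row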

Cross-entropy loss

The standard cross-entropy loss can be written as

$$L = -\sum\limits_{j=1}^{T} Y_j \ln P_j$$

where $P_j$ is the predicted probability of class $j$ and $Y_j$ is the corresponding target value for class $j$.


SoftMax cross-entropy loss

The standard form of the SoftMax cross-entropy loss is

$$L = -\sum\limits_{j=1}^{T} Y_j \ln P_j$$

where $Y$ is a one-hot vector consisting of a single 1 and zeros everywhere else, and $P$ holds the predicted probability of each class. Because $Y$ is 1 at only one position, say $j$, the formula simplifies to

$$L = -\ln P_j$$
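
For example, with a one-hot label $Y = (0, 1, 0)$ and predicted probabilities $P = (0.2, 0.7, 0.1)$, the loss is $L = -\ln 0.7 \approx 0.357$.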

The core code is as follows (loss computed over a batch of inputs):

def my_softmax_cross_entropy_with_logits(labels, logits):
    # L = -sum(Yj * ln(Pj)) = -ln(Pj)
    # Yj takes values in {0, 1}
    # The standard cross-entropy loss is E = -sum(Yi * ln(Pi))
    return -tf.log(tf.reduce_sum(labels * logits, axis=1))
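
A quick NumPy check of this loss (my own sketch, with made-up probabilities and one-hot labels). Note that, despite the "with_logits" in its name, the function above actually receives probabilities, since the model already applies MySoftMaxLayer:

import numpy as np

probs  = np.array([[0.2, 0.7, 0.1],
                   [0.6, 0.3, 0.1]])   # softmax outputs
labels = np.array([[0., 1., 0.],
                   [1., 0., 0.]])      # one-hot targets

# labels * probs keeps only the probability of the true class in each row,
# so the row sum is exactly P_j for the true class j.
per_sample = -np.log(np.sum(labels * probs, axis=1))
print(per_sample)         # [0.357, 0.511] = [-ln 0.7, -ln 0.6]
print(per_sample.mean())  # the value reduce_mean returns in loss()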

Gradient of the SoftMax cross-entropy loss

Adapted from: https://blog.csdn.net/qian99/article/details/78046329
First, let's be clear about what we are after: the gradient of the loss with respect to the neuron outputs $z_i$, i.e.

$$\frac{\partial C}{\partial z_i}$$

By the chain rule for composite functions,

$$\frac{\partial C}{\partial z_i} = \frac{\partial C}{\partial s_j}\,\frac{\partial s_j}{\partial z_i}$$

You may wonder why this involves $s_j$ rather than $s_i$ (where $s$ is the softmax output, i.e. the probability of each class). Look again at the softmax formula: its denominator contains the outputs of all neurons, so every $s_j$ with $j \ne i$ also depends on $z_i$. All of the $s$ values therefore have to enter the computation, and the derivation below distinguishes the two cases $i = j$ and $i \ne j$.
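
Written out in full, the chain rule sums over all the softmax outputs:

$$\frac{\partial C}{\partial z_i} = \sum\limits_j \frac{\partial C}{\partial s_j}\,\frac{\partial s_j}{\partial z_i}$$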

Let's derive each factor in turn:

$$\frac{\partial C}{\partial s_j} = \frac{\partial \left(-\sum\limits_j y_j \ln s_j\right)}{\partial s_j} = -\sum\limits_j y_j \frac{1}{s_j}$$

The second factor is a bit more involved; we split it into two cases:

① If $i = j$:

$$\frac{\partial s_i}{\partial z_i} = \frac{\partial \left(\frac{e^{z_i}}{\sum_k e^{z_k}}\right)}{\partial z_i} = \frac{\sum_k e^{z_k} \cdot e^{z_i} - \left(e^{z_i}\right)^2}{\left(\sum_k e^{z_k}\right)^2} = \left(\frac{e^{z_i}}{\sum_k e^{z_k}}\right)\left(1 - \frac{e^{z_i}}{\sum_k e^{z_k}}\right) = s_i (1 - s_i)$$

② If $i \ne j$:

$$\frac{\partial s_j}{\partial z_i} = \frac{\partial \left(\frac{e^{z_j}}{\sum_k e^{z_k}}\right)}{\partial z_i} = -e^{z_j} \left(\frac{1}{\sum_k e^{z_k}}\right)^2 e^{z_i} = -s_i s_j$$

OK, now we just combine the pieces above:

$$\frac{\partial C}{\partial z_i} = \left(-\sum\limits_j y_j \frac{1}{s_j}\right) \frac{\partial s_j}{\partial z_i} = -\frac{y_i}{s_i}\, s_i (1 - s_i) + \sum\limits_{j \ne i} \frac{y_j}{s_j}\, s_i s_j = -y_i + y_i s_i + \sum\limits_{j \ne i} y_j s_i = -y_i + s_i \sum\limits_j y_j$$

The final result is much simpler. For a classification problem, the given target $y_i$ is 1 for exactly one class and 0 for all the others, so $\sum_j y_j = 1$ and the gradient reduces to

$$\frac{\partial C}{\partial z_i} = s_i - y_i$$
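
To convince yourself of this result, here is a small numerical gradient check (my own sketch, not part of the original post) comparing $s_i - y_i$ against central finite differences:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def cross_entropy(z, y):
    return -np.sum(y * np.log(softmax(z)))

z = np.array([2.0, 1.0, 0.1])   # logits
y = np.array([0.0, 1.0, 0.0])   # one-hot target

analytic = softmax(z) - y       # s_i - y_i
numeric = np.zeros_like(z)
eps = 1e-6
for i in range(len(z)):
    dz = np.zeros_like(z)
    dz[i] = eps
    numeric[i] = (cross_entropy(z + dz, y) - cross_entropy(z - dz, y)) / (2 * eps)

print(analytic)                 # roughly [ 0.659, -0.758,  0.099]
print(numeric)                  # should agree to about 1e-6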
