Understanding Cross-Entropy Loss (Code Comparison)

import torch
from torch.nn.functional import cross_entropy
import numpy as np
import random


def fix_random_seed(seed):
    # Fix all random seeds so every run is reproducible
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    print("Seed set successfully!")


fix_random_seed(2021)

# Random logits for 2 samples over 3 classes, and each sample's target class index
p = torch.randn([2, 3])
t = torch.tensor([2, 1])

print(p)
print(t)
# Built-in cross-entropy loss (takes raw logits)
print("loss1:", cross_entropy(p, t, reduction="mean").item())  # loss1: 2.6753830909729004
# -------------------------------------------------------
# Manual implementation of cross-entropy loss
p = torch.softmax(p, dim=-1)                               # logits -> probabilities
gold_probs = torch.gather(p, 1, t.unsqueeze(1)).squeeze()  # probability assigned to each target class
step_loss = torch.mean(-torch.log(gold_probs))             # negative log-likelihood, averaged over the batch
print("loss2:", step_loss.item())  # loss2: 2.6753830909729004

Result:

Both methods print the same loss value, confirming that PyTorch's built-in cross_entropy is exactly softmax, followed by gathering each sample's target-class probability, taking the negative log, and averaging. Working through this equivalence is a good exercise for beginners.
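One caveat worth knowing: chaining softmax with log, as in the manual version above, can underflow when the target-class probability is very small. A minimal sketch of a numerically safer variant, assuming the same logits/targets shapes as in the script above, uses log_softmax (and, equivalently, nll_loss):

import torch
from torch.nn.functional import cross_entropy, log_softmax, nll_loss

logits = torch.randn([2, 3])
targets = torch.tensor([2, 1])

# log_softmax computes log(softmax(x)) in one numerically stable pass
log_probs = log_softmax(logits, dim=-1)

# Explicit form: negate the target-class log-probabilities and average
loss_manual = -log_probs.gather(1, targets.unsqueeze(1)).squeeze().mean()

# nll_loss does the same gather-negate-average internally
loss_nll = nll_loss(log_probs, targets, reduction="mean")

print(loss_manual.item(), loss_nll.item(), cross_entropy(logits, targets).item())
# All three values match

This is why the numbers agree: cross_entropy is equivalent to log_softmax followed by nll_loss, just fused into one call.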


Reposted from blog.csdn.net/q506610466/article/details/118090895