arc_loss.py
import torch as t
import torch.nn as nn
import torch.nn.functional as F


class ArcLoss(nn.Module):
    def __init__(self, class_num, feature_num, s=10, m=0.1):
        super().__init__()
        self.class_num = class_num
        self.feature_num = feature_num
        self.s = s                      # scale factor applied to the cosines
        self.m = t.tensor(m)            # additive angular margin (radians)
        self.w = nn.Parameter(t.rand(feature_num, class_num))  # e.g. 2 x 10

    def forward(self, feature):
        feature = F.normalize(feature, dim=1)   # N x 2, e.g. 128 x 2
        w = F.normalize(self.w, dim=0)          # 2 x 10
        # dividing by 10 keeps the cosines well inside (-1, 1) so the sqrt below cannot
        # produce NaN; this is a numerical trick of this implementation, not standard ArcFace
        cos_theta = t.matmul(feature, w) / 10
        sin_theta = t.sqrt(1.0 - t.pow(cos_theta, 2))
        # cos(theta + m) via the angle-addition identity
        cos_theta_m = cos_theta * t.cos(self.m) - sin_theta * t.sin(self.m)
        cos_theta_ = t.exp(cos_theta * self.s)
        # denominator terms of the other classes (current class column excluded)
        sum_cos_theta = t.sum(t.exp(cos_theta * self.s), dim=1, keepdim=True) - cos_theta_
        top = t.exp(cos_theta_m * self.s)
        divide = top / (top + sum_cos_theta)
        # Alternative formulation: recover theta with arccos and add the margin directly.
        # a = t.acos(cos_theta)
        # top = t.exp(t.cos(a + 0.1) * 10)
        # _top = t.exp(t.cos(a) * 10)
        # bottom = t.sum(t.exp(cos_theta * 10), dim=1).view(-1, 1)
        # divide = (top / (bottom - _top + top)) + 1e-10   # N x 10
        return divide
        # The two formulations above are logically identical, but they behave differently in
        # experiments: recovering theta with arccos and substituting it into the formula
        # directly converges slightly better.
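# A quick numerical check (a sketch added here, not one of the original files): the note above
# says the two margin formulations are logically the same; this verifies that the trig-identity
# form and the arccos form of cos(theta + m) agree to floating-point precision.
import torch as t

cos_theta = (t.rand(4, 10) * 2 - 1) * 0.9      # fake cosines safely inside (-1, 1)
m = t.tensor(0.1)

identity_form = cos_theta * t.cos(m) - t.sqrt(1.0 - cos_theta ** 2) * t.sin(m)
arccos_form = t.cos(t.acos(cos_theta) + m)

print(t.allclose(identity_form, arccos_form, atol=1e-5))   # expected: True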
new_net.py
import torch as t
import torchvision as tv
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import torch.optim.lr_scheduler as lr_scheduler
import os
Batch_Size = 128
train_data = tv.datasets.MNIST(
    root="./mnist",
    train=True,
    download=False,
    transform=tv.transforms.Compose([tv.transforms.ToTensor(),
                                     tv.transforms.Normalize((0.1307,), (0.3081,))]))
train_loader = data.DataLoader(train_data, batch_size=Batch_Size, shuffle=True, drop_last=True, num_workers=8)
class TrainNet(nn.Module):
    def __init__(self):
        super().__init__()
        # 1x28x28 -> 64x14x14 -> 256x7x7 -> 256x7x7 -> 64x7x7 -> 16x4x4
        self.hidden_layer = nn.Sequential(
            nn.Conv2d(1, 64, 3, 2, 1),
            nn.BatchNorm2d(64),
            nn.PReLU(),
            nn.Conv2d(64, 256, 3, 2, 1),
            nn.BatchNorm2d(256),
            nn.PReLU(),
            nn.Conv2d(256, 256, 3, 1, 1),
            nn.BatchNorm2d(256),
            nn.PReLU(),
            nn.Conv2d(256, 64, 3, 1, 1),
            nn.BatchNorm2d(64),
            nn.PReLU(),
            nn.Conv2d(64, 16, 3, 2, 1),
            nn.PReLU())
        self.linear_layer = nn.Linear(16 * 4 * 4, 2)       # 2-D embedding for visualisation
        self.output_layer = nn.Linear(2, 10, bias=False)   # class logits from the embedding

    def forward(self, xs):
        feat = self.hidden_layer(xs)
        # print(feat.shape)
        fc = feat.reshape(-1, 16 * 4 * 4)
        # print(fc.data.size())
        feature = self.linear_layer(fc)
        output = self.output_layer(feature)
        return feature, F.log_softmax(output, dim=1)
def decet(feature, targets, epoch, save_path):
    # scatter-plot the 2-D embeddings, one colour per digit class
    color = ["red", "black", "yellow", "green", "pink", "gray", "lightgreen", "orange", "blue", "teal"]
    cls = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    plt.ion()
    plt.clf()
    for j in cls:
        mask = targets == j
        feature_ = feature[mask].numpy()
        x = feature_[:, 1]
        y = feature_[:, 0]
        plt.plot(x, y, ".", color=color[j])
        # print(x, y)
    plt.legend(cls, loc="upper right")  # calling legend() before the plot calls truncates the labels
    plt.title("epoch={}".format(str(epoch + 1)))
    plt.savefig('{}/{}.jpg'.format(save_path, epoch + 1))
    plt.draw()
    plt.pause(0.01)
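# A minimal shape check (a sketch, not part of the original file): it confirms that a
# 1x28x28 MNIST input reaches the flatten step as 16x4x4, which is where the
# 16 * 4 * 4 input size of linear_layer comes from. It reuses t and TrainNet from above.
if __name__ == '__main__':
    _net = TrainNet()
    _dummy = t.zeros(1, 1, 28, 28)
    _feature, _logits = _net(_dummy)
    print(_net.hidden_layer(_dummy).shape)   # torch.Size([1, 16, 4, 4])
    print(_feature.shape, _logits.shape)     # torch.Size([1, 2]) torch.Size([1, 10])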
train.py
from new_net import *
from arc_loss import ArcLoss

weight = 1
save_path = r".\model_data\{}\train{}.pth"
save_pic_path = "img17"

if __name__ == '__main__':
    net = TrainNet()
    device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
    arcloss = ArcLoss(10, 2).to(device)
    # crossloss = nn.CrossEntropyLoss().to(device)
    nllloss = nn.NLLLoss(reduction="sum").to(device)  # reduction="mean" works slightly worse
    optimizer = t.optim.SGD(net.parameters(), lr=0.0001, momentum=0.9, weight_decay=0.0005)
    scheduler = lr_scheduler.StepLR(optimizer, 20, gamma=0.8)
    optimizerarc = t.optim.Adam(arcloss.parameters())
    # if os.path.exists(save_path):
    #     net.load_state_dict(t.load(save_path))
    net = net.to(device)
    for epoch in range(15000):
        feat = []
        target = []
        for i, (x, y) in enumerate(train_loader):
            x, y = x.to(device), y.to(device)
            xs, ys = net(x)
            value = t.argmax(ys, dim=1)
            # arcloss outputs margin-adjusted softmax probabilities, so taking the log and
            # feeding NLLLoss amounts to a cross-entropy over the margin logits
            arc_loss = t.log(arcloss(xs))
            nll_loss = nllloss(ys, y)
            arcface_loss = nllloss(arc_loss, y)
            loss = nll_loss + arcface_loss
            acc = t.sum((value == y).float()) / len(y)
            # loss = crossloss(arc_loss, y)
            optimizer.zero_grad()
            optimizerarc.zero_grad()
            loss.backward()
            optimizer.step()
            optimizerarc.step()
            feat.append(xs)  # collect features here so the plot at the end of the epoch is cheap
            target.append(y)
            if i % 100 == 0:
                print(epoch, i, loss.item())
                print("acc", acc.item())
            if acc.item() == 1:
                print("dd")
                t.save(net, "newt.pth")
            # print(value[0].item(), "========>", y[0].item())
            # if (epoch + 1) % 1 == 0:
            #     t.save(net.state_dict(), save_path.format(r"D:\PycharmProjects\center_loss\data", str(epoch)))
        scheduler.step()  # step the LR schedule once per epoch, after the optimizer updates
        features = t.cat(feat, 0)
        targets = t.cat(target, 0)
        decet(features.data.cpu(), targets.data.cpu(), epoch, save_pic_path)
        # write.add_histogram("loss", loss.item(), count)
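# A minimal inference sketch (an assumption, not part of the original script; it would live in
# its own file): load the checkpoint saved above as "newt.pth" and compare two embeddings with
# cosine similarity, the angular metric the ArcFace margin is meant to sharpen.
import torch as t
import torch.nn.functional as F
from new_net import TrainNet  # TrainNet must be importable so t.load can unpickle the model

if __name__ == '__main__':
    # PyTorch 2.6+ may additionally need weights_only=False, since a full module was pickled
    net = t.load("newt.pth", map_location="cpu")
    net.eval()
    with t.no_grad():
        a = t.randn(1, 1, 28, 28)            # stand-ins for two preprocessed MNIST digits
        b = t.randn(1, 1, 28, 28)
        feat_a, _ = net(a)
        feat_b, _ = net(b)
        print(F.cosine_similarity(feat_a, feat_b).item())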