pytorch实现异或运算

main.py

from torch import nn
import torch

# Train a tiny MLP to learn XOR: 2 inputs -> 20 hidden units (ReLU) -> 1 sigmoid output.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = nn.Sequential(nn.Linear(2, 20), nn.ReLU(), nn.Linear(20, 1), nn.Sigmoid()).to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=0.05)
# MSE against 0/1 targets; the sigmoid output keeps predictions in [0, 1].
loss_func = nn.MSELoss()

# The four XOR input/target pairs — the full truth table is the training set.
x = [[0, 0], [0, 1], [1, 0], [1, 1]]
y = [[0], [1], [1], [0]]
x_tensor = torch.tensor(x).float().to(device)
y_tensor = torch.tensor(y).float().to(device)

for epoch in range(5000):
    out = net(x_tensor)
    loss = loss_func(out, y_tensor)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 1000 == 0:
        print(f'迭代次数:{epoch}')
        print(f'误差:{loss}')

# Final predictions: no_grad avoids building an unnecessary autograd graph
# for pure inference (the original relied on .data to strip grad tracking).
with torch.no_grad():
    out = net(x_tensor).cpu()
print(f'out:{out.data}')

# NOTE(review): torch.save on the whole module pickles the class by reference;
# saving net.state_dict() is the recommended, more portable format. Kept as-is
# because ceshi.py loads the complete module object from this exact path.
torch.save(net, './net.pkl')


ceshi.py

from torch import nn
import torch

# Load the XOR net trained by main.py and classify a single input pair.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

x = [[1, 1]]
x_tensor = torch.tensor(x).float().to(device)

# map_location lets a checkpoint saved on a GPU machine load on a CPU-only one.
# weights_only=False is required to unpickle a full nn.Module (PyTorch >= 2.6
# defaults to weights_only=True, which rejects arbitrary pickled objects);
# only safe because ./net.pkl is our own trusted file from main.py.
net = torch.load("./net.pkl", map_location=device, weights_only=False)
net.eval()
with torch.no_grad():
    out = net(x_tensor).cpu()

# Threshold the sigmoid output at 0.5 to get a hard 0/1 class label.
if out > 0.5:
    outfinal = 1
else:
    outfinal = 0

print(outfinal)

猜你喜欢

转载自blog.csdn.net/qzzzxiaosheng/article/details/115586612