PyTorch BP neural network to classify region coordinates

Code test section (code written by Meng Yuhong); version with plotting

import numpy as np
import scipy.io as scio
import torch 
import torch.nn.functional as Fun
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt

### region 1 grid: x in [0, 100), y in [0, 401)
x1 = np.arange(100)
y1 = np.arange(401)
### region 2 grid: x in [110, 210), same y range
x2 = np.arange(110, 210)
reg1=[]
reg1_2=[]
### region 1: grid points labeled with the one-hot vector [1, 0]
for i in x1: 
    for j in y1:
        reg1_2.append([i,j])
        demo1=[i,j,1,0]
        reg1.append(demo1) 
reg2 = []
#print(reg1_2)
# assert False
reg2_2 = []
### region 2: grid points labeled with the one-hot vector [0, 1]
for i in x2:
    for j in y1:
        reg2_2.append([i,j])
        demo2=[i,j,0,1]
        reg2.append(demo2) 
print(len(reg1)*0.7, len(reg2)*0.7)  # 70% of each region is intended for training

# fig = plt.figure()  
# ax1 = fig.add_subplot(111)
# ax1.scatter(np.array(reg1)[:,0],np.array(reg1)[:,1],c = 'b',marker = 'o')  
# ax1.scatter(np.array(reg2)[:,0],np.array(reg2)[:,1],c = 'r',marker = 'o')  
# plt.show()
# assert False 
'''
def produceData(r, w, d, num): 
    r1 = r-w/2
    r2 = r+w/2
    # upper half-ring
    theta1 = np.random.uniform(0, np.pi ,num) 
    X_Col1 = np.random.uniform( r1*np.cos(theta1),r2*np.cos(theta1),num)[:, np.newaxis] 
    X_Row1 = np.random.uniform(r1*np.sin(theta1),r2*np.sin(theta1),num)[:, np.newaxis] 
    Y_label1 = np.ones(num)  # class label is 1
    # lower half-ring
    theta2 = np.random.uniform(-np.pi, 0 ,num) 
    X_Col2 = (np.random.uniform( r1*np.cos(theta2),r2*np.cos(theta2),num) + r)[:, np.newaxis] 
    X_Row2 = (np.random.uniform(r1 * np.sin(theta2), r2 * np.sin(theta2), num) -d)[:,np.newaxis] 
    Y_label2 = -np.ones(num)  # class label is -1; note: since tanh is used as the activation, the label cannot be 0
    # concatenate the two halves
    X_Col = np.vstack((X_Col1, X_Col2)) 
    X_Row = np.vstack((X_Row1, X_Row2)) 
    X = np.hstack((X_Col, X_Row)) 
    Y_label = np.hstack((Y_label1,Y_label2)) 
    Y_label.shape = (num*2 , 1) 
    return X,Y_label
'''
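# The quoted-out helper above generates the classic double-moon dataset (two
# interleaved half-rings). A minimal usage sketch, kept commented out because
# the function is disabled here; the values of r, w, d and num are
# illustrative assumptions only:
# X_moon, Y_moon = produceData(r=10, w=6, d=-4, num=1000)
# plt.scatter(X_moon[:, 0], X_moon[:, 1], c=Y_moon.ravel())
# plt.show()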
np.random.shuffle(reg1)
np.random.shuffle(reg2)
# hard-coded split: the first 28070 rows of region 1 and the first 27789 rows of region 2 form the training set; the rest form the test set
train = np.r_[reg1[0:28070], reg2[0:27789]]
test = np.r_[reg1[28070:len(reg1)], reg2[27789:len(reg2)]]
# fig = plt.figure()  
# ax1 = fig.add_subplot(111)
# ax1.scatter(train[:,0], train[:,1], c='b', marker='o')  
# ax1.scatter(test[:,0], test[:,1], c = 'r',marker = 'o')  
# plt.show()
# assert False 

# train = np.random.normal(size=(500,2))
# plt.scatter(train[:, 0], train[:, 1])
# plt.show()
# assert 0

print("type(test):", type(test))

class TrainDataset(Dataset):
    def __init__(self):
        self.train_dataset = train  # each row is [x, y, label_0, label_1]
        # print("---------",self.train_dataset.shape, type( self.train_dataset[0][0]) )
        self.data_tf = transforms.ToTensor()
        
    def __getitem__(self, i):
        # print("type(self.train_dataset[i]):",type(self.train_dataset[i][2]))
        return torch.from_numpy(self.train_dataset[i][0:2]), torch.from_numpy(np.array([self.train_dataset[i][2:4]]))

    def __len__(self):
        return len(self.train_dataset)


class TestDataset(Dataset):
    def __init__(self):
        self.train_dataset = test  # each row is [x, y, label_0, label_1]
        # print("---------",self.train_dataset.shape, type( self.train_dataset[0][0]) )
        self.data_tf = transforms.ToTensor()
        
    def __getitem__(self, i):
        # print("type(self.train_dataset[i]):",type(self.train_dataset[i][2]))
        # print(type([self.train_dataset[i][2:4]]))
       
        return torch.from_numpy(self.train_dataset[i][0:2]), torch.from_numpy(np.array([self.train_dataset[i][2:4]]))

    def __len__(self):
        return len(self.train_dataset)


train_dataset = TrainDataset()
print(train_dataset, type(train_dataset))
test_dataset = TestDataset()
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=128,
                                           shuffle=True)  # shuffle the data

test_loader = torch.utils.data.DataLoader(dataset=test_dataset, 
                                          batch_size=200, 
                                          shuffle=True)           
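
# Shape check for one batch (a commented-out sketch; the shapes follow from
# __getitem__ above): data is (128, 2) and the label is (128, 1, 2), which is
# why the training loop below reshapes the label with .view().
# xb, yb = next(iter(train_loader))
# print(xb.shape, yb.shape)  # torch.Size([128, 2]) torch.Size([128, 1, 2])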

# Define the BP (fully connected) neural network; despite its name, CNNnet contains no convolutional layers
class CNNnet(torch.nn.Module):
    def __init__(self):
        super(CNNnet, self).__init__()
        self.mlp1 = torch.nn.Linear(2, 10)
        # print( "============", type( self.mlp1 ))
        self.mlp2 = torch.nn.Linear(10, 10)
        self.mlp3 = torch.nn.Linear(10, 2)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):       
        x = self.mlp1(x)
        # x = self.relu(x)
        x = Fun.relu(x)
        x = self.mlp2(x)
        # x = self.relu(x)
        x = Fun.relu(x)
        x = self.mlp3(x)
        # x = self.sigmoid(x)
        x = torch.sigmoid(x)  # nn.functional.sigmoid is deprecated; torch.sigmoid is the current API
        return x

model = CNNnet().double()
print(model)  # prints the layer structure; forward() is not run by this
# assert False
# criterion=nn.CrossEntropyLoss()
#criterion = nn.BCEWithLogitsLoss()
criterion = nn.BCELoss()
total_step = len(train_loader)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

epochs = 15
loss_list = []
acc_list = []
for epoch in range(epochs):
    model.train()
    for i, (data, label) in enumerate(train_loader):
        out = model(data.double())
        predict = torch.max(out, 1)[1]  # predicted class index for each sample
        label = label.view(out.shape[0], out.shape[1])  # reshape (batch, 1, 2) -> (batch, 2)
        labels_ = torch.max(label, 1)[1]  # true class index recovered from the one-hot label
        acc = (predict == labels_).sum().item() / label.size()[0]
        acc_list.append(acc)
        # print(label.shape,label)
        loss = criterion(out, label.double())
        loss_list.append(loss.item())
        # print("loss",loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1)%100 == 0:
            print('Epoch[{}/{}], step[{}/{}], loss:{:.4f}, acc:{:.4f}'.format(epoch+1, epochs, i+1, total_step, loss.item(), acc)) 
            # loss.item() returns the Python number stored in the tensor
        
model.eval()  # switch to evaluation mode (standard practice, though this model has no dropout/batchnorm)
with torch.no_grad():
    correct = 0.
    total = 0.
    for data1, labels in test_loader:
        outputs = model(data1.double())
        predict = torch.max(outputs, 1)[1]  # torch.max(t, 1) returns (values, indices); take the indices
        total += labels.size(0)
        labels = labels.view(outputs.shape[0], outputs.shape[1]).double()
        labels_ = torch.max(labels, 1)[1]
        # print("outputs.shape:",labels_.shape, predict.shape)
        correct += (predict == labels_).sum().item()
    
  #  print("============:\n", outputs)
    # print("outputs.shape:",outputs.shape[0],outputs.shape[1])
    # print(labels.size(0),labels.shape, outputs.shape)
    # print("labels:",labels)ape)
  # print(outputs.shape, labels.shape)
    # print(predicted.shape,labels.sh)
    
    print('Accuracy of the network on the {} test samples: {} %'.format(total, 100 * correct/total))
    
torch.save(model.state_dict(), 'model.ckpt')

plt.figure()
plt.plot(loss_list)
plt.figure()
plt.plot(acc_list)
plt.show()
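
To use the saved checkpoint later, here is a minimal inference sketch (not part of the original post; it assumes the CNNnet class from the listing above is in scope, and the point (50, 200) is just an arbitrary coordinate from region 1):

import numpy as np
import torch

net = CNNnet().double()                        # same architecture as above
net.load_state_dict(torch.load('model.ckpt'))
net.eval()

point = torch.from_numpy(np.array([50, 200])).double()
with torch.no_grad():
    out = net(point)                           # two sigmoid outputs, one per region
    region = torch.max(out, 0)[1].item()       # 0 -> region 1, 1 -> region 2
print('predicted region index:', region)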

Version of the code without the plotting

import numpy as np
import scipy.io as scio
import torch 
import torch.nn.functional as Fun
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
### region 1 grid: x in [0, 100), y in [0, 401)
x1 = np.arange(100)
y1 = np.arange(401)
### region 2 grid: x in [110, 210), same y range
x2 = np.arange(110, 210)
reg1=[]
reg1_2=[]
### region 1: grid points labeled with the one-hot vector [1, 0]
for i in x1: 
    for j in y1:
        reg1_2.append([i,j])
        demo1=[i,j,1,0]
        reg1.append(demo1) 
reg2 = []
#print(reg1_2)
# assert False
reg2_2 = []
### region 2: grid points labeled with the one-hot vector [0, 1]
for i in x2:
    for j in y1:
        reg2_2.append([i,j])
        demo2=[i,j,0,1]
        reg2.append(demo2) 
print(len(reg1)*0.7, len(reg2)*0.7)  # 70% of each region is intended for training

np.random.shuffle(reg1)
np.random.shuffle(reg2)
# hard-coded split: the first 28070 rows of region 1 and the first 27789 rows of region 2 form the training set; the rest form the test set
train = np.r_[reg1[0:28070], reg2[0:27789]]
test = np.r_[reg1[28070:len(reg1)], reg2[27789:len(reg2)]]

print("type(test):", type(test))

class TrainDataset(Dataset):
    def __init__(self):
        self.train_dataset = train  # each row is [x, y, label_0, label_1]
        # print("---------",self.train_dataset.shape, type( self.train_dataset[0][0]) )
        self.data_tf = transforms.ToTensor()
        
    def __getitem__(self, i):
        # print("type(self.train_dataset[i]):",type(self.train_dataset[i][2]))
        return torch.from_numpy(self.train_dataset[i][0:2]), torch.from_numpy(np.array([self.train_dataset[i][2:4]]))  # the slice end index (4) is exclusive

    def __len__(self):
        return len(self.train_dataset)


class TestDataset(Dataset):
    def __init__(self):
        self.train_dataset = test  # each row is [x, y, label_0, label_1]
        # print("---------",self.train_dataset.shape, type( self.train_dataset[0][0]) )
        self.data_tf = transforms.ToTensor()
        
    def __getitem__(self, i):
        # print("type(self.train_dataset[i]):",type(self.train_dataset[i][2]))
        # print(type([self.train_dataset[i][2:4]]))
        return torch.from_numpy(self.train_dataset[i][0:2]), torch.from_numpy(np.array([self.train_dataset[i][2:4]]))

    def __len__(self):
        return len(self.train_dataset)


train_dataset = TrainDataset()
# print(train_dataset, type(train_dataset))
test_dataset = TestDataset()
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=128,
                                           shuffle=True)  # shuffle the data

test_loader = torch.utils.data.DataLoader(dataset=test_dataset, 
                                          batch_size=200, 
                                          shuffle=True)           
# Define the BP (fully connected) neural network; despite its name, CNNnet contains no convolutional layers
class CNNnet(torch.nn.Module):
    def __init__(self):
        super(CNNnet, self).__init__()
        self.mlp1 = torch.nn.Linear(2, 10)
        # print( "============", type( self.mlp1 ))
        self.mlp2 = torch.nn.Linear(10, 10)
        self.mlp3 = torch.nn.Linear(10, 2)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()    # squashes every output into the range (0, 1)
    def forward(self, x):       
        x = self.mlp1(x)
        # x = self.relu(x)
        x = Fun.relu(x)
        x = self.mlp2(x)
        # x = self.relu(x)
        x = Fun.relu(x)
        x = self.mlp3(x)
        # x = self.sigmoid(x)
        x = torch.sigmoid(x)  # nn.functional.sigmoid is deprecated; torch.sigmoid is the current API
        return x
model = CNNnet().double()
print(model)  # prints the layer structure; forward() is not run by this
# assert False
#  criterion=nn.CrossEntropyLoss()
#criterion = nn.BCEWithLogitsLoss()
criterion = nn.BCELoss()
total_step = len(train_loader)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
epochs = 10

for epoch in range(epochs):
    model.train()
    for i, (data, label) in enumerate(train_loader):
        out = model(data.double())
        label = label.view(out.shape[0], out.shape[1])  # reshape (batch, 1, 2) -> (batch, 2) to match the output
        loss = criterion(out, label.double())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1)%100 == 0:
            print('Epoch[{}/{}], step[{}/{}], loss:{:.4f}'.format(epoch+1, epochs, i+1, total_step, loss.item()))
            # loss.item() returns the Python number stored in the tensor
model.eval()  # switch to evaluation mode (standard practice, though this model has no dropout/batchnorm)
with torch.no_grad():
    correct = 0.
    total = 0.
    for data1, labels in test_loader:
        outputs = model(data1.double())
        predict = torch.max(outputs, 1)[1]  # torch.max(t, 1) returns (values, indices); take the indices
        total += labels.size(0)
        labels = labels.view(outputs.shape[0], outputs.shape[1]).double()
        labels_ = torch.max(labels, 1)[1]
        # print("outputs.shape:",labels_.shape, predict.shape)
        correct += (predict == labels_).sum().item()
    print('Accuracy of the network on the {} test samples: {} %'.format(total, 100 * correct/total))
    
torch.save(model.state_dict(), 'model.ckpt')
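
The commented-out criterion lines above also mention nn.BCEWithLogitsLoss. A minimal sketch of that variant (my own addition, not from the original post): drop the final sigmoid from forward() and let the loss apply it internally, which is numerically more stable:

class BPNetLogits(torch.nn.Module):
    # same layer sizes as CNNnet, but forward() returns raw logits
    def __init__(self):
        super(BPNetLogits, self).__init__()
        self.mlp1 = torch.nn.Linear(2, 10)
        self.mlp2 = torch.nn.Linear(10, 10)
        self.mlp3 = torch.nn.Linear(10, 2)

    def forward(self, x):
        x = Fun.relu(self.mlp1(x))
        x = Fun.relu(self.mlp2(x))
        return self.mlp3(x)                    # no sigmoid here

model2 = BPNetLogits().double()
criterion2 = nn.BCEWithLogitsLoss()            # sigmoid + BCE combined in one call
optimizer2 = torch.optim.Adam(model2.parameters(), lr=0.01)
# inside the training loop the only change would be:
#   out = model2(data.double())
#   loss = criterion2(out, label.view(out.shape[0], out.shape[1]).double())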

 


Source: blog.csdn.net/weixin_42528089/article/details/103941358