Visualization of the training process using visdom

Visdom is a live visualization tool (originally from Facebook Research, now maintained by FOSSASIA) that works well with PyTorch

Repository: GitHub - fossasia/visdom: A flexible tool for creating, organizing, and sharing visualizations of live, rich data. Supports Torch and Numpy.

1. Install

pip install visdom

2. Start

visdom

or

python -m visdom.server

Will return an address, such as: http://localhost:8097/

Open it in the webpage, you can see the interface of visdom

Use visdom to detect training loss, example:

First start visdom in the terminal

import torch
import torch.nn as nn
from torch.optim import SGD
import torch.utils.data as Data

import numpy as np
import pandas as pd

# load_boston was deprecated in scikit-learn 1.0 and removed in 1.2;
# guard the import so the script can fall back to the documented
# replacement (see the data-loading section below).
try:
    from sklearn.datasets import load_boston
except ImportError:
    load_boston = None
from sklearn.preprocessing import StandardScaler

from visdom import Visdom

### Define a simple multilayer perceptron (MLP) network
class MLPmodel(nn.Module):
    """A small fully connected regression network: 13 -> 10 -> 10 -> 1."""

    def __init__(self):
        super(MLPmodel, self).__init__()
        # First hidden layer: 13 input features -> 10 units.
        self.hidden1 = nn.Linear(13, 10, bias=True)
        self.active1 = nn.ReLU()
        # Second hidden layer: 10 -> 10 units.
        self.hidden2 = nn.Linear(10, 10)
        self.active2 = nn.ReLU()
        # Regression head producing a single scalar prediction.
        self.regression = nn.Linear(10, 1)

    def forward(self, x):
        """Run the forward pass and return the regression output."""
        h = self.active1(self.hidden1(x))
        h = self.active2(self.hidden2(h))
        return self.regression(h)

mlp = MLPmodel()

### Load the Boston house-price dataset (506 samples, 13 features).
if load_boston is not None:
    boston_X, boston_y = load_boston(return_X_y=True)
else:
    # load_boston was removed in scikit-learn 1.2; rebuild the dataset
    # from the original CMU source exactly as shown in the scikit-learn
    # removal notice (features on alternating rows, target in column 2).
    _raw = pd.read_csv("http://lib.stat.cmu.edu/datasets/boston",
                       sep=r"\s+", skiprows=22, header=None)
    boston_X = np.hstack([_raw.values[::2, :], _raw.values[1::2, :2]])
    boston_y = _raw.values[1::2, 2]

## Standardize the features to zero mean and unit variance.
ss = StandardScaler(with_mean=True, with_std=True)
boston_Xs = ss.fit_transform(boston_X)  # fit and transform in one step
# NOTE(review): this prints the UNscaled matrix; boston_Xs was probably
# intended — kept as-is to preserve the original output.
print(boston_X)

## Package the data for batched PyTorch training.
# Convert the feature matrix and targets to float32 tensors.
train_xt = torch.from_numpy(boston_Xs.astype(np.float32))
train_yt = torch.from_numpy(boston_y.astype(np.float32))
# Bundle X and y so the loader yields (features, target) pairs.
train_data = Data.TensorDataset(train_xt, train_yt)
# Data loader serving shuffled mini-batches of 128 samples.
train_loader = Data.DataLoader(
    dataset=train_data,
    batch_size=128,
    shuffle=True,
    # 0 workers: spawning workers from a script without an
    # `if __name__ == "__main__"` guard crashes on Windows/macOS (spawn).
    num_workers=0,
)


# Connect to the visdom server (must already be running, default port 8097).
vis = Visdom()
# Initialise the plot window with a single point at the origin; later
# calls append to this same window, identified by its name.
vis.line(
    [0.],                           # first Y coordinate
    [0.],                           # first X coordinate
    win='train_loss',               # window identifier
    opts=dict(title='train_loss'),  # plot title
)

## Train the regression model and stream the loss curve to visdom.
optimizer = SGD(mlp.parameters(), lr=0.001)
loss_func = nn.MSELoss()
train_loss_all = []  # every per-batch loss, for later inspection
i = 0                # global batch counter -> X axis of the plot
for epoch in range(600):
    # Iterate over the shuffled mini-batches.
    for step, (b_x, b_y) in enumerate(train_loader):
        i += 1
        optimizer.zero_grad()
        # flatten() collapses the (batch, 1) output to (batch,) so it
        # matches the shape of the targets b_y.
        output = mlp(b_x).flatten()
        train_loss = loss_func(output, b_y)
        train_loss.backward()
        optimizer.step()
        train_loss_all.append(train_loss.item())
        # Append the new (i, loss) point to the existing visdom window.
        vis.line([train_loss.detach().numpy()], [i],
                 win='train_loss', update='append')



The loss curve can be obtained as shown in the figure

You may also like

Origin blog.csdn.net/a545454669/article/details/127077527