A simple guide to single-machine multi-GPU training in PyTorch with DistributedDataParallel

Let's start with the official example; you can simply adapt it to your own model and data.

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim

from torch.nn.parallel import DistributedDataParallel as DDP

class ToyModel(nn.Module):
    def __init__(self):
        super(ToyModel, self).__init__()
        self.net1 = nn.Linear(10, 10)
        self.relu = nn.ReLU()
        self.net2 = nn.Linear(10, 5)

    def forward(self, x):
        return self.net2(self.relu(self.net1(x)))


def demo_basic():
    # torchrun sets RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT, so no arguments are needed here
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    print(f"Start running basic DDP example on rank {
      
      rank}.")

    # create model and move it to GPU with id rank
    device_id = rank % torch.cuda.device_count()
    model = ToyModel().to(device_id)
    ddp_model = DDP(model, device_ids=[device_id])

    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)

    optimizer.zero_grad()
    outputs = ddp_model(torch.randn(20, 10))
    labels = torch.randn(20, 5).to(device_id)
    loss_fn(outputs, labels).backward()
    optimizer.step()

    # tear down the process group when training is finished
    dist.destroy_process_group()

if __name__ == "__main__":
    demo_basic()

To run it, just execute torchrun --nproc_per_node=8 elastic_ddp.py, where 8 is the number of GPUs on the single machine.
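As a side note, torchrun exports environment variables such as LOCAL_RANK, RANK and WORLD_SIZE to every process it spawns, so an equivalent way to pick the GPU for the current process is to read LOCAL_RANK directly. A minimal sketch, not part of the original example (setup_device is just an illustrative helper name):

import os
import torch
import torch.distributed as dist

def setup_device():
    # torchrun sets LOCAL_RANK to this process's GPU index on the local machine
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group("nccl")
    return local_rank

On a single machine this gives the same device index as the rank % torch.cuda.device_count() trick used above.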
A few things to note:

  1. Prefer PyTorch's Dataset and DataLoader; with DDP the DataLoader is usually paired with a DistributedSampler so that each process reads a different shard of the data (see the sketch after this list)
  2. Any data that takes part in the computation afterwards, such as labels in the code above, must be moved to the GPU with .to(device_id)
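For point 1, here is a minimal sketch of what the data-loading side could look like. It is not from the original post: it reuses device_id, ddp_model, loss_fn and optimizer from the example above and picks an arbitrary batch size and epoch count.

import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

# toy data standing in for a real Dataset
dataset = TensorDataset(torch.randn(1000, 10), torch.randn(1000, 5))

# DistributedSampler hands each rank a different shard of the indices
sampler = DistributedSampler(dataset)
loader = DataLoader(dataset, batch_size=32, sampler=sampler)

for epoch in range(10):
    sampler.set_epoch(epoch)  # makes shuffling differ between epochs
    for inputs, labels in loader:
        inputs = inputs.to(device_id)  # point 2: everything used in the computation goes to the GPU
        labels = labels.to(device_id)
        optimizer.zero_grad()
        loss_fn(ddp_model(inputs), labels).backward()
        optimizer.step()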

Reposted from blog.csdn.net/Defiler_Lee/article/details/127935889