Is MindSpore faster than PyTorch? Yes

Huawei claims that MindSpore is faster than PyTorch, crediting its automatic differentiation; it is true that in MindSpore you don't have to write the optimization loop yourself when training. But rather than take that on faith, I ran a quick test, and it really is somewhat faster.

The experiment uses the MNIST classification example.

Epoch counts of 10 and 50 were tried.

MindSpore:

# -*- coding: utf-8 -*-
import os
import time
import mindspore.nn as nn
from mindspore.common.initializer import Normal
from mindspore import Tensor, Model, export, load_checkpoint
from mindspore.nn.metrics import Accuracy
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.vision import Inter
from mindspore import dtype as mstype
import numpy as np
import mindspore.dataset as ds
from mindspore.train.callback import Callback


# MNIST data: download and unzip https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/MNIST_Data.zip into ./datasets
train_data_path = "./datasets/MNIST_Data/train"
test_data_path = "./datasets/MNIST_Data/test"
mnist_path = "./datasets/MNIST_Data"
model_path = "./models/ckpt/"

# define the dataset
def create_dataset(data_path, batch_size=128, repeat_size=1,
                   num_parallel_workers=1):
    """ 
    create dataset for train or test
    
    Args:
        data_path (str): Data path
        batch_size (int): The number of data records in each group
        repeat_size (int): The number of replicated data records
        num_parallel_workers (int): The number of parallel workers
    """
    # define dataset
    mnist_ds = ds.MnistDataset(data_path)

    # define the parameters needed for resizing and normalizing the images
    resize_height, resize_width = 32, 32
    rescale = 1.0 / 255.0
    shift = 0.0
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081
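    # note: the two Rescale ops below compose to (x/255 - 0.1307)/0.3081,
    # i.e. the standard MNIST mean/std normalization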

    # build the resize, rescale, transpose and type-cast operations from the parameters above
    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)
    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
    rescale_op = CV.Rescale(rescale, shift)
    hwc2chw_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)

    # using map to apply operations to a dataset
    mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    
    # process the generated dataset
    buffer_size = 10000
    mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size)
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)

    return mnist_ds

start=time.time()

# define the network
class mnist(nn.Cell):
    def __init__(self, num_class=10):
        super(mnist, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, 5, pad_mode='valid')
        self.conv2 = nn.Conv2d(8, 12, 5, pad_mode='valid')
        # 12 channels * 5 * 5 = 300 features after two conv+pool stages on a 32x32 input
        self.fc1 = nn.Dense(300, 120, weight_init=Normal(0.02))
        self.fc2 = nn.Dense(120, 60, weight_init=Normal(0.02))
        self.fc3 = nn.Dense(60, num_class, weight_init=Normal(0.02))
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    def construct(self, x):
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.max_pool2d(self.relu(self.conv2(x)))
        x = self.flatten(x)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x)
        return x
                
network = mnist()
net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

# define the model
model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()} )

repeat_size = 1
ds_train = create_dataset(os.path.join(mnist_path, "train"), 128, repeat_size)
model.train(50, ds_train, dataset_sink_mode=False)
print(time.time()-start)
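
For reference, model.train hides the per-batch forward pass, automatic differentiation and parameter update. A rough sketch of the loop it replaces, assuming a 1.x-era MindSpore where the nn.WithLossCell and nn.TrainOneStepCell wrappers are available and reusing the network, net_loss, net_opt and ds_train objects defined above, would look something like this:

net_with_loss = nn.WithLossCell(network, net_loss)          # forward pass + loss
train_step = nn.TrainOneStepCell(net_with_loss, net_opt)    # adds autodiff + optimizer update
train_step.set_train()
for _ in range(50):
    for batch in ds_train.create_dict_iterator():
        loss = train_step(batch["image"], batch["label"])   # one forward/backward/update step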


PyTorch:

# -*- coding: utf-8 -*-
import time
import torch.nn as nn
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
from torch.optim import lr_scheduler
# transforms

transform = transforms.Compose([
    transforms.Resize(32),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))])
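# note: this normalizes with mean 0.5 / std 0.5, whereas the MindSpore script above
# uses the MNIST statistics (mean 0.1307, std 0.3081)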

# datasets
trainset = torchvision.datasets.MNIST('data', download=True, train=True, transform=transform)
testset = torchvision.datasets.MNIST('data', download=True, train=False, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                          shuffle=True, num_workers=0)
testloader = torch.utils.data.DataLoader(testset, batch_size=128,
                                         shuffle=False, num_workers=0)

start=time.time()

class mnist(nn.Module):
    def __init__(self,num_classes=10):
        super(mnist, self).__init__()
        # padding defaults to 0, so padding_mode has no effect here; these convs
        # behave like the 'valid' (no-padding) convs in the MindSpore script
        self.conv1=nn.Conv2d(1,8,5,padding_mode='reflect')
        self.conv2=nn.Conv2d(8,12,5,padding_mode='reflect')
        self.fc1=nn.Linear(300,120)
        self.fc2=nn.Linear(120,60)
        self.fc3=nn.Linear(60,num_classes)
        self.relu=nn.ReLU()
        self.max_pool2d=nn.MaxPool2d(2,2)
        self.flatten = nn.Flatten()
        
    def forward(self,x):
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.max_pool2d(self.relu(self.conv2(x)))        
        x = self.flatten(x)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x) 
        return x 
    
    
    
model = mnist().to(device='cpu')
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
criterion = nn.CrossEntropyLoss()
# note: the scheduler is defined but never stepped in the loop below, so the learning rate stays at 0.001
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

for epoch in range(50):
    for index,data in enumerate(trainloader):
        inputs,labels=data
        inputs = inputs.to('cpu')
        labels = labels.to('cpu')
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        if index % 100 == 0:
            print(index, loss.item())
            
print(time.time()-start)
#torch.save(model, 'FashionMNIST.pth')
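
Speed aside, it is worth confirming that both models reach a reasonable accuracy. A minimal sketch of a test-set check, reusing model and testloader from the PyTorch script above (the MindSpore side can use model.eval with a dataset built from the test split, since an Accuracy metric was already passed to Model), could be:

model.eval()
correct, total = 0, 0
with torch.no_grad():                      # no gradients needed for evaluation
    for inputs, labels in testloader:
        outputs = model(inputs)
        correct += (outputs.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
print('test accuracy:', correct / total)

# MindSpore equivalent (in the MindSpore script above):
# acc = model.eval(create_dataset(test_data_path), dataset_sink_mode=False)
# print(acc)   # {'Accuracy': ...}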


Results:

epoch    MindSpore (s)        PyTorch (s)
10       147.7769854068756    199.70614314079285
50       747.4645166397095    985.8706977367401

As the numbers show, MindSpore really is noticeably faster here (roughly a 1.3x speedup at both 10 and 50 epochs), and its optimization of the differentiation step appears to be effective.

Reposted from blog.csdn.net/zhou_438/article/details/114080067