Chainer Backbone Networks: MnasNet Code Refactoring [with Source Code]


Preface

This article implements the MnasNet network structure in Chainer, building the Chainer version following the structure of the PyTorch (torch) implementation, and also computes MnasNet's parameter count.


Code Implementation


import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer.functions.activation.relu import ReLU
from chainer.functions.array.reshape import Reshape
from chainer.functions.noise.dropout import Dropout
from chainer.functions.pooling.average_pooling_2d import AveragePooling2D

# torch-style BN momentum; Chainer's BatchNormalization uses decay = 1 - momentum
_BN_MOMENTUM = 1 - 0.9997

class _InvertedResidual(chainer.Chain):
    def __init__(self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1) -> None:
        super().__init__()
        if stride not in [1, 2]:
            raise ValueError(f"stride should be 1 or 2 instead of {stride}")
        if kernel_size not in [3, 5]:
            raise ValueError(f"kernel_size should be 3 or 5 instead of {kernel_size}")
        mid_ch = in_ch * expansion_factor
        self.apply_residual = in_ch == out_ch and stride == 1
        # Chainer's BatchNormalization decay corresponds to 1 - torch momentum
        bn_decay = 1.0 - bn_momentum
        self.layers = []
        # 1x1 pointwise expansion
        self.layers += [('pw_conv1', L.Convolution2D(in_channels=in_ch, out_channels=mid_ch, ksize=1, nobias=True))]
        self.layers += [('pw_bn1', L.BatchNormalization(mid_ch, decay=bn_decay))]
        self.layers += [('_pw_relu1', ReLU())]
        # depthwise convolution (groups == channels)
        self.layers += [('dw_conv1', L.Convolution2D(in_channels=mid_ch, out_channels=mid_ch, ksize=kernel_size, pad=kernel_size // 2, stride=stride, groups=mid_ch, nobias=True))]
        self.layers += [('dw_bn1', L.BatchNormalization(mid_ch, decay=bn_decay))]
        self.layers += [('_dw_relu2', ReLU())]

        # 1x1 linear projection (no activation)
        self.layers += [('conv3', L.Convolution2D(in_channels=mid_ch, out_channels=out_ch, ksize=1, nobias=True))]
        self.layers += [('dw_bn3', L.BatchNormalization(out_ch, decay=bn_decay))]
        
        with self.init_scope():
            for n in self.layers:
                if not n[0].startswith('_'):
                    setattr(self, n[0], n[1])
                    
    def forward(self, x):
        shortcut = x
        for n, f in self.layers:
            if not n.startswith('_'):
                # named entries are links registered via setattr in init_scope
                x = getattr(self, n)(x)
            else:
                # '_'-prefixed entries are parameter-free FunctionNodes
                x = f.apply((x,))[0]
        
        if self.apply_residual:
            return x + shortcut
        else:
            return x
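
As a quick sanity check (not part of the original flow), here is a minimal sketch that runs one inverted-residual block on a dummy tensor; the shapes and hyperparameters are assumptions chosen purely for illustration:

import numpy as np
import chainer

# stride=1 and in_ch == out_ch, so the residual shortcut is applied
block = _InvertedResidual(in_ch=16, out_ch=16, kernel_size=3, stride=1, expansion_factor=3)
x = np.zeros((1, 16, 32, 32), dtype=np.float32)
with chainer.using_config('train', False):
    y = block(x)
print(y.shape)  # (1, 16, 32, 32) -- channels and spatial size preserved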

class _Stack(chainer.Chain):
    def __init__(self,in_ch: int, out_ch: int, kernel_size: int, stride: int, exp_factor: int, repeats: int, bn_momentum: float):
        super().__init__()
        if repeats < 1:
            raise ValueError(f"repeats should be >= 1, instead got {repeats}")
        self.layers=[]
        self.layers += [('first',_InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, bn_momentum=bn_momentum))]
        
        for i in range(1, repeats):
            self.layers += [(f'remaining_{i}', _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, bn_momentum=bn_momentum))]
        
        with self.init_scope():
            for n in self.layers:
                if not n[0].startswith('_'):
                    setattr(self, n[0], n[1])
    
    def forward(self, x):
        for n, f in self.layers:
            if not n.startswith('_'):
                x = getattr(self, n)(x)
            else:
                x = f.apply((x,))[0]
        return x
             
def _round_to_multiple_of(val: float, divisor: int, round_up_bias: float = 0.9) -> int:
    if not 0.0 < round_up_bias < 1.0:
        raise ValueError(f"round_up_bias should be greater than 0.0 and smaller than 1.0 instead of {
      
      round_up_bias}")
    new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
    return new_val if new_val >= round_up_bias * val else new_val + divisor

def _get_depths(alpha: float):
    depths = [32, 16, 24, 40, 80, 96, 192, 320]
    return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]
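
To make the rounding rule concrete: each scaled channel count is snapped to the nearest multiple of 8, rounding up whenever rounding down would lose more than 10% of the original value. A small worked example (the alpha values are just illustrations):

print(_round_to_multiple_of(32 * 1.0, 8))   # 32: already a multiple of 8
print(_round_to_multiple_of(24 * 0.75, 8))  # 24: rounding 18 down to 16 loses >10%, so round up
print(_get_depths(0.5))  # [16, 8, 16, 24, 40, 48, 96, 160]
print(_get_depths(1.0))  # [32, 16, 24, 40, 80, 96, 192, 320]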

class MNASNet(chainer.Chain):
    cfgs = {
        'mnasnet0_5':  {'alpha': 0.5,  'image_size': 224},
        'mnasnet0_75': {'alpha': 0.75, 'image_size': 224},
        'mnasnet1_0':  {'alpha': 1.0,  'image_size': 224},
        'mnasnet1_3':  {'alpha': 1.3,  'image_size': 224},
    }

    def __init__(self, model_name='mnasnet1_0', channels=3, image_size=224, batch_size=4, num_classes: int = 1000, dropout: float = 0.2, **kwargs) -> None:
        super().__init__()
        self.alpha = self.cfgs[model_name]['alpha']
        self.num_classes = num_classes
        depths = _get_depths(self.alpha)
        
        self.layers = []
        # stem: standard conv, then a depthwise-separable conv (as in the torch version)
        self.layers += [('conv1', L.Convolution2D(in_channels=channels, out_channels=depths[0], ksize=3, pad=1, stride=2, nobias=True))]
        # conv output size: floor((in - ksize + 2*pad) / stride) + 1
        output_size = int((image_size - 3 + 2 * 1) / 2 + 1)
        self.layers += [('bn1', L.BatchNormalization(depths[0], decay=1.0 - _BN_MOMENTUM))]
        self.layers += [('_relu1', ReLU())]
        self.layers += [('conv2', L.Convolution2D(in_channels=depths[0], out_channels=depths[0], ksize=3, pad=1, stride=1, groups=depths[0], nobias=True))]
        self.layers += [('bn2', L.BatchNormalization(depths[0], decay=1.0 - _BN_MOMENTUM))]
        self.layers += [('_relu2', ReLU())]
        self.layers += [('conv3', L.Convolution2D(in_channels=depths[0], out_channels=depths[1], ksize=1, pad=0, stride=1, nobias=True))]
        self.layers += [('bn3', L.BatchNormalization(depths[1], decay=1.0 - _BN_MOMENTUM))]
        # body: six stacks of inverted residual blocks
        self.layers += [('stack1', _Stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM))]
        output_size = int((output_size - 3 + 2 * (3 // 2)) / 2 + 1)
        self.layers += [('stack2', _Stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM))]
        output_size = int((output_size - 5 + 2 * (5 // 2)) / 2 + 1)
        self.layers += [('stack3', _Stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM))]
        output_size = int((output_size - 5 + 2 * (5 // 2)) / 2 + 1)
        self.layers += [('stack4', _Stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM))]
        output_size = int((output_size - 3 + 2 * (3 // 2)) / 1 + 1)
        self.layers += [('stack5', _Stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM))]
        output_size = int((output_size - 5 + 2 * (5 // 2)) / 2 + 1)
        self.layers += [('stack6', _Stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM))]
        output_size = int((output_size - 3 + 2 * (3 // 2)) / 1 + 1)
        # head: 1x1 conv to 1280 channels, global average pool, classifier
        self.layers += [('conv4', L.Convolution2D(in_channels=depths[7], out_channels=1280, ksize=1, pad=0, stride=1, nobias=True))]
        self.layers += [('bn4', L.BatchNormalization(1280, decay=1.0 - _BN_MOMENTUM))]
        self.layers += [('_relu4', ReLU())]

        self.layers += [('_avgpool', AveragePooling2D(ksize=output_size, stride=1, pad=0))]
        # -1 lets the reshape work for any batch size, not just batch_size
        self.layers += [('_reshape', Reshape((-1, 1280)))]

        self.layers += [('_dropout', Dropout(dropout))]
        self.layers += [('fc', L.Linear(1280, num_classes))]
        
        with self.init_scope():
            for n in self.layers:
                if not n[0].startswith('_'):
                    setattr(self, n[0], n[1])

    def forward(self, x):
        for n, f in self.layers:
            origin_size = x.shape
            if n.startswith('_'):
                # the raw Dropout FunctionNode always drops, so skip it at test time
                if n == '_dropout' and not chainer.config.train:
                    continue
                x = f.apply((x,))[0]
            else:
                x = getattr(self, n)(x)
            print(n, origin_size, x.shape)  # debug: trace each layer's shape change

        if chainer.config.train:
            return x
        return F.softmax(x)

Note that this class is the full MnasNet implementation. The forward pass distinguishes between training and testing: during training it returns x (the raw logits) directly, while during testing x is passed through softmax to produce class probabilities.
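
For inference, wrap the call in chainer.using_config('train', False); a minimal sketch, assuming the model and x constructed in the usage section below:

with chainer.using_config('train', False):
    probs = model(x)                  # softmax output; each row sums to 1
pred = probs.array.argmax(axis=1)     # predicted class index per sample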

Usage

if __name__ == '__main__':
    batch_size = 4
    n_channels = 3
    image_size = 199
    num_classes = 123
    
    model = MNASNet(num_classes=num_classes, channels=n_channels,image_size=image_size,batch_size=batch_size)
    print(model.count_params())

    x = np.random.rand(batch_size, n_channels, image_size, image_size).astype(np.float32)
    t = np.random.randint(0, num_classes, size=(batch_size,)).astype(np.int32)
    with chainer.using_config('train', True):
        y1 = model(x)
    loss1 = F.softmax_cross_entropy(y1, t)
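
To complete a training step from here, the usual Chainer idiom applies; a minimal sketch, assuming an Adam optimizer (any class from chainer.optimizers would do):

from chainer import optimizers

optimizer = optimizers.Adam()
optimizer.setup(model)      # register the model's parameters with the optimizer

model.cleargrads()          # zero any accumulated gradients
loss1.backward()            # backprop through the loss computed above
optimizer.update()          # apply the Adam update rule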


Reposted from blog.csdn.net/ctu_sue/article/details/128683333