Depthwise Separable U_Net

1. Background

Since its introduction, U_Net has been modified in all sorts of ways, because its structure is simple and easy to adapt. Inspired by MobileNet, I decided to tinker with it myself. In the proposed structure, the original standard convolutions are replaced with depthwise separable convolutions, dilated convolutions are used inside the separable convolutions, and residual connections are employed as well.

2. Structural Advantages

2.1 Depthwise separable convolution: extracts spatial features and channel features separately, which greatly reduces the number of parameters (see the sketch after this list).

2.2 Dilated convolution: provides receptive fields of different sizes, which is very beneficial for semantic segmentation.

2.3 Residual structure: lets features propagate forward faster and makes the residual function easier to optimize, helping to avoid overfitting and similar issues.
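
To make the savings in 2.1 concrete, here is a minimal sketch (not part of the model code; channel sizes chosen to match block4 below) comparing the parameter count of a standard 3×3 convolution with its depthwise separable counterpart:

from torch import nn

cin, cout = 256, 512
standard = nn.Conv2d(cin, cout, 3, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(cin, cin, 3, padding=1, groups=cin, bias=False),  # depthwise: one 3x3 filter per channel
    nn.Conv2d(cin, cout, 1, bias=False),                        # pointwise: 1x1 channel mixing
)
count = lambda m: sum(p.numel() for p in m.parameters())
print(count(standard))   # 3*3*256*512 = 1,179,648
print(count(separable))  # 3*3*256 + 256*512 = 133,376, roughly a 9x reduction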

3. Code

Original model structure:

import torch
from torch import nn

class block_down(nn.Module):
    """Two 3x3 convolutions, each followed by BN and ReLU6 (U_Net encoder block)."""

    def __init__(self,inp_channel,out_channel):
        super(block_down,self).__init__()
        self.conv1=nn.Conv2d(inp_channel,out_channel,3,padding=1)
        self.conv2=nn.Conv2d(out_channel,out_channel,3,padding=1)
        # Note: a single BatchNorm instance is shared by both convolutions,
        # so its running statistics mix the outputs of two different layers.
        self.bn=nn.BatchNorm2d(out_channel)
        self.relu=nn.ReLU6(inplace=True)
        
    def forward(self,x):
        x=self.conv1(x)
        x=self.bn(x)
        x=self.relu(x)
        x=self.conv2(x)
        x=self.bn(x)
        x=self.relu(x)
        return x

class block_up(nn.Module):
    """Upsample with a 2x2 transposed conv, concatenate the encoder skip, then two 3x3 convs."""

    def __init__(self,inp_channel,out_channel):
        super(block_up,self).__init__()
        self.up=nn.ConvTranspose2d(inp_channel,out_channel,2,stride=2)
        self.conv1=nn.Conv2d(inp_channel,out_channel,3,padding=1)
        self.conv2=nn.Conv2d(out_channel,out_channel,3,padding=1)
        # As in block_down, one BatchNorm instance is shared by both convolutions.
        self.bn=nn.BatchNorm2d(out_channel)
        self.relu=nn.ReLU6(inplace=True)

    def forward(self,x,y):
        x=self.up(x)
        x=torch.cat([x,y],dim=1)
        x=self.conv1(x)
        x=self.bn(x)
        x=self.relu(x)
        x=self.conv2(x)
        x=self.bn(x)
        x=self.relu(x)
        return x

class U_net(nn.Module):
    
    def __init__(self,out_channel):
        super(U_net,self).__init__()
        self.out=nn.Conv2d(64,out_channel,1)
        self.maxpool=nn.MaxPool2d(2)
        self.block1=block_down(3,64)
        self.block2=block_down(64,128)
        self.block3=block_down(128,256)
        self.block4=block_down(256,512)
        self.block5=block_down(512,1024)
        self.block6=block_up(1024,512)
        self.block7=block_up(512,256)
        self.block8=block_up(256,128)
        self.block9=block_up(128,64)


    def forward(self,x):
        x1_use=self.block1(x)
        x1=self.maxpool(x1_use)
        x2_use=self.block2(x1)
        x2=self.maxpool(x2_use)
        x3_use=self.block3(x2)
        x3=self.maxpool(x3_use)
        x4_use=self.block4(x3)
        x4=self.maxpool(x4_use)
        x5=self.block5(x4)

        x6=self.block6(x5,x4_use)
        x7=self.block7(x6,x3_use)
        x8=self.block8(x7,x2_use)
        x9=self.block9(x8,x1_use)
        out=self.out(x9)
        return out


if __name__ == "__main__":
    test_input = torch.rand(1, 3, 480, 640).to("cuda")
    print("input_size:", test_input.size())
    model = U_net(3)
    model.cuda()
    output = model(test_input)
    print("output_size:", output.size())
    # count the trainable parameters, layer by layer
    params = list(model.named_parameters())
    k = 0
    for name, param in params:
        print(name)
        if param.requires_grad:
            l = 1
            for i in param.size():
                l *= i
            k = k + l
            print(l)
    print("Total number of parameters: " + str(k))

Output of the original model:

input_size: torch.Size([1, 3, 480, 640])
output_size: torch.Size([1, 3, 480, 640])
out.weight
192
out.bias
3
block1.conv1.weight
1728
block1.conv1.bias
64
block1.conv2.weight
36864
block1.conv2.bias
64
block1.bn.weight
64
block1.bn.bias
64
block2.conv1.weight
73728
block2.conv1.bias
128
block2.conv2.weight
147456
block2.conv2.bias
128
block2.bn.weight
128
block2.bn.bias
128
block3.conv1.weight
294912
block3.conv1.bias
256
block3.conv2.weight
589824
block3.conv2.bias
256
block3.bn.weight
256
block3.bn.bias
256
block4.conv1.weight
1179648
block4.conv1.bias
512
block4.conv2.weight
2359296
block4.conv2.bias
512
block4.bn.weight
512
block4.bn.bias
512
block5.conv1.weight
4718592
block5.conv1.bias
1024
block5.conv2.weight
9437184
block5.conv2.bias
1024
block5.bn.weight
1024
block5.bn.bias
1024
block6.up.weight
2097152
block6.up.bias
512
block6.conv1.weight
4718592
block6.conv1.bias
512
block6.conv2.weight
2359296
block6.conv2.bias
512
block6.bn.weight
512
block6.bn.bias
512
block7.up.weight
524288
block7.up.bias
256
block7.conv1.weight
1179648
block7.conv1.bias
256
block7.conv2.weight
589824
block7.conv2.bias
256
block7.bn.weight
256
block7.bn.bias
256
block8.up.weight
131072
block8.up.bias
128
block8.conv1.weight
294912
block8.conv1.bias
128
block8.conv2.weight
147456
block8.conv2.bias
128
block8.bn.weight
128
block8.bn.bias
128
block9.up.weight
32768
block9.up.bias
64
block9.conv1.weight
73728
block9.conv1.bias
64
block9.conv2.weight
36864
block9.conv2.bias
64
block9.bn.weight
64
block9.bn.bias
64
Total number of parameters: 31037763

New model structure:

import torch
from torch import nn
import torch.nn.functional as F

def fixed_padding(inputs, kernel_size, dilation):
    # Pad so that a dilated conv applied with padding=0 preserves the spatial size.
    # Effective kernel size of a dilated conv: k_eff = k + (k - 1) * (dilation - 1)
    kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
    return padded_inputs

class InvertedResidual(nn.Module):
    # Despite the name, there is no channel expansion here: the block is a plain
    # depthwise separable convolution, and `expand` is (re)used as the dilation
    # rate of the depthwise conv. The pointwise conv is left linear (no activation).
    def __init__(self, inp, oup, expand):
        super(InvertedResidual, self).__init__()
        self.expand = expand
        self.conv = nn.Sequential(
            # dw: 3x3 depthwise conv (groups=inp), dilated by `expand`
            nn.Conv2d(inp, inp, 3, 1, 0, dilation=expand, groups=inp, bias=False),
            nn.BatchNorm2d(inp),
            nn.ReLU6(inplace=True),
            # pw: 1x1 pointwise conv that mixes channels
            nn.Conv2d(inp, oup, 1, 1, 0, 1, bias=False),
        )

    def forward(self, x):
        x_pad = fixed_padding(x, 3, self.expand)
        y= self.conv(x_pad)
        return y

class block_down(nn.Module):

    def __init__(self, inp_channel, out_channel, expand):
        super(block_down, self).__init__()
        self.deepwise1 = InvertedResidual(inp_channel, inp_channel, expand)
        self.deepwise2 = InvertedResidual(inp_channel, out_channel, expand)
        # 1x1 conv shortcut that matches channel counts for the residual add
        self.resnet = nn.Conv2d(inp_channel, out_channel, 1, 1, 0, 1, bias=False)

    def forward(self, input):
        resnet = self.resnet(input)
        x = self.deepwise1(input)
        x = self.deepwise2(x)
        out = torch.add(resnet, x)
        return out


class block_up(nn.Module):

    def __init__(self, inp_channel, out_channel, expand):
        super(block_up, self).__init__()
        self.up = nn.ConvTranspose2d(inp_channel, out_channel, 2, stride=2)
        self.deepwise1 = InvertedResidual(inp_channel, inp_channel, expand)
        self.deepwise2 = InvertedResidual(inp_channel, out_channel, expand)
        self.resnet = nn.Conv2d(inp_channel, out_channel, 1, 1, 0, 1, bias=False)

    def forward(self, x, y):
        x = self.up(x)
        # concatenate the upsampled features with the encoder skip connection
        x1 = torch.cat([x, y], dim=1)
        x = self.deepwise1(x1)
        x = self.deepwise2(x)
        # residual shortcut taken from the concatenated features
        resnet = self.resnet(x1)
        out = torch.add(resnet, x)
        return out


class U_net(nn.Module):

    def __init__(self, class_num):
        super(U_net, self).__init__()
        # the first encoder block is reduced to a single 1x1 conv
        self.inp = nn.Conv2d(3, 64, 1)
        self.block2 = block_down(64, 128, expand=1)
        self.block3 = block_down(128, 256, expand=2)
        self.block4 = block_down(256, 512, expand=2)
        self.block5 = block_down(512, 1024, expand=1)
        self.block6 = block_up(1024, 512, expand=1)
        self.block7 = block_up(512, 256, expand=1)
        self.block8 = block_up(256, 128, expand=2)
        self.block9 = block_up(128, 64, expand=2)
        self.out = nn.Conv2d(64, class_num, 1)
        self.maxpool = nn.MaxPool2d(2)

    def forward(self, x):
        x1_use = self.inp(x)
        x1 = self.maxpool(x1_use)
        x2_use = self.block2(x1)
        x2 = self.maxpool(x2_use)
        x3_use = self.block3(x2)
        x3 = self.maxpool(x3_use)
        x4_use = self.block4(x3)
        x4 = self.maxpool(x4_use)
        x5 = self.block5(x4)

        x6 = self.block6(x5, x4_use)
        x7 = self.block7(x6, x3_use)
        x8 = self.block8(x7, x2_use)
        x9 = self.block9(x8, x1_use)
        out= self.out(x9)
        return out


if __name__ == "__main__":
    test_input = torch.rand(1, 3, 480, 640).to("cuda")
    print("input_size:", test_input.size())
    model = U_net(3)
    model.cuda()
    output = model(test_input)
    print("output_size:", output.size())
    # count the trainable parameters, layer by layer
    params = list(model.named_parameters())
    k = 0
    for name, param in params:
        print(name)
        if param.requires_grad:
            l = 1
            for i in param.size():
                l *= i
            k = k + l
            print(l)
    print("Total number of parameters: " + str(k))

4. New Model Results

input_size: torch.Size([1, 3, 480, 640])
output_size: torch.Size([1, 3, 480, 640])
inp.weight
192
inp.bias
64
block2.deepwise1.conv.0.weight
576
block2.deepwise1.conv.1.weight
64
block2.deepwise1.conv.1.bias
64
block2.deepwise1.conv.3.weight
4096
block2.deepwise2.conv.0.weight
576
block2.deepwise2.conv.1.weight
64
block2.deepwise2.conv.1.bias
64
block2.deepwise2.conv.3.weight
8192
block2.resnet.weight
8192
block3.deepwise1.conv.0.weight
1152
block3.deepwise1.conv.1.weight
128
block3.deepwise1.conv.1.bias
128
block3.deepwise1.conv.3.weight
16384
block3.deepwise2.conv.0.weight
1152
block3.deepwise2.conv.1.weight
128
block3.deepwise2.conv.1.bias
128
block3.deepwise2.conv.3.weight
32768
block3.resnet.weight
32768
block4.deepwise1.conv.0.weight
2304
block4.deepwise1.conv.1.weight
256
block4.deepwise1.conv.1.bias
256
block4.deepwise1.conv.3.weight
65536
block4.deepwise2.conv.0.weight
2304
block4.deepwise2.conv.1.weight
256
block4.deepwise2.conv.1.bias
256
block4.deepwise2.conv.3.weight
131072
block4.resnet.weight
131072
block5.deepwise1.conv.0.weight
4608
block5.deepwise1.conv.1.weight
512
block5.deepwise1.conv.1.bias
512
block5.deepwise1.conv.3.weight
262144
block5.deepwise2.conv.0.weight
4608
block5.deepwise2.conv.1.weight
512
block5.deepwise2.conv.1.bias
512
block5.deepwise2.conv.3.weight
524288
block5.resnet.weight
524288
block6.up.weight
2097152
block6.up.bias
512
block6.deepwise1.conv.0.weight
9216
block6.deepwise1.conv.1.weight
1024
block6.deepwise1.conv.1.bias
1024
block6.deepwise1.conv.3.weight
1048576
block6.deepwise2.conv.0.weight
9216
block6.deepwise2.conv.1.weight
1024
block6.deepwise2.conv.1.bias
1024
block6.deepwise2.conv.3.weight
524288
block6.resnet.weight
524288
block7.up.weight
524288
block7.up.bias
256
block7.deepwise1.conv.0.weight
4608
block7.deepwise1.conv.1.weight
512
block7.deepwise1.conv.1.bias
512
block7.deepwise1.conv.3.weight
262144
block7.deepwise2.conv.0.weight
4608
block7.deepwise2.conv.1.weight
512
block7.deepwise2.conv.1.bias
512
block7.deepwise2.conv.3.weight
131072
block7.resnet.weight
131072
block8.up.weight
131072
block8.up.bias
128
block8.deepwise1.conv.0.weight
2304
block8.deepwise1.conv.1.weight
256
block8.deepwise1.conv.1.bias
256
block8.deepwise1.conv.3.weight
65536
block8.deepwise2.conv.0.weight
2304
block8.deepwise2.conv.1.weight
256
block8.deepwise2.conv.1.bias
256
block8.deepwise2.conv.3.weight
32768
block8.resnet.weight
32768
block9.up.weight
32768
block9.up.bias
64
block9.deepwise1.conv.0.weight
1152
block9.deepwise1.conv.1.weight
128
block9.deepwise1.conv.1.bias
128
block9.deepwise1.conv.3.weight
16384
block9.deepwise2.conv.0.weight
1152
block9.deepwise2.conv.1.weight
128
block9.deepwise2.conv.1.bias
128
block9.deepwise2.conv.3.weight
8192
block9.resnet.weight
8192
out.weight
192
out.bias
3
Total number of parameters: 7376131

The parameter count drops by roughly three quarters (31,037,763 → 7,376,131). On a semantic segmentation task on my own dataset, the new model already reaches an mIoU of 0.5465 after the first epoch, versus 0.2590 for the original U_Net after its first epoch; of course, a single epoch is not conclusive. One caveat: the new model needs a smaller batch size because it uses noticeably more GPU memory, for reasons I have not identified. Inference takes 8.65 ms per 480×640 image on a 1080 Ti, versus 6.64 ms for the original model, again for reasons unknown. (A plausible factor: depthwise convolutions have low arithmetic intensity and are often memory-bandwidth-bound on GPUs, so fewer parameters do not automatically mean faster inference.)
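
For reference, one common way to measure per-image GPU latency uses CUDA events (a minimal sketch; not necessarily how the numbers above were obtained, and n_warmup/n_runs are arbitrary choices):

import torch

def benchmark(model, inp, n_warmup=10, n_runs=100):
    model.eval()
    with torch.no_grad():
        for _ in range(n_warmup):              # warm up kernels / cuDNN autotuning
            model(inp)
        torch.cuda.synchronize()
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        for _ in range(n_runs):
            model(inp)
        end.record()
        torch.cuda.synchronize()               # wait for all queued kernels to finish
    return start.elapsed_time(end) / n_runs    # average milliseconds per forward pass

For example, call benchmark(model, test_input) after running either script above.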

5. Summary

The model clearly has fewer parameters, accuracy has "possibly" improved, but speed has gone down; the model needs further testing.


Reposted from blog.csdn.net/weixin_41803339/article/details/96145639