Time-Series Channel Attention Module

The inspiration comes from SENet (Squeeze-and-Excitation Networks): squeeze the temporal dimension with global average pooling, learn one weight per channel with a small MLP, then rescale the input channels with those weights.
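For reference, here is a minimal sketch of the original 2D SE block, assuming the standard SENet formulation (this snippet and its class name are my own illustration, not from the post). The 1D module below adapts it by pooling over the time axis instead of the spatial ones:

import torch.nn as nn

class se_block_2d(nn.Module):
    def __init__(self, channel, ratio=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)  # squeeze: (B,C,H,W) -> (B,C,1,1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // ratio, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // ratio, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)   # flatten to (B,C) for the MLP
        y = self.fc(y).view(b, c, 1, 1)   # per-channel weights
        return x * y                      # scale each channel map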

import torch
import torch.nn as nn
import math

class ts_channel_block(nn.Module):
    def __init__(self, channel, ratio=1):
        super(ts_channel_block, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)  # squeeze over the time dimension (the 1D twist on SENet)
        self.fc = nn.Sequential(
                nn.Linear(channel, channel // ratio, bias=False),
                nn.ReLU(inplace=True),
                nn.Linear(channel // ratio, channel, bias=False),
                nn.Sigmoid()
        )

    def forward(self, x):
        b, c, l = x.size()  # x: (B, C, L)
        # squeeze: (B, C, L) -> (B, C, 1), then flatten to (B, C)
        # so the Linear layers can consume it
        y = self.avg_pool(x).view(b, c)
        print("y", y.shape)
        # excitation: per-channel weights, reshaped to (B, C, 1) to broadcast over L
        y = self.fc(y).view(b, c, 1)
        print("y", y.shape)
        return x * y
tsam = ts_channel_block(7)
tensor = torch.randn(8,7,96)
print(tensor.shape)
output = tsam(tensor)
print(output.shape)

Output:

torch.Size([8, 7, 96])
y torch.Size([8, 7])
y torch.Size([8, 7, 1])
torch.Size([8, 7, 96])
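As a usage sketch (my own example, not from the original post), the block can be dropped in front of any layer that consumes (B, C, L) tensors. The model below reuses ts_channel_block as defined above; the class name, layer sizes, and forecast horizon are illustrative assumptions:

import torch
import torch.nn as nn

class TinyForecaster(nn.Module):  # hypothetical example model
    def __init__(self, channels=7, seq_len=96, horizon=24):
        super().__init__()
        self.attn = ts_channel_block(channels)  # reweight channels first
        self.conv = nn.Conv1d(channels, channels, kernel_size=3, padding=1)
        self.head = nn.Linear(seq_len, horizon)  # map L=96 to the horizon

    def forward(self, x):              # x: (B, C, L)
        x = self.attn(x)               # (B, C, L), channel-weighted
        x = torch.relu(self.conv(x))   # (B, C, L)
        return self.head(x)            # (B, C, horizon)

model = TinyForecaster()
out = model(torch.randn(8, 7, 96))
print(out.shape)  # torch.Size([8, 7, 24])

Note that the debug prints inside ts_channel_block will also fire on each forward pass; drop them once the shapes are verified.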


Reposted from blog.csdn.net/weixin_43332715/article/details/127046121