Transformer Code Implementation

This post gives a simple implementation of the standard Transformer; it is also the base Transformer code I use in my everyday work.

Import the required packages

import torch
import torch.nn as nn
from einops import rearrange

einops makes manipulating tensor dimensions very convenient; see the article "einops: elegantly manipulating tensor dimensions".
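As a quick illustration of why this is handy (my own example, not from the original post), the attention code below relies on exactly this kind of split/merge of the head dimension:

import torch
from einops import rearrange

t = torch.randn(2, 10, 8 * 16)                      # (b, n, h*d)
heads = rearrange(t, 'b n (h d) -> b h n d', h=8)   # split the last dim into 8 heads
merged = rearrange(heads, 'b h n d -> b n (h d)')   # merge the heads back
print(heads.shape, merged.shape)                    # (2, 8, 10, 16) and (2, 10, 128)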

Multi-Head Attention

class MultiHeadAttention(nn.Module):
    def __init__(self, dim, num_heads, dim_head):
        """
        Input: (b, n, dim)
        dim: feature dimension of the input sequence
        num_heads: number of attention heads
        dim_head: dimension of each attention head
        """
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.scale = dim_head ** -0.5
        # weight_dim: total dimension of q, k, v across all heads
        weight_dim = num_heads * dim_head
        self.qkv = nn.Linear(dim, weight_dim * 3)
        self.proj = nn.Linear(weight_dim, dim)

    def forward(self, x):
        # project to q, k, v with a single linear layer, then split
        qkv = self.qkv(x).chunk(3, dim=-1)
        # reshape each of q, k, v from (b, n, h*d) to (b, h, n, d)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads), qkv)
        # scaled dot-product attention
        dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        attn = torch.softmax(dots, dim=-1)
        out = torch.matmul(attn, v)
        # merge the heads and project back to dim
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.proj(out)
        return out
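A quick shape sanity check I added (not part of the original post): with dim=64, num_heads=4 and dim_head=32, the module maps (b, n, 64) back to (b, n, 64).

attn = MultiHeadAttention(dim=64, num_heads=4, dim_head=32)
x = torch.randn(3, 10, 64)
print(attn(x).shape)  # torch.Size([3, 10, 64])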

Feed-Forward Layer (FFN)

class FFN(nn.Module):
    def __init__(self, dim, hidden_dim, dropout=0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)
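Likewise, a minimal usage sketch I added: the FFN expands dim to hidden_dim with a GELU in between and projects back, so the sequence shape is preserved.

ffn = FFN(dim=64, hidden_dim=256, dropout=0.1)
x = torch.randn(3, 10, 64)
print(ffn(x).shape)  # torch.Size([3, 10, 64])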

Transformer

class PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)

    
class Transformer(nn.Module):
    def __init__(self, dim, num_heads, depth, embed_dim, mlp_dim, dropout=0.1):
        super().__init__()
        self.layers = nn.ModuleList([])
        dim_head = embed_dim // num_heads  # dimension of each attention head
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PreNorm(dim, MultiHeadAttention(dim, num_heads, dim_head)),
                PreNorm(dim, FFN(dim, mlp_dim, dropout))
            ]))

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x  # pre-norm attention + residual connection
            x = ff(x) + x    # pre-norm feed-forward + residual connection
        return x

The Transformer above does not include positional encoding; when using it in practice, you need to inject positional information yourself.

Positional encoding example

# For simplicity, use a learnable positional embedding
b, n, d = 3, 1000, 64
x = torch.randn(b, n, d)  # input sequence
# positional embedding
pos_embed = nn.Parameter(torch.zeros(1, n, d))
nn.init.trunc_normal_(pos_embed, std=0.02)
# add the positional embedding to the input
x = x + pos_embed
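For reference, a fixed sinusoidal encoding as in "Attention Is All You Need" can be used instead of a learnable embedding; the sketch below is my own addition, not part of the original code:

import math

# fixed sinusoidal positional encoding, returns a (1, n, d) tensor
def sinusoidal_pos_embed(n, d):
    pos = torch.arange(n, dtype=torch.float32).unsqueeze(1)   # (n, 1)
    div = torch.exp(torch.arange(0, d, 2, dtype=torch.float32) * (-math.log(10000.0) / d))
    pe = torch.zeros(n, d)
    pe[:, 0::2] = torch.sin(pos * div)   # even indices: sin
    pe[:, 1::2] = torch.cos(pos * div)   # odd indices: cos
    return pe.unsqueeze(0)

x = torch.randn(3, 1000, 64) + sinusoidal_pos_embed(1000, 64)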

Test

if __name__ == '__main__':
    x = torch.randn(3, 1000, 64)
    # dim=64, num_heads=4, depth=2, embed_dim=128, mlp_dim=256
    net = Transformer(64, 4, 2, 128, 256)
    y = net(x)
    print(y.shape)
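Running the test prints torch.Size([3, 1000, 64]): the Transformer keeps the input shape (b, n, dim) unchanged.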

References

Attention Is All You Need

Reposted from blog.csdn.net/weixin_44858814/article/details/125548733