Text Classification (Part 3) | (4) Defining the Models and Their Configurations (Based on Pretrained Language Models)


This post walks through the module definitions for each model: the model class itself and its corresponding configuration (hyperparameters). Both live in the same module file.
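All of the model modules below share the same imports, which the snippets omit. A minimal sketch of what each module file is assumed to begin with (the output_all_encoded_layers argument used later points to the pre-transformers pytorch_pretrained_bert API; the exact package name in your project may differ):

import torch
import torch.nn as nn
import torch.nn.functional as F
# Assumption: the pre-transformers BERT port; some projects vendor a local copy under another name.
from pytorch_pretrained_bert import BertModel, BertTokenizer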

Contents

1. bert

2. bert+CNN

3. bert+RNN

4. bert+RCNN

5. bert+DPCNN

6. ERNIE


 

1. bert

  • Configuration class
class Config(object):

    """配置参数"""
    def __init__(self, dataset):
        self.model_name = 'bert'
        #训练集、验证集、测试集路径
        self.train_path = dataset + '/data/train.txt'                                
        self.dev_path = dataset + '/data/dev.txt'                                    
        self.test_path = dataset + '/data/test.txt'  
        #类别名单
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]          
        #存储模型的训练结果
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'        
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')   # 设备

        self.require_improvement = 1000                                 # 若超过1000batch效果还没提升,则提前结束训练
        self.num_classes = len(self.class_list)                         # 类别数
        self.num_epochs = 3                                             # epoch数
        self.batch_size = 128                                           # mini-batch大小
        self.pad_size = 32                                              # 每句话处理成的长度(短填长切)
        self.learning_rate = 5e-5                                       # 学习率
        #预训练模型相关文件(模型文件.bin、配置文件.json、词表文件vocab.txt)存储路径
        self.bert_path = './bert_pretrain'
        #序列切分工具
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)
        #隐藏单元数
        self.hidden_size = 768
  • Model class

class Model(nn.Module):

    def __init__(self, config):
        super(Model, self).__init__()
        # Load the pretrained BERT model
        self.bert = BertModel.from_pretrained(config.bert_path)
        # Fine-tune: keep all BERT parameters trainable
        for param in self.bert.parameters():
            param.requires_grad = True  # finetuning
        # Output (classification) layer
        self.fc = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):
        context = x[0]  # input token ids  (batch, seq_len)
        mask = x[2]  # attention mask, same size as the input; padding positions are 0, e.g. [1, 1, 1, 1, 0, 0]
        _, pooled = self.bert(context, attention_mask=mask, output_all_encoded_layers=False)  # pooled (batch, hidden_size): last-layer encoding of the [CLS] token
        out = self.fc(pooled)  # (batch, num_classes)
        return out
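A hypothetical usage sketch (not part of the original code): it assumes the dataset directory is named THUCNews and that the data iterator yields a (token_ids, seq_len, mask) tuple, which matches the x[0] and x[2] indexing used in forward:

config = Config('THUCNews')            # assumed dataset directory name
model = Model(config).to(config.device)

batch = 2
ids = torch.zeros(batch, config.pad_size, dtype=torch.long, device=config.device)        # dummy token ids
seq_len = torch.full((batch,), config.pad_size, dtype=torch.long, device=config.device)  # assumed real-length field
mask = torch.ones(batch, config.pad_size, dtype=torch.long, device=config.device)        # attention mask

logits = model((ids, seq_len, mask))   # x[0] = ids, x[1] = seq_len (unused by this model), x[2] = mask
print(logits.shape)                    # torch.Size([2, num_classes])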

2. bert+CNN

  • Configuration class
class Config(object):

    """配置参数"""
    def __init__(self, dataset):
        self.model_name = 'bert'
        # 训练集、验证集、测试集路径
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # 类别名单
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # 存储模型的训练结果
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # 设备
        
        self.require_improvement = 1000                                 # 若超过1000batch效果还没提升,则提前结束训练
        self.num_classes = len(self.class_list)                         # 类别数
        self.num_epochs = 3                                             # epoch数
        self.batch_size = 128                                           # mini-batch大小
        self.pad_size = 32                                              # 每句话处理成的长度(短填长切)
        self.learning_rate = 5e-5                                       # 学习率

        # 预训练模型相关文件(模型文件.bin、配置文件.json、词表文件vocab.txt)存储路径
        self.bert_path = './bert_pretrain'
        # 序列切分工具
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)
        # 隐藏单元数
        self.hidden_size = 768
        self.filter_sizes = (2, 3, 4)                                   # 不同大小卷积核尺寸
        self.num_filters = 256                                          # 不同大小卷积核数量(channels数)
        self.dropout = 0.1      #丢弃率
  • Model class
class Model(nn.Module):

    def __init__(self, config):
        super(Model, self).__init__()
        # Load the pretrained BERT model
        self.bert = BertModel.from_pretrained(config.bert_path)
        # Fine-tune: keep all BERT parameters trainable
        for param in self.bert.parameters():
            param.requires_grad = True
        # 2D convolutions with kernels of different sizes
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config.num_filters, (k, config.hidden_size)) for k in config.filter_sizes])
        self.dropout = nn.Dropout(config.dropout)

        self.fc_cnn = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)  # (batch, num_filters, height)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # (batch, num_filters), global max pooling
        return x

    def forward(self, x):
        context = x[0]  # input token ids (batch, seq_len)
        mask = x[2]  # attention mask, same size as the input; padding positions are 0, e.g. [1, 1, 1, 1, 0, 0]
        # encoder_out (batch, seq_len, hidden_size): last-layer encoding of every token
        # text_cls (batch, hidden_size): last-layer encoding of the [CLS] token
        encoder_out, text_cls = self.bert(context, attention_mask=mask, output_all_encoded_layers=False)
        out = encoder_out.unsqueeze(1)  # add a channel dimension for 2D convolution: (batch, 1, seq_len, hidden_size)
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)  # (batch, num_filters * len(filter_sizes))
        out = self.dropout(out)
        out = self.fc_cnn(out)  # (batch, num_classes)
        return out
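To make the shape bookkeeping in conv_and_pool concrete, here is a small standalone trace under the default hyperparameters (pad_size=32, hidden_size=768, num_filters=256, filter_sizes=(2, 3, 4)); the numbers are illustrative, not taken from the original post:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(8, 1, 32, 768)             # (batch, 1, seq_len, hidden_size) after unsqueeze(1)
conv = nn.Conv2d(1, 256, (3, 768))         # one of the three kernels: spans 3 tokens over the full hidden dim
h = F.relu(conv(x)).squeeze(3)             # (8, 256, 30): the width dimension collapses to 1 and is squeezed away
p = F.max_pool1d(h, h.size(2)).squeeze(2)  # (8, 256): global max pooling over the 30 positions
# concatenating the outputs for kernel sizes 2, 3 and 4 gives (8, 256 * 3) features for fc_cnn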

3. bert+RNN

  • Configuration class
class Config(object):

    """配置参数"""
    def __init__(self, dataset):
        self.model_name = 'bert'
        # 训练集、验证集、测试集路径
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # 类别名单
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # 存储模型的训练结果
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # 设备

        self.require_improvement = 1000  # 若超过1000batch效果还没提升,则提前结束训练
        self.num_classes = len(self.class_list)  # 类别数
        self.num_epochs = 3  # epoch数
        self.batch_size = 128  # mini-batch大小
        self.pad_size = 32  # 每句话处理成的长度(短填长切)
        self.learning_rate = 5e-5  # 学习率

        # 预训练模型相关文件(模型文件.bin、配置文件.json、词表文件vocab.txt)存储路径
        self.bert_path = './bert_pretrain'
        # 序列切分工具
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)
        # 隐藏单元数
        self.hidden_size = 768
        
        self.dropout = 0.1 #丢弃率
        self.rnn_hidden = 768 #rnn隐藏单元数
        self.num_layers = 2 #rnn层数
  • Model class
class Model(nn.Module):

    def __init__(self, config):
        super(Model, self).__init__()

        # Load the pretrained BERT model
        self.bert = BertModel.from_pretrained(config.bert_path)
        # Fine-tune: keep all BERT parameters trainable
        for param in self.bert.parameters():
            param.requires_grad = True
        # Two-layer bidirectional LSTM, batch dimension first
        self.lstm = nn.LSTM(config.hidden_size, config.rnn_hidden, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.dropout = nn.Dropout(config.dropout)
        self.fc_rnn = nn.Linear(config.rnn_hidden * 2, config.num_classes)

    def forward(self, x):
        context = x[0]  # input token ids (batch, seq_len)
        mask = x[2]  # attention mask, same size as the input; padding positions are 0, e.g. [1, 1, 1, 1, 0, 0]
        # encoder_out (batch, seq_len, hidden_size): last-layer encoding of every token
        # text_cls (batch, hidden_size): last-layer encoding of the [CLS] token
        encoder_out, text_cls = self.bert(context, attention_mask=mask, output_all_encoded_layers=False)

        out, _ = self.lstm(encoder_out)  # (batch, seq_len, rnn_hidden * 2)
        out = self.dropout(out)
        out = self.fc_rnn(out[:, -1, :])  # hidden state at the last time step -> (batch, num_classes)
        return out
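A quick standalone check of the LSTM output shapes (a sketch with random tensors standing in for the BERT encoder output):

import torch
import torch.nn as nn

lstm = nn.LSTM(768, 768, num_layers=2, bidirectional=True, batch_first=True, dropout=0.1)
seq = torch.randn(4, 32, 768)     # (batch, seq_len, hidden_size), i.e. BERT's last-layer token encodings
out, (h_n, c_n) = lstm(seq)
print(out.shape)                  # torch.Size([4, 32, 1536]): forward and backward states concatenated
print(out[:, -1, :].shape)        # torch.Size([4, 1536]): last time step, the vector fed to fc_rnn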

4. bert+RCNN

  • Configuration class
class Config(object):

    """配置参数"""
    def __init__(self, dataset):
        # 训练集、验证集、测试集路径
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # 类别名单
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # 存储模型的训练结果
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # 设备

        self.require_improvement = 1000  # 若超过1000batch效果还没提升,则提前结束训练
        self.num_classes = len(self.class_list)  # 类别数
        self.num_epochs = 3  # epoch数
        self.batch_size = 128  # mini-batch大小
        self.pad_size = 32  # 每句话处理成的长度(短填长切)
        self.learning_rate = 5e-5  # 学习率

        # 预训练模型相关文件(模型文件.bin、配置文件.json、词表文件vocab.txt)存储路径
        self.bert_path = './bert_pretrain'
        # 序列切分工具
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)
        # 隐藏单元数
        self.hidden_size = 768
        
        self.dropout = 0.1
        self.rnn_hidden = 256 #rnn 隐藏单元数
        self.num_layers = 2  #rnn层数
  • Model class
class Model(nn.Module):

    def __init__(self, config):
        super(Model, self).__init__()
        # Load the pretrained BERT model
        self.bert = BertModel.from_pretrained(config.bert_path)
        # Fine-tune: keep all BERT parameters trainable
        for param in self.bert.parameters():
            param.requires_grad = True
        # Two-layer bidirectional LSTM, batch dimension first
        self.lstm = nn.LSTM(config.hidden_size, config.rnn_hidden, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.maxpool = nn.MaxPool1d(config.pad_size)  # global max pooling over the sequence dimension (kernel = pad_size)
        self.fc = nn.Linear(config.rnn_hidden * 2 + config.hidden_size, config.num_classes)

    def forward(self, x):
        context = x[0]  # input token ids (batch, seq_len)
        mask = x[2]  # attention mask, same size as the input; padding positions are 0, e.g. [1, 1, 1, 1, 0, 0]
        # encoder_out (batch, seq_len, hidden_size): last-layer encoding of every token
        # text_cls (batch, hidden_size): last-layer encoding of the [CLS] token
        encoder_out, text_cls = self.bert(context, attention_mask=mask, output_all_encoded_layers=False)

        out, _ = self.lstm(encoder_out)  # (batch, seq_len, rnn_hidden * 2)
        out = torch.cat((encoder_out, out), 2)  # (batch, seq_len, hidden_size + rnn_hidden * 2)
        out = F.relu(out)
        out = out.permute(0, 2, 1)  # (batch, hidden_size + rnn_hidden * 2, seq_len): move the feature dim to the channel position so the max pooling runs over seq_len
        out = self.maxpool(out).squeeze()  # (batch, hidden_size + rnn_hidden * 2)
        out = self.fc(out)  # (batch, num_classes)
        return out
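A standalone shape trace of the RCNN concatenation and pooling step under the defaults (pad_size=32, hidden_size=768, rnn_hidden=256). Note that nn.MaxPool1d(config.pad_size) implicitly assumes the sequence length always equals pad_size, and that squeeze() without a dim argument would also drop the batch dimension if the batch size were 1:

import torch
import torch.nn as nn

batch, seq_len = 4, 32
encoder_out = torch.randn(batch, seq_len, 768)    # stand-in for the BERT token encodings
lstm_out = torch.randn(batch, seq_len, 256 * 2)   # stand-in for the bidirectional LSTM states
cat = torch.cat((encoder_out, lstm_out), 2)       # (4, 32, 1280)
cat = cat.permute(0, 2, 1)                        # (4, 1280, 32): pool over the sequence dimension
pooled = nn.MaxPool1d(seq_len)(cat).squeeze()     # (4, 1280)
print(pooled.shape)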

5. bert+DPCNN

  • Configuration class
class Config(object):

    """配置参数"""
    def __init__(self, dataset):
        self.model_name = 'bert'
        # 训练集、验证集、测试集路径
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # 类别名单
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # 存储模型的训练结果
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # 设备

        self.require_improvement = 1000  # 若超过1000batch效果还没提升,则提前结束训练
        self.num_classes = len(self.class_list)  # 类别数
        self.num_epochs = 3  # epoch数
        self.batch_size = 128  # mini-batch大小
        self.pad_size = 32  # 每句话处理成的长度(短填长切)
        self.learning_rate = 5e-5  # 学习率

        # 预训练模型相关文件(模型文件.bin、配置文件.json、词表文件vocab.txt)存储路径
        self.bert_path = './bert_pretrain'
        # 序列切分工具
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)
        # 隐藏单元数
        self.hidden_size = 768
        self.num_filters = 250                                          # 卷积核数量(channels数)
  • Model class
class Model(nn.Module):

    def __init__(self, config):
        super(Model, self).__init__()
        # Load the pretrained BERT model
        self.bert = BertModel.from_pretrained(config.bert_path)
        # Fine-tune: keep all BERT parameters trainable
        for param in self.bert.parameters():
            param.requires_grad = True

        # Region embedding: similar to the convolution step in TextCNN
        self.conv_region = nn.Conv2d(1, config.num_filters, (3, config.hidden_size), stride=1)

        self.conv = nn.Conv2d(config.num_filters, config.num_filters, (3, 1), stride=1)
        self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
        self.padding1 = nn.ZeroPad2d((0, 0, 1, 1))  # pad one row of zeros at the top and one at the bottom
        self.padding2 = nn.ZeroPad2d((0, 0, 0, 1))  # pad one row of zeros at the bottom
        self.relu = nn.ReLU()
        self.fc = nn.Linear(config.num_filters, config.num_classes)

    def forward(self, x):
        context = x[0]  # input token ids (batch, seq_len)
        mask = x[2]  # attention mask, same size as the input; padding positions are 0, e.g. [1, 1, 1, 1, 0, 0]
        # encoder_out (batch, seq_len, hidden_size): last-layer encoding of every token
        # text_cls (batch, hidden_size): last-layer encoding of the [CLS] token
        encoder_out, text_cls = self.bert(context, attention_mask=mask, output_all_encoded_layers=False)

        x = encoder_out.unsqueeze(1)  # [batch_size, 1, seq_len, hidden_size]: add a channel dimension for 2D convolution
        x = self.conv_region(x)  # [batch_size, num_filters, seq_len-3+1, 1]

        # Equal-length convolution: the sequence length is kept constant by padding before each convolution
        # The activation is applied before the convolution (pre-activation)
        x = self.padding1(x)  # [batch_size, num_filters, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)  # [batch_size, num_filters, seq_len-3+1, 1]
        x = self.padding1(x)  # [batch_size, num_filters, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)  # [batch_size, num_filters, seq_len-3+1, 1]

        while x.size()[2] > 2:  # keep applying the block while the sequence length is greater than 2
            x = self._block(x)
        x = x.squeeze()  # [batch_size, num_filters(250)]
        x = self.fc(x)
        return x

    def _block(self, x):  # x [batch_size, num_filters, seq_len-3+1, 1]
        x = self.padding2(x)  # [batch_size, num_filters, seq_len-1, 1]
        px = self.max_pool(x)  # sequence length roughly halved: [batch_size, num_filters, (seq_len-1)/2, 1]
        # Equal-length convolution: the sequence length is kept constant by padding before each convolution
        # The activation is applied before the convolution (pre-activation)
        x = self.padding1(px)
        x = F.relu(x)
        x = self.conv(x)
        x = self.padding1(x)
        x = F.relu(x)
        x = self.conv(x)  # [batch_size, num_filters, (seq_len-1)/2, 1]

        x = x + px  # shortcut connection
        return x
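The while loop in forward keeps applying _block until the "sequence" axis is down to at most 2 positions; each block pads by one row and then roughly halves the length with the stride-2 max pooling. A small trace of how that length evolves when pad_size is 32 (so the axis starts at 32 - 3 + 1 = 30 after the region embedding and the two equal-length convolutions):

length = 30                              # after conv_region with kernel height 3 on a length-32 sequence
while length > 2:
    length = (length + 1 - 3) // 2 + 1   # padding2 adds one row, then MaxPool2d((3, 1), stride=2)
    print(length)                        # 15, 7, 3, 1 -> loop stops, squeeze() leaves (batch, num_filters)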

6. ERNIE

  • Configuration class
class Config(object):

    """配置参数"""
    def __init__(self, dataset):
        self.model_name = 'ERNIE'
        # 训练集、验证集、测试集路径
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # 类别名单
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # 存储模型的训练结果
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # 设备

        self.require_improvement = 1000  # 若超过1000batch效果还没提升,则提前结束训练
        self.num_classes = len(self.class_list)  # 类别数
        self.num_epochs = 3  # epoch数
        self.batch_size = 128  # mini-batch大小
        self.pad_size = 32  # 每句话处理成的长度(短填长切)
        self.learning_rate = 5e-5  # 学习率

        # 预训练模型相关文件(模型文件.bin、配置文件.json、词表文件vocab.txt)存储路径
        self.bert_path = './ERNIE_pretrain'
        # 序列切分工具
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)
        print(self.tokenizer)
        # 隐藏单元数
        self.hidden_size = 768
  • Model class
class Model(nn.Module):

    def __init__(self, config):
        super(Model, self).__init__()
        # Load the pretrained ERNIE model (loaded through the same BertModel class)
        self.bert = BertModel.from_pretrained(config.bert_path)
        # Fine-tune: keep all parameters trainable
        for param in self.bert.parameters():
            param.requires_grad = True  # finetuning
        # Output (classification) layer
        self.fc = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):
        context = x[0]  # input token ids  (batch, seq_len)
        mask = x[2]  # attention mask, same size as the input; padding positions are 0, e.g. [1, 1, 1, 1, 0, 0]
        _, pooled = self.bert(context, attention_mask=mask,
                              output_all_encoded_layers=False)  # pooled (batch, hidden_size): last-layer encoding of the [CLS] token
        out = self.fc(pooled)  # (batch, num_classes)
        return out
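The ERNIE module is identical to the plain BERT classifier except for the pretrained directory. Based on the config comments above, the two directories are expected to look roughly like this (the file names follow the usual pytorch_pretrained_bert convention and are an assumption, not something stated in the post):

# ./bert_pretrain/            ./ERNIE_pretrain/
#     pytorch_model.bin           pytorch_model.bin    # weights (.bin)
#     bert_config.json            bert_config.json     # model config (.json)
#     vocab.txt                   vocab.txt            # vocabulary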

 

Source: blog.csdn.net/sdu_hao/article/details/104106432