Multimodal Large Models: A BLIP Code Walkthrough

1) Config file retrieval_coco.yaml


dataset: 'coco'  # dataset name
image_root: '/export/share/datasets/vision/coco/images/'  # image root directory
ann_root: 'annotation'  # annotation root directory

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'  # path or URL of the pretrained model

# size of vit model; base or large
vit: 'base'  # ViT size, 'base' or 'large'
batch_size_train: 32  # training batch size
batch_size_test: 64  # test batch size
vit_grad_ckpt: True  # whether to use gradient checkpointing in the ViT
vit_ckpt_layer: 4  # ViT layer from which checkpointing is applied
init_lr: 1e-5  # initial learning rate

image_size: 384  # input image size
queue_size: 57600  # size of the momentum feature queue
alpha: 0.4  # weighting hyperparameter in the loss
k_test: 256  # number of top candidates re-ranked at test time
negative_all_rank: True  # whether to mine negatives across all ranks

# optimizer
weight_decay: 0.05  # weight decay
min_lr: 0  # minimum learning rate
max_epoch: 6  # maximum number of training epochs
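
These keys end up in an ordinary Python dict. A minimal sketch of loading the config, mirroring the yaml.load call shown in the next section:

import yaml

# Load the retrieval config the same way train_retrieval.py does.
config = yaml.load(open('configs/retrieval_coco.yaml', 'r'), Loader=yaml.Loader)

print(config['vit'])               # 'base'
print(config['image_size'])        # 384
print(config['batch_size_train'])  # 32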

2) train_retrieval.py


# Imports assumed by the excerpts below (taken from the identifiers they use):
import argparse
import os
import random
import time
import json
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import yaml
from pathlib import Path

import utils  # repo-local helpers
from models.blip_retrieval import blip_retrieval
from data import create_dataset, create_sampler, create_loader
from utils import cosine_lr_schedule

if __name__ == '__main__':
    parser = argparse.ArgumentParser()  # create the argument parser
    parser.add_argument('--config', default='./configs/retrieval_flickr.yaml')  # path to the config file
    parser.add_argument('--output_dir', default='output/Retrieval_flickr')      # directory for checkpoints and logs
    parser.add_argument('--evaluate', action='store_true')  # run evaluation only if the flag is present
    parser.add_argument('--device', default='cuda')  # device to run on
    parser.add_argument('--seed', default=42, type=int)  # random seed
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)  # note: argparse's type=bool treats any non-empty string as True
    args = parser.parse_args()  # parse the command-line arguments

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)  # load the YAML config

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)  # create the output directory

    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))  # save a copy of the config

    main(args, config)  # run training/evaluation
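
main() begins by calling utils.init_distributed_mode(args), which is not reproduced in this post. As a hedged sketch (assuming it follows the common DETR-style recipe of reading the env:// variables that torch.distributed.run exports, which matches the --dist_url default above):

import os
import torch
import torch.distributed as dist

def init_distributed_mode_sketch(args):
    # Pick up the rank/world-size environment set by torch.distributed.run.
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    else:
        args.distributed = False  # single-process fallback
        return

    torch.cuda.set_device(args.gpu)
    dist.init_process_group(backend='nccl', init_method=args.dist_url,
                            world_size=args.world_size, rank=args.rank)
    dist.barrier()  # wait until every process has joined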


def main(args, config):
    # initialize distributed mode
    utils.init_distributed_mode(args)

    # get the device
    device = torch.device(args.device)

    # fix the random seeds (offset by rank so each process seeds differently)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    # create the retrieval datasets
    print("Creating retrieval dataset")
    train_dataset, val_dataset, test_dataset = create_dataset('retrieval_%s'%config['dataset'], config)  

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()            
        samplers = create_sampler([train_dataset], [True], num_tasks, global_rank) + [None, None]
    else:
        samplers = [None, None, None]

    # 创建数据加载器
    train_loader, val_loader, test_loader = create_loader([train_dataset, val_dataset, test_dataset],samplers,
                                                        batch_size=[config['batch_size_train']]+[config['batch_size_test']]*2,
                                                        num_workers=[4,4,4],
                                                        is_trains=[True, False, False], 
                                                        collate_fns=[None,None,None])
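
    # (Sketch, not verbatim from the repo: create_sampler presumably wraps
    # torch.utils.data.DistributedSampler so each process sees its own shard,
    # e.g. DistributedSampler(train_dataset, num_replicas=num_tasks,
    # rank=global_rank, shuffle=True); val/test keep sampler=None above.)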


    #### Model ####
    # create the model
    print("Creating model")
    model = blip_retrieval(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'], 
                             vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'], 
                             queue_size=config['queue_size'], negative_all_rank=config['negative_all_rank'])

    model = model.to(device)   
    
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module   

    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay']) 
    
    best = 0
    best_epoch = 0

    # start training
    print("Start training")
    start_time = time.time()    

    for epoch in range(0, config['max_epoch']):    
        if not args.evaluate:        
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)
                
            cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])
            
            train_stats = train(model, train_loader, optimizer, epoch, device, config)  
            
        score_val_i2t, score_val_t2i = evaluation(model_without_ddp, val_loader, device, config)
        score_test_i2t, score_test_t2i = evaluation(model_without_ddp, test_loader, device, config)

        if utils.is_main_process():

            val_result = itm_eval(score_val_i2t, score_val_t2i, val_loader.dataset.txt2img, val_loader.dataset.img2txt)
            print(val_result)
                                
            if val_result['r_mean'] > best:
                save_obj = {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'config': config,
                    'epoch': epoch,
                }
                torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
                best = val_result['r_mean']
                best_epoch = epoch

                test_result = itm_eval(score_test_i2t, score_test_t2i, test_loader.dataset.txt2img, test_loader.dataset.img2txt)
                print(test_result)
            
            if args.evaluate:
                log_stats = {**{f'val_{k}': v for k, v in val_result.items()},
                             **{f'test_{k}': v for k, v in test_result.items()},
                            }
                with open(os.path.join(args.output_dir, "evaluate.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
            else:
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             **{f'val_{k}': v for k, v in val_result.items()},
                             **{f'test_{k}': v for k, v in test_result.items()},
                             'epoch': epoch,
                            }
                with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
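
Each epoch starts with cosine_lr_schedule, another utils helper not shown in this post. A minimal sketch consistent with how it is called above (epoch-granularity cosine decay from init_lr toward min_lr):

import math

def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    # Cosine-decay the learning rate from init_lr (epoch 0) toward min_lr.
    lr = (init_lr - min_lr) * 0.5 * (1.0 + math.cos(math.pi * epoch / max_epoch)) + min_lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

With init_lr=1e-5, min_lr=0 and max_epoch=6 from the config, epoch 0 trains at 1e-5 and epoch 3 at 5e-6.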



def train(model, data_loader, optimizer, epoch, device, config):
    # train
    model.train()  
    
    # set up the metric logger
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50

    # iterate over the training batches
    for i,(image, caption, idx) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # move the data to the device
        image = image.to(device,non_blocking=True)   
        idx = idx.to(device,non_blocking=True)   
       
        # compute alpha: ramp the soft-target weight up linearly during epoch 0
        if epoch > 0:
            alpha = config['alpha']
        else:
            alpha = config['alpha'] * min(1, i / len(data_loader))


        loss_ita, loss_itm = model(image, caption, alpha=alpha, idx=idx)                  
        loss = loss_ita + loss_itm
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()    
        
        metric_logger.update(loss_itm=loss_itm.item())
        metric_logger.update(loss_ita=loss_ita.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())     
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}

# Parameters of the train function above:
# model: the model being trained
# data_loader: the training data loader
# optimizer: the optimizer
# epoch: the current epoch index
# device: the training device
# config: the configuration dict

# Next, the evaluation function:


@torch.no_grad()
def evaluation(model, data_loader, device, config):
    # test
    model.eval() 
    
    # set up the metric logger
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'    
    
    # compute text features
    print('Computing features for evaluation...')
    start_time = time.time()  

    texts = data_loader.dataset.text   
    num_text = len(texts)
    text_bs = 256
    text_ids = []
    text_embeds = []  
    text_atts = []
    for i in range(0, num_text, text_bs):
        text = texts[i: min(num_text, i+text_bs)]
        text_input = model.tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(device) 
        text_output = model.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text')  
        text_embed = F.normalize(model.text_proj(text_output.last_hidden_state[:,0,:]))
        text_embeds.append(text_embed)   
        text_ids.append(text_input.input_ids)
        text_atts.append(text_input.attention_mask)
    
    text_embeds = torch.cat(text_embeds,dim=0)
    text_ids = torch.cat(text_ids,dim=0)
    text_atts = torch.cat(text_atts,dim=0)
    text_ids[:,0] = model.tokenizer.enc_token_id
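    # Replacing the first token with the [ENC] token switches the text encoder
    # into image-grounded (ITM) mode when these cached ids are reused below.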
    
    # compute image features
    image_feats = []
    image_embeds = []
    for image, img_id in data_loader: 
        image = image.to(device) 
        image_feat = model.visual_encoder(image)   
        image_embed = model.vision_proj(image_feat[:,0,:])            
        image_embed = F.normalize(image_embed,dim=-1)      
        
        image_feats.append(image_feat.cpu())
        image_embeds.append(image_embed)
     
    image_feats = torch.cat(image_feats,dim=0)
    image_embeds = torch.cat(image_embeds,dim=0)
    
    # compute the ITC similarity matrix
    sims_matrix = image_embeds @ text_embeds.t()
    score_matrix_i2t = torch.full((len(data_loader.dataset.image),len(texts)),-100.0).to(device)
    
    num_tasks = utils.get_world_size()
    rank = utils.get_rank() 
    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0),start+step)

    # compute the image-to-text score matrix (ITM re-ranking of the top-k_test candidates)
    for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)): 
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)

        encoder_output = image_feats[start+i].repeat(config['k_test'],1,1).to(device)
        encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device)
        output = model.text_encoder(text_ids[topk_idx], 
                                    attention_mask = text_atts[topk_idx],
                                    encoder_hidden_states = encoder_output,
                                    encoder_attention_mask = encoder_att,                             
                                    return_dict = True,
                                   )
        score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
        score_matrix_i2t[start+i,topk_idx] = score + topk_sim
        
    sims_matrix = sims_matrix.t()
    score_matrix_t2i = torch.full((len(texts),len(data_loader.dataset.image)),-100.0).to(device)
    
    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0),start+step)    
    
    # compute the text-to-image score matrix
    for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = image_feats[topk_idx].to(device)
        encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(device)
        output = model.text_encoder(text_ids[start+i].repeat(config['k_test'], 1),
                                    attention_mask=text_atts[start+i].repeat(config['k_test'], 1),
                                    encoder_hidden_states=encoder_output,
                                    encoder_attention_mask=encoder_att,
                                    return_dict=True,
                                   )
        score = model.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
        score_matrix_t2i[start+i, topk_idx] = score + topk_sim

    # (in the distributed case the partial score matrices are additionally
    # all-reduced across processes before returning)
    return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
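
Both score loops implement the same two-stage retrieval: the cheap ITC similarity gives a coarse ranking, then only the top k_test=256 candidates are re-scored with the heavier cross-attention ITM head, and the final score is itm_score + itc_sim. A schematic sketch of the pattern (names are illustrative, not from the repo):

import torch

def rerank_row_sketch(sims_row, k, itm_score_fn):
    # Stage 1: coarse ranking by ITC similarity (cheap dot products).
    topk_sim, topk_idx = sims_row.topk(k=k, dim=0)
    # Stage 2: expensive ITM scoring, run only on the top-k candidates.
    full = torch.full_like(sims_row, -100.0)  # non-candidates stay at -100
    full[topk_idx] = itm_score_fn(topk_idx) + topk_sim
    return full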



def itm_eval(scores_i2t, scores_t2i, txt2img, img2txt):
    
    # image-to-text ranks (text retrieval)
    ranks = np.zeros(scores_i2t.shape[0])
    for index,score in enumerate(scores_i2t):
        inds = np.argsort(score)[::-1]
        # best (lowest) rank among this image's ground-truth captions
        rank = 1e20
        for i in img2txt[index]:
            tmp = np.where(inds == i)[0][0]
            if tmp < rank:
                rank = tmp
        ranks[index] = rank

    # text-retrieval recall metrics
    tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
  
    # text-to-image ranks (image retrieval)
    ranks = np.zeros(scores_t2i.shape[0])
    
    for index,score in enumerate(scores_t2i):
        inds = np.argsort(score)[::-1]
        ranks[index] = np.where(inds == txt2img[index])[0][0]

    # image-retrieval recall metrics
    ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)        

    tr_mean = (tr1 + tr5 + tr10) / 3
    ir_mean = (ir1 + ir5 + ir10) / 3
    r_mean = (tr_mean + ir_mean) / 2

    # return the evaluation results
    eval_result = {'txt_r1': tr1,
                   'txt_r5': tr5,
                   'txt_r10': tr10,
                   'txt_r_mean': tr_mean,
                   'img_r1': ir1,
                   'img_r5': ir5,
                   'img_r10': ir10,
                   'img_r_mean': ir_mean,
                   'r_mean': r_mean}
    return eval_result
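
As a quick sanity check of the recall logic, suppose three queries whose ground truth sits at (0-based) ranks 0, 3, and 12:

import numpy as np

ranks = np.array([0, 3, 12])  # best ground-truth rank per query
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)    # 33.3 (only rank 0)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)    # 66.7 (ranks 0 and 3)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)  # 66.7 (rank 12 misses)
print(r1, r5, r10)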


Reposted from blog.csdn.net/qq_40905284/article/details/130686913