HuggingFace Learning Notes--Using the Trainer

1--Using the Trainer

The Trainer makes it quick to set up model training; in general you need to specify the model to train and the training-related arguments.

1-1--Simple Demo Code

import evaluate
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("hfl/rbt3") # load the tokenizer

def process_function(examples):
    tokenized_examples = tokenizer(examples["review"], max_length=128, truncation=True)
    tokenized_examples["labels"] = examples["label"]
    return tokenized_examples
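# Note: padding is deliberately omitted in process_function; the DataCollatorWithPadding
# passed to the Trainer below pads each batch dynamically to its longest sequence,
# which avoids padding every sample to max_length up front.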

# build the evaluation function
acc_metric = evaluate.load("accuracy")
f1_metric = evaluate.load("f1")
def eval_metric(eval_predict):
    predictions, labels = eval_predict
    predictions = predictions.argmax(axis=-1)  # logits -> predicted class ids
    acc = acc_metric.compute(predictions=predictions, references=labels)
    f1 = f1_metric.compute(predictions=predictions, references=labels)
    acc.update(f1)  # merge into one dict: {"accuracy": ..., "f1": ...}
    return acc
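# Note: the Trainer logs these keys as "eval_accuracy" and "eval_f1" during evaluation;
# metric_for_best_model="f1" below still matches, because the Trainer automatically
# prepends "eval_" to the metric name when it lacks that prefix.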

if __name__ == "__main__":
    # load and preprocess the dataset
    dataset = load_dataset("csv", data_files="./ChnSentiCorp_htl_all.csv", split="train")
    dataset = dataset.filter(lambda x: x["review"] is not None)  # drop rows whose review field is None
    datasets = dataset.train_test_split(test_size=0.1)  # split into training and test sets
    tokenized_datasets = datasets.map(process_function, batched=True, remove_columns=datasets["train"].column_names)
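    # Note: remove_columns drops the raw "review" and "label" columns after mapping, so each
    # batch handed to the data collator contains only the tokenizer outputs plus "labels".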
    
    # initialize the model
    model = AutoModelForSequenceClassification.from_pretrained("hfl/rbt3")
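    # Note: hfl/rbt3 ships only the pretrained backbone, so the sequence-classification head
    # is newly initialized here (transformers prints a warning about it); num_labels defaults
    # to 2, which matches the binary sentiment labels in this dataset.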
    # print(model.config)
    
    # training arguments
    train_args = TrainingArguments(output_dir="./checkpoints",  # output directory
                               per_device_train_batch_size=64,  # per-device batch size for training
                               per_device_eval_batch_size=128,  # per-device batch size for evaluation
                               logging_steps=10,                # log every 10 steps
                               evaluation_strategy="epoch",     # evaluate at the end of every epoch
                               num_train_epochs=5,              # number of training epochs
                               save_strategy="epoch",           # save a checkpoint every epoch
                               save_total_limit=3,              # keep at most 3 checkpoints
                               learning_rate=2e-5,              # learning rate
                               weight_decay=0.01,               # weight decay
                               metric_for_best_model="f1",      # metric that defines the "best" model
                               load_best_model_at_end=True)     # reload the best checkpoint when training ends
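    # Note: load_best_model_at_end=True requires save_strategy to match evaluation_strategy
    # (both "epoch" here), and the best checkpoint is retained even when save_total_limit
    # prunes older ones.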
    
    # create the trainer
    trainer = Trainer(model=model,                             # model to train
                  args=train_args,                             # training arguments
                  train_dataset=tokenized_datasets["train"],   # training set
                  eval_dataset=tokenized_datasets["test"],     # evaluation set
                  data_collator=DataCollatorWithPadding(tokenizer=tokenizer),  # dynamic per-batch padding
                  compute_metrics=eval_metric)                 # metric function
    
    # train the model
    trainer.train()
    # evaluate the model
    trainer.evaluate(tokenized_datasets["test"])
    # predict on the test set
    trainer.predict(tokenized_datasets["test"])
    print("All done!")
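
trainer.predict returns a PredictionOutput named tuple with the fields predictions, label_ids and metrics, and because load_best_model_at_end=True the in-memory model is already the best checkpoint once training finishes. The sketch below, which could be appended inside the __main__ block above, inspects that output and runs inference on a new sentence via the pipeline API; the sample sentence is made up, and the 0 = negative / 1 = positive labelling follows ChnSentiCorp's convention.

    from transformers import pipeline

    # inspect the PredictionOutput returned by trainer.predict
    pred_output = trainer.predict(tokenized_datasets["test"])
    print(pred_output.metrics)                          # keys are prefixed with "test_", e.g. test_f1
    pred_ids = pred_output.predictions.argmax(axis=-1)  # logits -> predicted class ids

    # run the fine-tuned model on new text
    classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
    print(classifier("房间很干净,服务也不错"))  # e.g. [{'label': 'LABEL_1', 'score': ...}]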

Reprinted from blog.csdn.net/weixin_43863869/article/details/134741857