import logging
import os  # required by os.path.join / os.getcwd below; missing in the original

# Send every DEBUG-and-above message to 'log.txt' in the current working
# directory. NOTE: basicConfig only takes effect the first time it is called
# in a process.
logging.basicConfig(filename=os.path.join(os.getcwd(), 'log.txt'),
                    level=logging.DEBUG)

# Record the model architecture (its repr) in the log.
logging.debug(model)

# Record test-set loss and accuracy. Lazy %-style arguments are preferred
# over .format() so the string is only built when DEBUG logging is enabled.
# `losses`, `top1`, and `lr` are defined elsewhere in the training script
# (AverageMeter-style objects and the current learning rate — TODO confirm).
logging.debug('\nTest set: Average loss: %s, Accuracy: (%s)\n',
              losses.avg, top1.avg)

# Record the current learning rate.
logging.debug('lr: %s', lr)
Saving log files
You may also like
Reposted from blog.csdn.net/kkkxiong1/article/details/80052055
Today's recommendations
Weekly ranking