Continuing from the previous post. This time the changes touch quite a bit of code; corrections from more experienced readers are welcome.
Run command:
python tools/train_net.py --use-tensorboard --config-file experiments/cfgs/e2e_faster_rcnn_R_50_FPN_1x.yaml
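Once training is writing event files, the curves can be viewed with TensorBoard (assuming TensorBoard itself is installed; the default log location comes from the TENSORBOARD_EXPERIMENT setting added in step 1 below):
tensorboard --logdir logs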
1. Modify maskrcnn_benchmark/config/defaults.py and append at the end:
_C.TENSORBOARD_EXPERIMENT = "logs/maskrcnn-benchmark" # tensorboard experiment location
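Because main() in train_net.py merges trailing command-line arguments into the config with cfg.merge_from_list(args.opts) (see the full listing at the end), the new key can also be overridden per run without editing defaults.py; a hypothetical example (the output path here is made up):
python tools/train_net.py --use-tensorboard --config-file experiments/cfgs/e2e_faster_rcnn_R_50_FPN_1x.yaml TENSORBOARD_EXPERIMENT logs/faster_rcnn_r50_fpn_1x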
2. Modify maskrcnn_benchmark/engine/trainer.py as follows:
from maskrcnn_benchmark.utils.comm import get_world_size
# from maskrcnn_benchmark.utils.metric_logger import MetricLogger  # commented out
.........
def do_train(
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    meters  # added parameter
):
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    # meters = MetricLogger(delimiter=" ")  # commented out
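For context, the rest of do_train stays unchanged: the training loop keeps feeding meters with the reduced loss dict and timing stats, so whichever logger is passed in (MetricLogger or the new TensorboardLogger) receives every scalar. A simplified, from-memory sketch of that existing loop body, not a verbatim copy of the repository code:
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        loss_dict_reduced = reduce_loss_dict(loss_dict)  # average losses across GPUs for logging
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)  # TensorboardLogger.update() hooks in here
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        scheduler.step()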
3. Modify maskrcnn_benchmark/utils/metric_logger.py:
import time  # added
from collections import defaultdict
from collections import deque
from datetime import datetime  # added
import torch
from .comm import is_main_process  # added
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
            )
        return self.delimiter.join(loss_str)
############ Add the following code
class TensorboardLogger(MetricLogger):
    def __init__(self,
                 log_dir,
                 start_iter=0,
                 delimiter='\t'):
        super(TensorboardLogger, self).__init__(delimiter)
        self.iteration = start_iter
        self.writer = self._get_tensorboard_writer(log_dir)

    @staticmethod
    def _get_tensorboard_writer(log_dir):
        try:
            from tensorboardX import SummaryWriter
        except ImportError:
            raise ImportError(
                'To use tensorboard please install tensorboardX '
                '[ pip install tensorflow tensorboardX ].'
            )

        if is_main_process():
            timestamp = datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%H:%M')
            tb_logger = SummaryWriter('{}-{}'.format(log_dir, timestamp))
            return tb_logger
        else:
            return None

    def update(self, **kwargs):
        super(TensorboardLogger, self).update(**kwargs)
        if self.writer:
            for k, v in kwargs.items():
                if isinstance(v, torch.Tensor):
                    v = v.item()
                assert isinstance(v, (float, int))
                self.writer.add_scalar(k, v, self.iteration)
            self.iteration += 1
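To sanity-check the new class outside of a full training run, a minimal smoke test can be used (a sketch only; the /tmp path and the fake loss values are made up, and it assumes tensorboardX is installed):
import torch
from maskrcnn_benchmark.utils.metric_logger import TensorboardLogger

# Log a few fake scalars; an event directory named /tmp/tb-smoke-test-<timestamp> should appear.
meters = TensorboardLogger(log_dir="/tmp/tb-smoke-test", start_iter=0, delimiter="  ")
for step in range(5):
    meters.update(loss=torch.tensor(1.0 / (step + 1)), lr=0.001)
print(str(meters))  # same console summary that MetricLogger prints during training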
4. Modify tools/train_net.py:
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
# from maskrcnn_benchmark.solver import make_lr_scheduler  # commented out
# from maskrcnn_benchmark.solver import make_optimizer  # commented out
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.solver import make_lr_scheduler  # added
from maskrcnn_benchmark.solver import make_optimizer  # added
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.metric_logger import (
    MetricLogger, TensorboardLogger)  # added
from maskrcnn_benchmark.utils.miscellaneous import mkdir
# def train(cfg, local_rank, distributed):  # commented out, replaced with the definition below
def train(cfg, local_rank, distributed, use_tensorboard=False):
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    .........
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD  # after checkpoint_period, add the following
    if use_tensorboard:
        meters = TensorboardLogger(
            log_dir=cfg.TENSORBOARD_EXPERIMENT,
            start_iter=arguments['iteration'],
            delimiter=" ")
    else:
        meters = MetricLogger(delimiter=" ")
    do_train(
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        device,
        checkpoint_period,
        arguments,
        meters  # added meters
    )
help="Do not test the final model",
action="store_true",
)#添加下面的函数
    parser.add_argument(
        "--use-tensorboard",
        dest="use_tensorboard",
        help="Use tensorboardX logger (Requires tensorboardX installed)",
        action="store_true",
        default=False
    )
    # model = train(cfg, args.local_rank, args.distributed)  # commented out, replaced with the call below
    model = train(
        cfg=cfg,
        local_rank=args.local_rank,
        distributed=args.distributed,
        use_tensorboard=args.use_tensorboard
    )
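The new flag also composes with the usual multi-GPU launch. For example, with 2 GPUs (torch.distributed.launch sets WORLD_SIZE and --local_rank, which main() already reads; the GPU count here is just an illustration):
python -m torch.distributed.launch --nproc_per_node=2 tools/train_net.py --use-tensorboard --config-file experiments/cfgs/e2e_faster_rcnn_R_50_FPN_1x.yaml
Since _get_tensorboard_writer only builds a SummaryWriter when is_main_process() is true, only rank 0 writes event files; the other ranks get writer=None and skip the add_scalar calls.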
Full train_net.py code:
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
Basic training script for PyTorch
"""
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (do not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
#from maskrcnn_benchmark.solver import make_lr_scheduler
#from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.metric_logger import (
    MetricLogger, TensorboardLogger)
from maskrcnn_benchmark.utils.miscellaneous import mkdir
#def train(cfg, local_rank, distributed):
def train(cfg, local_rank, distributed, use_tensorboard=False):
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank,
            # this should be removed if we update BatchNorm stats
            broadcast_buffers=False,
        )
    arguments = {}
    arguments["iteration"] = 0
    output_dir = cfg.OUTPUT_DIR
    save_to_disk = get_rank() == 0
    checkpointer = DetectronCheckpointer(
        cfg, model, optimizer, scheduler, output_dir, save_to_disk
    )
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)
    arguments.update(extra_checkpoint_data)
    data_loader = make_data_loader(
        cfg,
        is_train=True,
        is_distributed=distributed,
        start_iter=arguments["iteration"],
    )
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    if use_tensorboard:
        meters = TensorboardLogger(
            log_dir=cfg.TENSORBOARD_EXPERIMENT,
            start_iter=arguments['iteration'],
            delimiter=" ")
    else:
        meters = MetricLogger(delimiter=" ")
    do_train(
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        device,
        checkpoint_period,
        arguments,
        meters
    )
    return model
def run_test(cfg, model, distributed):
    if distributed:
        model = model.module
    torch.cuda.empty_cache()  # TODO check if it helps
    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints",)
    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
        inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
        )
        synchronize()
def main():
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "--use-tensorboard",
        dest="use_tensorboard",
        help="Use tensorboardX logger (Requires tensorboardX installed)",
        action="store_true",
        default=False
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        synchronize()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)
    logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)
    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))
    # model = train(cfg, args.local_rank, args.distributed)
    model = train(
        cfg=cfg,
        local_rank=args.local_rank,
        distributed=args.distributed,
        use_tensorboard=args.use_tensorboard
    )
    if not args.skip_test:
        run_test(cfg, model, args.distributed)

if __name__ == "__main__":
    main()