Commit db9aaac7 authored by Kai Chen

fix eval hooks

parent 0683b50d
from .class_names import (voc_classes, imagenet_det_classes,
imagenet_vid_classes, coco_classes, dataset_aliases,
get_classes)
from .coco_utils import coco_eval, results2json
from .eval_hooks import DistEvalHook, DistEvalRecallHook, CocoDistEvalmAPHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, print_recall_summary, plot_num_recall,
plot_iou_recall)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'dataset_aliases', 'get_classes', 'coco_eval',
'results2json', 'DistEvalHook', 'DistEvalRecallHook',
'CocoDistEvalmAPHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall'
]
import mmcv
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
@@ -24,3 +25,77 @@ def coco_eval(result_file, result_types, coco, max_dets=(100, 300, 1000)):
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
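# The helper above converts [x1, y1, x2, y2] corner boxes to COCO-style
# [x, y, w, h]; the "+ 1" keeps the inclusive-pixel convention used
# throughout this codebase. Illustrative check (not part of the commit):
#
#   >>> import numpy as np
#   >>> xyxy2xywh(np.array([10., 20., 49., 59.]))
#   [10.0, 20.0, 40.0, 40.0]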
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def det2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
json_results.append(data)
return json_results
def segm2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
bboxes = det[label]
segms = seg[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
json_results.append(data)
return json_results
def results2json(dataset, results, out_file):
if isinstance(results[0], list):
json_results = det2json(dataset, results)
elif isinstance(results[0], tuple):
json_results = segm2json(dataset, results)
elif isinstance(results[0], np.ndarray):
json_results = proposal2json(dataset, results)
else:
raise TypeError('invalid type of results')
mmcv.dump(json_results, out_file)
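# Sketch of how these helpers chain together; the file name and the
# hypothetical single_test() loop are illustrative assumptions, not part
# of this commit:
#
#   results = single_test(model, data_loader)  # per-image detections
#   results2json(dataset, results, 'results.json')
#   coco_eval('results.json', ['bbox'], dataset.coco)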
import os
import os.path as osp
import shutil
import time
import mmcv
import numpy as np
import torch
from mmcv.torchpack import Hook, obj_from_dict
from pycocotools.cocoeval import COCOeval
from torch.utils.data import Dataset
from .coco_utils import results2json
from .recall import eval_recalls
from ..parallel import scatter
from mmdet import datasets
from mmdet.datasets.loader import collate
class DistEvalHook(Hook):
def __init__(self, dataset, interval=1):
if isinstance(dataset, Dataset):
self.dataset = dataset
elif isinstance(dataset, dict):
self.dataset = obj_from_dict(dataset, datasets,
{'test_mode': True})
else:
raise TypeError(
'dataset must be a Dataset object or a dict, not {}'.format(
type(dataset)))
self.interval = interval
self.lock_dir = None
def _barrier(self, rank, world_size):
"""Due to some issues with `torch.distributed.barrier()`, we have to
implement this ugly barrier function.
"""
if rank == 0:
for i in range(1, world_size):
tmp = osp.join(self.lock_dir, '{}.pkl'.format(i))
while not osp.exists(tmp):
time.sleep(1)
for i in range(1, world_size):
tmp = osp.join(self.lock_dir, '{}.pkl'.format(i))
os.remove(tmp)
else:
tmp = osp.join(self.lock_dir, '{}.pkl'.format(rank))
mmcv.dump([], tmp)
while osp.exists(tmp):
time.sleep(1)
def before_run(self, runner):
self.lock_dir = osp.join(runner.work_dir, '.lock_map_hook')
if runner.rank == 0:
if osp.exists(self.lock_dir):
shutil.rmtree(self.lock_dir)
mmcv.mkdir_or_exist(self.lock_dir)
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
runner.model.eval()
results = [None for _ in range(len(self.dataset))]
prog_bar = mmcv.ProgressBar(len(self.dataset))
for idx in range(runner.rank, len(self.dataset), runner.world_size):
data = self.dataset[idx]
data_gpu = scatter(
collate([data], samples_per_gpu=1),
[torch.cuda.current_device()])[0]
# compute output
with torch.no_grad():
result = runner.model(
**data_gpu, return_loss=False, rescale=True)
results[idx] = result
batch_size = runner.world_size
for _ in range(batch_size):
prog_bar.update()
if runner.rank == 0:
print('\n')
self._barrier(runner.rank, runner.world_size)
for i in range(1, runner.world_size):
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
tmp_results = mmcv.load(tmp_file)
for idx in range(i, len(results), runner.world_size):
results[idx] = tmp_results[idx]
os.remove(tmp_file)
self.evaluate(runner, results)
else:
tmp_file = osp.join(runner.work_dir,
'temp_{}.pkl'.format(runner.rank))
mmcv.dump(results, tmp_file)
self._barrier(runner.rank, runner.world_size)
self._barrier(runner.rank, runner.world_size)
def evaluate(self, runner, results):
raise NotImplementedError
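# Note on after_train_epoch: rank r evaluates dataset indices
# r, r + world_size, r + 2 * world_size, ...; every rank except 0 dumps
# its partial results to temp_<rank>.pkl, and rank 0 merges those pickles
# into one list before calling evaluate(). The trailing _barrier() calls
# keep workers from racing ahead while rank 0 is still reading the files.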
class DistEvalRecallHook(DistEvalHook):
def __init__(self,
dataset,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05)):
super(DistEvalRecallHook, self).__init__(dataset)
self.proposal_nums = np.array(proposal_nums, dtype=np.int32)
self.iou_thrs = np.array(iou_thrs, dtype=np.float32)
def evaluate(self, runner, results):
# The official COCO evaluation is too slow, so we use our own
# implementation here, which may give slightly different results.
gt_bboxes = []
for i in range(len(self.dataset)):
img_id = self.dataset.img_ids[i]
ann_ids = self.dataset.coco.getAnnIds(imgIds=img_id)
ann_info = self.dataset.coco.loadAnns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes,
results,
self.proposal_nums,
self.iou_thrs,
print_summary=False)
ar = recalls.mean(axis=1)
for i, num in enumerate(self.proposal_nums):
runner.log_buffer.output['AR@{}'.format(num)] = ar[i]
runner.log_buffer.ready = True
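# eval_recalls returns a (num_proposal_nums, num_iou_thrs) matrix, so the
# mean over axis 1 is the average recall (AR) per proposal budget, which
# is what gets logged as AR@100, AR@300 and AR@1000 above.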
class CocoDistEvalmAPHook(DistEvalHook):
def evaluate(self, runner, results):
tmp_file = osp.join(runner.work_dir, 'temp_0.json')
results2json(self.dataset, results, tmp_file)
res_types = ['bbox', 'segm'] if runner.model.module.with_mask else ['bbox']
cocoGt = self.dataset.coco
cocoDt = cocoGt.loadRes(tmp_file)
imgIds = cocoGt.getImgIds()
for res_type in res_types:
iou_type = res_type
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
field = '{}_mAP'.format(res_type)
runner.log_buffer.output[field] = cocoEval.stats[0]
runner.log_buffer.ready = True
os.remove(tmp_file)
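# Minimal usage sketch (runner construction elided; assumes a COCO-style
# val dataset config as in the configs further below):
#
#   runner.register_hook(CocoDistEvalmAPHook(cfg.data.val))
#   # or, for proposal-only models such as RPN:
#   runner.register_hook(DistEvalRecallHook(cfg.data.val))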
from .dist_utils import (init_dist, reduce_grads, DistOptimizerHook,
DistSamplerSeedHook)
from .hooks import EmptyCacheHook
from .misc import tensor2imgs, unmap, multi_apply
__all__ = [
'init_dist', 'reduce_grads', 'DistOptimizerHook', 'DistSamplerSeedHook',
'EmptyCacheHook', 'tensor2imgs', 'unmap', 'multi_apply'
]
@@ -39,7 +39,7 @@ def _init_dist_slurm(backend, **kwargs):
# modified from https://github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py#L9
def all_reduce_coalesced(tensors):
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
@@ -64,7 +64,7 @@ def reduce_grads(model, coalesce=True):
if param.requires_grad and param.grad is not None
]
if coalesce:
all_reduce_coalesced(grads)
else:
for tensor in grads:
dist.all_reduce(tensor)
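# all_reduce_coalesced (adapted from the apex link above) buckets
# gradients by tensor type so they can be reduced with far fewer
# all_reduce calls; the else branch is the slower per-tensor fallback.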
......
import torch
from mmcv.torchpack import Hook
class EmptyCacheHook(Hook):
@@ -21,220 +9,3 @@ class EmptyCacheHook(Hook):
def after_epoch(self, runner):
torch.cuda.empty_cache()
@@ -4,6 +4,7 @@ import mmcv
import numpy as np
from six.moves import map, zip
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
@@ -33,77 +34,3 @@ def unmap(data, count, inds, fill=0):
ret = data.new_full(new_size, fill)
ret[inds, :] = data
return ret
@@ -93,16 +93,26 @@ data = dict(
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
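# The new val= section is what the eval hooks consume: DistEvalHook turns
# it into a dataset via obj_from_dict(dataset, datasets, {'test_mode': True}).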
@@ -128,7 +138,7 @@ log_config = dict(
# runtime settings
total_epochs = 12
device_ids = range(8)
dist_params = dict(backend='gloo')
log_level = 'INFO'
work_dir = './work_dirs/fpn_faster_rcnn_r50_1x'
load_from = None
......
@@ -106,16 +106,26 @@ data = dict(
flip_ratio=0.5,
with_mask=True,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
@@ -141,7 +151,7 @@ log_config = dict(
# runtime settings
total_epochs = 12
device_ids = range(8)
dist_params = dict(backend='gloo')
log_level = 'INFO'
work_dir = './work_dirs/fpn_mask_rcnn_r50_1x'
load_from = None
......
@@ -65,16 +65,26 @@ data = dict(
flip_ratio=0.5,
with_mask=False,
with_crowd=False,
with_label=False),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=False),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
@@ -103,5 +113,5 @@ dist_params = dict(backend='gloo')
log_level = 'INFO'
work_dir = './work_dirs/fpn_rpn_r50_1x'
load_from = None
resume_from = None
workflow = [('train', 1), ('val', 1)]
@@ -2,4 +2,4 @@
PYTHON=${PYTHON:-"python"}
$PYTHON -m torch.distributed.launch --nproc_per_node=$2 train.py $1 --launcher pytorch $3
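# Example invocation (paths illustrative); the new trailing $3 forwards
# one extra argument through to train.py:
#   ./dist_train.sh <config_file> <num_gpus> [extra train.py arg]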
@@ -59,7 +59,7 @@ def main():
cfg.model.pretrained = None
cfg.data.test.test_mode = True
dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
if args.gpus == 1:
model = build_detector(
cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
......
@@ -9,9 +9,10 @@ from mmcv.torchpack import Runner, obj_from_dict
from mmdet import datasets
from mmdet.core import (init_dist, DistOptimizerHook, DistSamplerSeedHook,
MMDataParallel, MMDistributedDataParallel,
DistEvalRecallHook, CocoDistEvalmAPHook)
from mmdet.datasets.loader import build_dataloader
from mmdet.models import build_detector, RPN
def parse_losses(losses):
@@ -109,6 +110,11 @@ def main():
cfg.checkpoint_config, cfg.log_config)
if dist:
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if isinstance(model.module, RPN):
runner.register_hook(DistEvalRecallHook(cfg.data.val))
elif cfg.data.val.type == 'CocoDataset':
runner.register_hook(CocoDistEvalmAPHook(cfg.data.val))
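# RPN models are evaluated by proposal recall, anything else trained on
# CocoDataset by COCO mAP; both hooks read cfg.data.val.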
if cfg.resume_from:
runner.resume(cfg.resume_from)
......