Unverified commit 25ede58a authored by Kamran Melikov, committed by GitHub

Add ability to specify device ids for non-distributed training (#2400)

* Add ability to specify device ids for non-distributed training

Changes to be committed:
modified:   mmdet/apis/train.py
modified:   tools/train.py

* Deprecate cfg.gpus in favor of cfg.gpu_ids

Changes to be committed:
modified:   mmdet/apis/train.py
modified:   tools/train.py

* Fix strange issue with mutually_exclusive_group

Changes to be committed:
modified:   tools/train.py

* Fix gpu-ids nargs parameter

Changes to be committed:
modified:   tools/train.py
parent 365c9302
mmdet/apis/train.py

@@ -195,12 +195,12 @@ def _non_dist_train(model,
             ds,
             cfg.data.imgs_per_gpu,
             cfg.data.workers_per_gpu,
-            cfg.gpus,
+            len(cfg.gpu_ids),
             dist=False,
             seed=cfg.seed) for ds in dataset
     ]
     # put model on gpus
-    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
+    model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
     # build runner
     optimizer = build_optimizer(model, cfg.optimizer)
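The behavioural change in _non_dist_train is that the model is no longer always replicated onto GPUs 0..N-1: it is first moved to the first requested device and then parallelised over exactly the listed ids. Below is a minimal standalone sketch of that placement, not the mmdet code itself; it uses torch.nn.DataParallel (which mmcv's MMDataParallel extends), and the tiny model and the gpu_ids value are made up for illustration.

# Minimal sketch (not mmdet code): what explicit device ids change about placement.
import torch
import torch.nn as nn

model = nn.Linear(8, 2)
gpu_ids = [2, 3]  # illustrative: two specific GPUs reserved for this job

if torch.cuda.is_available() and torch.cuda.device_count() > max(gpu_ids):
    # Old behaviour: device_ids=range(cfg.gpus) always occupies GPUs 0..N-1.
    # New behaviour: put the model on the first requested GPU, then replicate
    # it onto exactly the listed devices.
    model = nn.DataParallel(model.cuda(gpu_ids[0]), device_ids=gpu_ids)
else:
    print('not enough GPUs on this machine; skipping the parallel wrapper')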
tools/train.py

@@ -27,12 +27,18 @@ def parse_args():
         '--validate',
         action='store_true',
         help='whether to evaluate the checkpoint during training')
-    parser.add_argument(
+    group_gpus = parser.add_mutually_exclusive_group()
+    group_gpus.add_argument(
         '--gpus',
         type=int,
-        default=1,
         help='number of gpus to use '
         '(only applicable to non-distributed training)')
+    group_gpus.add_argument(
+        '--gpu-ids',
+        type=int,
+        nargs='+',
+        help='ids of gpus to use '
+        '(only applicable to non-distributed training)')
     parser.add_argument('--seed', type=int, default=None, help='random seed')
     parser.add_argument(
         '--deterministic',
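The two options are placed in a mutually exclusive group, so a run can pass either a GPU count (for example --gpus 2) or an explicit list of device ids (for example --gpu-ids 0 2, thanks to nargs='+'), but not both; with no default on --gpus, main() can tell which option, if any, was actually given. A standalone sketch of that argparse pattern, trimmed down from the parser above:

# Standalone sketch of the argparse pattern (not the full mmdet parser).
import argparse

parser = argparse.ArgumentParser()
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use')

print(parser.parse_args(['--gpu-ids', '0', '2']))  # Namespace(gpu_ids=[0, 2], gpus=None)
print(parser.parse_args(['--gpus', '2']))          # Namespace(gpu_ids=None, gpus=2)
# parser.parse_args(['--gpus', '2', '--gpu-ids', '0'])  # exits: arguments are mutually exclusive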
@@ -67,11 +73,14 @@ def main():
         cfg.work_dir = args.work_dir
     if args.resume_from is not None:
         cfg.resume_from = args.resume_from
-    cfg.gpus = args.gpus
+    if args.gpu_ids is not None:
+        cfg.gpu_ids = args.gpu_ids
+    else:
+        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
     if args.autoscale_lr:
         # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
-        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
+        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
     # init distributed env first, since logger depends on the dist info.
     if args.launcher == 'none':
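With cfg.gpu_ids resolved either from --gpu-ids or from --gpus, the auto-scale branch scales the learning rate by the number of devices instead of by the deprecated cfg.gpus. A quick numeric sketch of that rule; the base learning rate of 0.02 is only an example value and is assumed to be tuned for 8 GPUs:

# Linear scaling rule sketch (https://arxiv.org/abs/1706.02677).
def scale_lr(base_lr, gpu_ids):
    return base_lr * len(gpu_ids) / 8

print(scale_lr(0.02, range(1)))      # 0.0025 -- default single GPU
print(scale_lr(0.02, [0, 2, 5, 7]))  # 0.01   -- four explicit device ids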