From d8c1f685b60456401acb6137c2629332c4a73d2b Mon Sep 17 00:00:00 2001
From: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
Date: Sat, 25 Apr 2020 16:13:42 +0800
Subject: [PATCH] Change to use F-string (#2531)

* Change .format to f-string

* Resolve comments

* Fix missing change
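
A minimal runnable sketch of the conversion pattern (hypothetical
values; the real call sites are in the hunks below):

    metric = 'bbox'
    stats = [0.367, 0.558]

    # before: positional args, format specs and !r all go through .format()
    msg = 'metric {} is not supported'.format(metric)
    val = float('{:.3f}'.format(stats[0]))
    shown = 'stats={!r}'.format(tuple(stats))

    # after: same output; note the f prefix is required, otherwise the
    # braces are kept as literal text
    msg = f'metric {metric} is not supported'
    val = float(f'{stats[0]:.3f}')
    shown = f'stats={tuple(stats)!r}'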
---
 mmdet/apis/inference.py                       |  2 +-
 mmdet/apis/test.py                            |  4 +-
 mmdet/apis/train.py                           |  3 +-
 mmdet/core/anchor/anchor_generator.py         | 68 +++++++++----------
 mmdet/core/bbox/assigners/assign_result.py    | 17 +++--
 .../bbox/iou_calculators/iou2d_calculator.py  |  3 +-
 mmdet/core/bbox/samplers/sampling_result.py   |  8 +--
 mmdet/core/evaluation/class_names.py          |  4 +-
 mmdet/core/evaluation/eval_hooks.py           | 10 ++-
 mmdet/core/evaluation/mean_ap.py              |  6 +-
 mmdet/core/evaluation/recall.py               |  5 +-
 mmdet/core/mask/structures.py                 | 15 ++--
 mmdet/core/optimizer/default_constructor.py   | 12 ++--
 mmdet/datasets/cityscapes.py                  | 18 ++---
 mmdet/datasets/coco.py                        | 40 +++++------
 mmdet/datasets/custom.py                      | 10 ++-
 mmdet/datasets/pipelines/compose.py           |  2 +-
 mmdet/datasets/pipelines/formating.py         | 19 +++---
 mmdet/datasets/pipelines/instaboost.py        |  2 +-
 mmdet/datasets/pipelines/loading.py           | 21 +++---
 mmdet/datasets/pipelines/test_aug.py          |  4 +-
 mmdet/datasets/pipelines/transforms.py        | 62 ++++++++---------
 mmdet/datasets/voc.py                         |  7 +-
 mmdet/datasets/wider_face.py                  |  4 +-
 mmdet/datasets/xml_style.py                   |  9 ++-
 mmdet/models/anchor_heads/anchor_head.py      |  2 +-
 mmdet/models/backbones/hrnet.py               | 12 ++--
 mmdet/models/backbones/resnet.py              |  8 +--
 mmdet/models/detectors/base.py                | 20 +++---
 mmdet/models/detectors/fast_rcnn.py           |  8 +--
 mmdet/models/mask_heads/fcn_mask_head.py      |  6 +-
 mmdet/models/necks/fpn.py                     |  2 +-
 mmdet/models/roi_heads/cascade_roi_head.py    |  4 +-
 mmdet/models/roi_heads/htc_roi_head.py        |  4 +-
 mmdet/models/shared_heads/res_layer.py        |  4 +-
 mmdet/ops/activation.py                       |  2 +-
 mmdet/ops/carafe/grad_check.py                | 14 ++--
 mmdet/ops/conv.py                             |  2 +-
 mmdet/ops/dcn/deform_conv.py                  | 25 +++----
 mmdet/ops/nms/nms_wrapper.py                  | 14 ++--
 mmdet/ops/norm.py                             |  2 +-
 mmdet/ops/plugin.py                           |  2 +-
 mmdet/ops/roi_align/roi_align.py              | 10 +--
 mmdet/ops/roi_pool/roi_pool.py                |  6 +-
 .../sigmoid_focal_loss/sigmoid_focal_loss.py  |  4 +-
 mmdet/ops/upsample.py                         |  2 +-
 mmdet/utils/collect_env.py                    |  4 +-
 mmdet/utils/contextmanagers.py                |  2 +-
 mmdet/utils/flops_counter.py                  |  2 +-
 mmdet/utils/logger.py                         |  2 +-
 mmdet/utils/profiling.py                      |  5 +-
 mmdet/utils/util_mixins.py                    | 12 ++--
 setup.py                                      |  8 +--
 tests/async_benchmark.py                      |  6 +-
 tests/test_backbone.py                        |  4 +-
 tests/test_config.py                          | 24 +++----
 tests/test_heads.py                           |  4 +-
 tests/test_nms.py                             |  2 +-
 tests/test_wrappers.py                        |  2 +-
 tools/analyze_logs.py                         | 26 ++++---
 tools/coco_error_analysis.py                  | 15 ++--
 tools/convert_datasets/cityscapes.py          |  6 +-
 tools/convert_datasets/pascal_voc.py          | 20 +++---
 tools/detectron2pytorch.py                    | 24 +++----
 tools/get_flops.py                            |  4 +-
 tools/publish_model.py                        |  2 +-
 tools/pytorch2onnx.py                         |  6 +-
 tools/robustness_eval.py                      | 48 ++++++-------
 tools/test.py                                 |  2 +-
 tools/test_robustness.py                      | 14 ++--
 tools/train.py                                | 13 ++--
 tools/upgrade_model_version.py                |  2 +-
 72 files changed, 351 insertions(+), 416 deletions(-)

diff --git a/mmdet/apis/inference.py b/mmdet/apis/inference.py
index a6ab2c97..c26ae0f0 100644
--- a/mmdet/apis/inference.py
+++ b/mmdet/apis/inference.py
@@ -30,7 +30,7 @@ def init_detector(config, checkpoint=None, device='cuda:0'):
         config = mmcv.Config.fromfile(config)
     elif not isinstance(config, mmcv.Config):
         raise TypeError('config must be a filename or Config object, '
-                        'but got {}'.format(type(config)))
+                        f'but got {type(config)}')
     config.model.pretrained = None
     model = build_detector(config.model, test_cfg=config.test_cfg)
     if checkpoint is not None:
diff --git a/mmdet/apis/test.py b/mmdet/apis/test.py
index 282c3d76..5b0dea2d 100644
--- a/mmdet/apis/test.py
+++ b/mmdet/apis/test.py
@@ -93,7 +93,7 @@ def collect_results_cpu(result_part, size, tmpdir=None):
     else:
         mmcv.mkdir_or_exist(tmpdir)
     # dump the part result to the dir
-    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
+    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
     dist.barrier()
     # collect all parts
     if rank != 0:
@@ -102,7 +102,7 @@ def collect_results_cpu(result_part, size, tmpdir=None):
         # load results of all parts from tmp dir
         part_list = []
         for i in range(world_size):
-            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
+            part_file = osp.join(tmpdir, f'part_{i}.pkl')
             part_list.append(mmcv.load(part_file))
         # sort the results
         ordered_results = []
diff --git a/mmdet/apis/train.py b/mmdet/apis/train.py
index 8224865a..c88b1cc6 100644
--- a/mmdet/apis/train.py
+++ b/mmdet/apis/train.py
@@ -40,8 +40,7 @@ def parse_losses(losses):
         elif isinstance(loss_value, list):
             log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
         else:
-            raise TypeError(
-                '{} is not a tensor or list of tensors'.format(loss_name))
+            raise TypeError(f'{loss_name} is not a tensor or list of tensors')
 
     loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
 
diff --git a/mmdet/core/anchor/anchor_generator.py b/mmdet/core/anchor/anchor_generator.py
index f2c716ec..020e2394 100644
--- a/mmdet/core/anchor/anchor_generator.py
+++ b/mmdet/core/anchor/anchor_generator.py
@@ -64,21 +64,21 @@ class AnchorGenerator(object):
         # check center and center_offset
         if center_offset != 0:
             assert centers is None, 'center cannot be set when center_offset' \
-                '!=0, {} is given.'.format(centers)
+                f'!=0, {centers} is given.'
         if not (0 <= center_offset <= 1):
-            raise ValueError('center_offset should be in range [0, 1], {} is'
-                             ' given.'.format(center_offset))
+            raise ValueError('center_offset should be in range [0, 1], '
+                             f'{center_offset} is given.')
         if centers is not None:
             assert len(centers) == len(strides), \
                 'The number of strides should be the same as centers, got ' \
-                '{} and {}'.format(strides, centers)
+                f'{strides} and {centers}'
 
         # calculate base sizes of anchors
         self.strides = strides
         self.base_sizes = list(strides) if base_sizes is None else base_sizes
         assert len(self.base_sizes) == len(self.strides), \
             'The number of strides should be the same as base sizes, got ' \
-            '{} and {}'.format(self.strides, self.base_sizes)
+            f'{self.strides} and {self.base_sizes}'
 
         # calculate scales of anchors
         assert ((octave_base_scale is not None
@@ -261,19 +261,18 @@ class AnchorGenerator(object):
     def __repr__(self):
         indent_str = '    '
         repr_str = self.__class__.__name__ + '(\n'
-        repr_str += '{}strides={},\n'.format(indent_str, self.strides)
-        repr_str += '{}ratios={},\n'.format(indent_str, self.ratios)
-        repr_str += '{}scales={},\n'.format(indent_str, self.scales)
-        repr_str += '{}base_sizes={},\n'.format(indent_str, self.base_sizes)
-        repr_str += '{}scale_major={},\n'.format(indent_str, self.scale_major)
-        repr_str += '{}octave_base_scale={},\n'.format(indent_str,
-                                                       self.octave_base_scale)
-        repr_str += '{}scales_per_octave={},\n'.format(indent_str,
-                                                       self.scales_per_octave)
-        repr_str += '{}num_levels={}\n'.format(indent_str, self.num_levels)
-        repr_str += '{}centers={},\n'.format(indent_str, self.centers)
-        repr_str += '{}center_offset={})'.format(indent_str,
-                                                 self.center_offset)
+        repr_str += f'{indent_str}strides={self.strides},\n'
+        repr_str += f'{indent_str}ratios={self.ratios},\n'
+        repr_str += f'{indent_str}scales={self.scales},\n'
+        repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
+        repr_str += f'{indent_str}scale_major={self.scale_major},\n'
+        repr_str += f'{indent_str}octave_base_scale='
+        repr_str += f'{self.octave_base_scale},\n'
+        repr_str += f'{indent_str}scales_per_octave='
+        repr_str += f'{self.scales_per_octave},\n'
+        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
+        repr_str += f'{indent_str}centers={self.centers},\n'
+        repr_str += f'{indent_str}center_offset={self.center_offset})'
         return repr_str
 
 
@@ -326,8 +325,8 @@ class SSDAnchorGenerator(AnchorGenerator):
             else:
                 raise ValueError(
                     'basesize_ratio_range[0] should be either 0.15'
-                    'or 0.2 when input_size is 300, got {}.'.format(
-                        basesize_ratio_range[0]))
+                    ' or 0.2 when input_size is 300, got '
+                    f'{basesize_ratio_range[0]}.')
         elif input_size == 512:
             if basesize_ratio_range[0] == 0.1:  # SSD512 COCO
                 min_sizes.insert(0, int(input_size * 4 / 100))
@@ -336,13 +335,12 @@ class SSDAnchorGenerator(AnchorGenerator):
                 min_sizes.insert(0, int(input_size * 7 / 100))
                 max_sizes.insert(0, int(input_size * 15 / 100))
             else:
-                raise ValueError(
-                    'basesize_ratio_range[0] should be either 0.1'
-                    'or 0.15 when input_size is 512, got {}.'.format(
-                        basesize_ratio_range[0]))
+                raise ValueError('basesize_ratio_range[0] should be either '
+                                 '0.1 or 0.15 when input_size is 512, got '
+                                 f'{basesize_ratio_range[0]}.')
         else:
             raise ValueError('Only support 300 or 512 in SSDAnchorGenerator'
-                             ', got {}.'.format(input_size))
+                             f', got {input_size}.')
 
         anchor_ratios = []
         anchor_scales = []
@@ -379,16 +377,15 @@
     def __repr__(self):
         indent_str = '    '
         repr_str = self.__class__.__name__ + '(\n'
-        repr_str += '{}strides={},\n'.format(indent_str, self.strides)
-        repr_str += '{}scales={},\n'.format(indent_str, self.scales)
-        repr_str += '{}scale_major={},\n'.format(indent_str, self.scale_major)
-        repr_str += '{}input_size={},\n'.format(indent_str, self.input_size)
-        repr_str += '{}scales={},\n'.format(indent_str, self.scales)
-        repr_str += '{}ratios={},\n'.format(indent_str, self.ratios)
-        repr_str += '{}num_levels={}\n'.format(indent_str, self.num_levels)
-        repr_str += '{}base_sizes={},\n'.format(indent_str, self.base_sizes)
-        repr_str += '{}basesize_ratio_range={})'.format(
-            indent_str, self.basesize_ratio_range)
+        repr_str += f'{indent_str}strides={self.strides},\n'
+        repr_str += f'{indent_str}scales={self.scales},\n'
+        repr_str += f'{indent_str}scale_major={self.scale_major},\n'
+        repr_str += f'{indent_str}input_size={self.input_size},\n'
+        repr_str += f'{indent_str}ratios={self.ratios},\n'
+        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
+        repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
+        repr_str += f'{indent_str}basesize_ratio_range='
+        repr_str += f'{self.basesize_ratio_range})'
         return repr_str
 
 
diff --git a/mmdet/core/bbox/assigners/assign_result.py b/mmdet/core/bbox/assigners/assign_result.py
index 4daaa770..f6979e6d 100644
--- a/mmdet/core/bbox/assigners/assign_result.py
+++ b/mmdet/core/bbox/assigners/assign_result.py
@@ -71,21 +71,20 @@ class AssignResult(util_mixins.NiceRepr):
         Create a "nice" summary string describing this assign result
         """
         parts = []
-        parts.append('num_gts={!r}'.format(self.num_gts))
+        parts.append(f'num_gts={self.num_gts!r}')
         if self.gt_inds is None:
-            parts.append('gt_inds={!r}'.format(self.gt_inds))
+            parts.append(f'gt_inds={self.gt_inds!r}')
         else:
-            parts.append('gt_inds.shape={!r}'.format(
-                tuple(self.gt_inds.shape)))
+            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
         if self.max_overlaps is None:
-            parts.append('max_overlaps={!r}'.format(self.max_overlaps))
+            parts.append(f'max_overlaps={self.max_overlaps!r}')
         else:
-            parts.append('max_overlaps.shape={!r}'.format(
-                tuple(self.max_overlaps.shape)))
+            parts.append('max_overlaps.shape='
+                         f'{tuple(self.max_overlaps.shape)!r}')
         if self.labels is None:
-            parts.append('labels={!r}'.format(self.labels))
+            parts.append(f'labels={self.labels!r}')
         else:
-            parts.append('labels.shape={!r}'.format(tuple(self.labels.shape)))
+            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
         return ', '.join(parts)
 
     @classmethod
diff --git a/mmdet/core/bbox/iou_calculators/iou2d_calculator.py b/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
index 8c8dfa07..6732af07 100644
--- a/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
+++ b/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
@@ -12,8 +12,7 @@ class BboxOverlaps2D(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += '(mode={}, is_aligned={})'.format(self.mode,
-                                                      self.is_aligned)
+        repr_str += f'(mode={self.mode}, is_aligned={self.is_aligned})'
         return repr_str
 
 
diff --git a/mmdet/core/bbox/samplers/sampling_result.py b/mmdet/core/bbox/samplers/sampling_result.py
index dcf25eec..b24d9654 100644
--- a/mmdet/core/bbox/samplers/sampling_result.py
+++ b/mmdet/core/bbox/samplers/sampling_result.py
@@ -9,7 +9,7 @@ class SamplingResult(util_mixins.NiceRepr):
         >>> # xdoctest: +IGNORE_WANT
         >>> from mmdet.core.bbox.samplers.sampling_result import *  # NOQA
         >>> self = SamplingResult.random(rng=10)
-        >>> print('self = {}'.format(self))
+        >>> print(f'self = {self}')
         self = <SamplingResult({
             'neg_bboxes': torch.Size([12, 4]),
             'neg_inds': tensor([ 0,  1,  2,  4,  5,  6,  7,  8,  9, 10, 11, 12]),
@@ -57,9 +57,9 @@ class SamplingResult(util_mixins.NiceRepr):
 
         Example:
             >>> self = SamplingResult.random()
-            >>> print('self = {}'.format(self.to(None)))
+            >>> print(f'self = {self.to(None)}')
             >>> # xdoctest: +REQUIRES(--gpu)
-            >>> print('self = {}'.format(self.to(0)))
+            >>> print(f'self = {self.to(0)}')
         """
         _dict = self.__dict__
         for key, value in _dict.items():
@@ -71,7 +71,7 @@ class SamplingResult(util_mixins.NiceRepr):
         data = self.info.copy()
         data['pos_bboxes'] = data.pop('pos_bboxes').shape
         data['neg_bboxes'] = data.pop('neg_bboxes').shape
-        parts = ['\'{}\': {!r}'.format(k, v) for k, v in sorted(data.items())]
+        parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
         body = '    ' + ',\n    '.join(parts)
         return '{\n' + body + '\n}'
 
diff --git a/mmdet/core/evaluation/class_names.py b/mmdet/core/evaluation/class_names.py
index 78427734..4b8845f3 100644
--- a/mmdet/core/evaluation/class_names.py
+++ b/mmdet/core/evaluation/class_names.py
@@ -110,7 +110,7 @@ def get_classes(dataset):
         if dataset in alias2name:
             labels = eval(alias2name[dataset] + '_classes()')
         else:
-            raise ValueError('Unrecognized dataset: {}'.format(dataset))
+            raise ValueError(f'Unrecognized dataset: {dataset}')
     else:
-        raise TypeError('dataset must a str, but got {}'.format(type(dataset)))
+        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
     return labels
diff --git a/mmdet/core/evaluation/eval_hooks.py b/mmdet/core/evaluation/eval_hooks.py
index f10cc545..31ef23fb 100644
--- a/mmdet/core/evaluation/eval_hooks.py
+++ b/mmdet/core/evaluation/eval_hooks.py
@@ -14,9 +14,8 @@ class EvalHook(Hook):
 
     def __init__(self, dataloader, interval=1, **eval_kwargs):
         if not isinstance(dataloader, DataLoader):
-            raise TypeError(
-                'dataloader must be a pytorch DataLoader, but got {}'.format(
-                    type(dataloader)))
+            raise TypeError('dataloader must be a pytorch DataLoader, but got'
+                            f' {type(dataloader)}')
         self.dataloader = dataloader
         self.interval = interval
         self.eval_kwargs = eval_kwargs
@@ -54,9 +53,8 @@ class DistEvalHook(EvalHook):
                  gpu_collect=False,
                  **eval_kwargs):
         if not isinstance(dataloader, DataLoader):
-            raise TypeError(
-                'dataloader must be a pytorch DataLoader, but got {}'.format(
-                    type(dataloader)))
+            raise TypeError('dataloader must be a pytorch DataLoader, but got '
+                            f'{type(dataloader)}')
         self.dataloader = dataloader
         self.interval = interval
         self.gpu_collect = gpu_collect
diff --git a/mmdet/core/evaluation/mean_ap.py b/mmdet/core/evaluation/mean_ap.py
index 6934a43d..0573d66f 100644
--- a/mmdet/core/evaluation/mean_ap.py
+++ b/mmdet/core/evaluation/mean_ap.py
@@ -442,15 +442,15 @@ def print_map_summary(mean_ap,
     header = ['class', 'gts', 'dets', 'recall', 'ap']
     for i in range(num_scales):
         if scale_ranges is not None:
-            print_log('Scale range {}'.format(scale_ranges[i]), logger=logger)
+            print_log(f'Scale range {scale_ranges[i]}', logger=logger)
         table_data = [header]
         for j in range(num_classes):
             row_data = [
                 label_names[j], num_gts[i, j], results[j]['num_dets'],
-                '{:.3f}'.format(recalls[i, j]), '{:.3f}'.format(aps[i, j])
+                f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
             ]
             table_data.append(row_data)
-        table_data.append(['mAP', '', '', '', '{:.3f}'.format(mean_ap[i])])
+        table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
         table = AsciiTable(table_data)
         table.inner_footing_row_border = True
         print_log('\n' + table.table, logger=logger)
diff --git a/mmdet/core/evaluation/recall.py b/mmdet/core/evaluation/recall.py
index 3a591e88..a8bc0657 100644
--- a/mmdet/core/evaluation/recall.py
+++ b/mmdet/core/evaluation/recall.py
@@ -133,10 +133,7 @@ def print_recall_summary(recalls,
     row_header = [''] + iou_thrs[col_idxs].tolist()
     table_data = [row_header]
     for i, num in enumerate(proposal_nums[row_idxs]):
-        row = [
-            '{:.3f}'.format(val)
-            for val in recalls[row_idxs[i], col_idxs].tolist()
-        ]
+        row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]
         row.insert(0, num)
         table_data.append(row)
     table = AsciiTable(table_data)
diff --git a/mmdet/core/mask/structures.py b/mmdet/core/mask/structures.py
index a2ddd884..20f4d2e6 100644
--- a/mmdet/core/mask/structures.py
+++ b/mmdet/core/mask/structures.py
@@ -92,9 +92,9 @@ class BitmapMasks(BaseInstanceMasks):
 
     def __repr__(self):
         s = self.__class__.__name__ + '('
-        s += 'num_masks={}, '.format(len(self.masks))
-        s += 'height={}, '.format(len(self.height))
-        s += 'width={})'.format(len(self.width))
+        s += f'num_masks={len(self.masks)}, '
+        s += f'height={self.height}, '
+        s += f'width={self.width})'
         return s
 
     def __len__(self):
@@ -316,8 +316,7 @@ class PolygonMasks(BaseInstanceMasks):
                 masks = self.masks[index]
             except Exception:
                 raise ValueError(
-                    'Unsupported input of type {} for indexing!'.format(
-                        type(index)))
+                    f'Unsupported input of type {type(index)} for indexing!')
         if isinstance(masks[0], np.ndarray):
             masks = [masks]  # ensure a list of three levels
         return PolygonMasks(masks, self.height, self.width)
@@ -327,9 +326,9 @@ class PolygonMasks(BaseInstanceMasks):
 
     def __repr__(self):
         s = self.__class__.__name__ + '('
-        s += 'num_masks={}, '.format(len(self.masks))
-        s += 'height={}, '.format(len(self.height))
-        s += 'width={})'.format(len(self.width))
+        s += f'num_masks={len(self.masks)}, '
+        s += f'height={self.height}, '
+        s += f'width={self.width})'
         return s
 
     def __len__(self):
diff --git a/mmdet/core/optimizer/default_constructor.py b/mmdet/core/optimizer/default_constructor.py
index 4ff02ab7..3ae246bd 100644
--- a/mmdet/core/optimizer/default_constructor.py
+++ b/mmdet/core/optimizer/default_constructor.py
@@ -51,7 +51,7 @@ class DefaultOptimizerConstructor(object):
     def __init__(self, optimizer_cfg, paramwise_cfg=None):
         if not isinstance(optimizer_cfg, dict):
             raise TypeError('optimizer_cfg should be a dict',
-                            'but got {}'.format(type(optimizer_cfg)))
+                            f'but got {type(optimizer_cfg)}')
         self.optimizer_cfg = optimizer_cfg
         self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg
         self.base_lr = optimizer_cfg.get('lr', None)
@@ -61,7 +61,7 @@ class DefaultOptimizerConstructor(object):
     def _validate_cfg(self):
         if not isinstance(self.paramwise_cfg, dict):
             raise TypeError('paramwise_cfg should be None or a dict, '
-                            'but got {}'.format(type(self.paramwise_cfg)))
+                            f'but got {type(self.paramwise_cfg)}')
         # get base lr and weight decay
         # weight_decay must be explicitly specified if mult is specified
         if ('bias_decay_mult' in self.paramwise_cfg
@@ -111,9 +111,8 @@ class DefaultOptimizerConstructor(object):
                 params.append(param_group)
                 continue
             if bypass_duplicate and self._is_in(param_group, params):
-                warnings.warn('{} is duplicate. It is skipped since '
-                              'bypass_duplicate={}'.format(
-                                  prefix, bypass_duplicate))
+                warnings.warn(f'{prefix} is a duplicate. It is skipped since '
+                              f'bypass_duplicate={bypass_duplicate}')
                 continue
             # bias_lr_mult affects all bias parameters except for norm.bias
             if name == 'bias' and not is_norm:
@@ -135,8 +134,7 @@ class DefaultOptimizerConstructor(object):
             params.append(param_group)
 
         for child_name, child_mod in module.named_children():
-            child_prefix = '{}.{}'.format(prefix,
-                                          child_name) if prefix else child_name
+            child_prefix = f'{prefix}.{child_name}' if prefix else child_name
             self.add_params(params, child_mod, prefix=child_prefix)
 
     def __call__(self, model):
diff --git a/mmdet/datasets/cityscapes.py b/mmdet/datasets/cityscapes.py
index 96d6f4d9..3388cb38 100644
--- a/mmdet/datasets/cityscapes.py
+++ b/mmdet/datasets/cityscapes.py
@@ -138,12 +138,11 @@ class CityscapesDataset(CocoDataset):
                     class_id = CSLabels.name2label[classes].id
                     score = bboxes[i, -1]
                     mask = maskUtils.decode(segms[i]).astype(np.uint8)
-                    png_filename = osp.join(
-                        outfile_prefix,
-                        basename + '_{}_{}.png'.format(i, classes))
+                    png_filename = osp.join(outfile_prefix,
+                                            basename + f'_{i}_{classes}.png')
                     mmcv.imwrite(mask, png_filename)
-                    fout.write('{} {} {}\n'.format(
-                        osp.basename(png_filename), class_id, score))
+                    fout.write(f'{osp.basename(png_filename)} {class_id} '
+                               f'{score}\n')
             result_files.append(pred_txt)
 
         return result_files
@@ -249,9 +248,7 @@ class CityscapesDataset(CocoDataset):
             result_dir = osp.join(tmp_dir.name, 'results')
 
         eval_results = {}
-        print_log(
-            'Evaluating results under {} ...'.format(result_dir),
-            logger=logger)
+        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
 
         # set global states in cityscapes evaluation API
         CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
@@ -266,9 +263,8 @@ class CityscapesDataset(CocoDataset):
             '*/*_gtFine_instanceIds.png')
 
         groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
-        assert len(groundTruthImgList), \
-            'Cannot find ground truth images in {}.'.format(
-                CSEval.args.groundTruthSearch)
+        assert len(groundTruthImgList), 'Cannot find ground truth images' \
+            f' in {CSEval.args.groundTruthSearch}.'
         predictionImgList = []
         for gt in groundTruthImgList:
             predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
diff --git a/mmdet/datasets/coco.py b/mmdet/datasets/coco.py
index b5a665f7..14957166 100644
--- a/mmdet/datasets/coco.py
+++ b/mmdet/datasets/coco.py
@@ -241,22 +241,19 @@ class CocoDataset(CustomDataset):
         result_files = dict()
         if isinstance(results[0], list):
             json_results = self._det2json(results)
-            result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
-            result_files['proposal'] = '{}.{}.json'.format(
-                outfile_prefix, 'bbox')
+            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
+            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
             mmcv.dump(json_results, result_files['bbox'])
         elif isinstance(results[0], tuple):
             json_results = self._segm2json(results)
-            result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
-            result_files['proposal'] = '{}.{}.json'.format(
-                outfile_prefix, 'bbox')
-            result_files['segm'] = '{}.{}.json'.format(outfile_prefix, 'segm')
+            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
+            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
+            result_files['segm'] = f'{outfile_prefix}.segm.json'
             mmcv.dump(json_results[0], result_files['bbox'])
             mmcv.dump(json_results[1], result_files['segm'])
         elif isinstance(results[0], np.ndarray):
             json_results = self._proposal2json(results)
-            result_files['proposal'] = '{}.{}.json'.format(
-                outfile_prefix, 'proposal')
+            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
             mmcv.dump(json_results, result_files['proposal'])
         else:
             raise TypeError('invalid type of results')
@@ -347,14 +344,14 @@ class CocoDataset(CustomDataset):
         allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
         for metric in metrics:
             if metric not in allowed_metrics:
-                raise KeyError('metric {} is not supported'.format(metric))
+                raise KeyError(f'metric {metric} is not supported')
 
         result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
 
         eval_results = {}
         cocoGt = self.coco
         for metric in metrics:
-            msg = 'Evaluating {}...'.format(metric)
+            msg = f'Evaluating {metric}...'
             if logger is None:
                 msg = '\n' + msg
             print_log(msg, logger=logger)
@@ -364,14 +361,14 @@ class CocoDataset(CustomDataset):
                     results, proposal_nums, iou_thrs, logger='silent')
                 log_msg = []
                 for i, num in enumerate(proposal_nums):
-                    eval_results['AR@{}'.format(num)] = ar[i]
-                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
+                    eval_results[f'AR@{num}'] = ar[i]
+                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                 log_msg = ''.join(log_msg)
                 print_log(log_msg, logger=logger)
                 continue
 
             if metric not in result_files:
-                raise KeyError('{} is not in results'.format(metric))
+                raise KeyError(f'{metric} is not in results')
             try:
                 cocoDt = cocoGt.loadRes(result_files[metric])
             except IndexError:
@@ -396,7 +393,7 @@ class CocoDataset(CustomDataset):
                     'AR_l@1000'
                 ]
                 for i, item in enumerate(metric_items):
-                    val = float('{:.3f}'.format(cocoEval.stats[i + 6]))
+                    val = float(f'{cocoEval.stats[i + 6]:.3f}')
                     eval_results[item] = val
             else:
                 cocoEval.evaluate()
@@ -421,8 +418,7 @@ class CocoDataset(CustomDataset):
                         else:
                             ap = float('nan')
                         results_per_category.append(
-                            ('{}'.format(nm['name']),
-                             '{:0.3f}'.format(float(ap))))
+                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
 
                     num_columns = min(6, len(results_per_category) * 2)
                     results_flatten = list(
@@ -441,12 +437,13 @@
                     'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                 ]
                 for i in range(len(metric_items)):
-                    key = '{}_{}'.format(metric, metric_items[i])
-                    val = float('{:.3f}'.format(cocoEval.stats[i]))
+                    key = f'{metric}_{metric_items[i]}'
+                    val = float(f'{cocoEval.stats[i]:.3f}')
                     eval_results[key] = val
-                eval_results['{}_mAP_copypaste'.format(metric)] = (
-                    '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
-                    '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6])
+                ap = cocoEval.stats[:6]
+                eval_results[f'{metric}_mAP_copypaste'] = (
+                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
+                    f'{ap[4]:.3f} {ap[5]:.3f}')
         if tmp_dir is not None:
             tmp_dir.cleanup()
         return eval_results
diff --git a/mmdet/datasets/custom.py b/mmdet/datasets/custom.py
index d6a1bd62..2f44c438 100644
--- a/mmdet/datasets/custom.py
+++ b/mmdet/datasets/custom.py
@@ -181,8 +181,7 @@ class CustomDataset(Dataset):
         elif isinstance(classes, (tuple, list)):
             class_names = classes
         else:
-            raise ValueError('Unsupported type {} of classes.'.format(
-                type(classes)))
+            raise ValueError(f'Unsupported type {type(classes)} of classes.')
 
         return class_names
 
@@ -220,7 +219,7 @@ class CustomDataset(Dataset):
             metric = metric[0]
         allowed_metrics = ['mAP', 'recall']
         if metric not in allowed_metrics:
-            raise KeyError('metric {} is not supported'.format(metric))
+            raise KeyError(f'metric {metric} is not supported')
         annotations = [self.get_ann_info(i) for i in range(len(self))]
         eval_results = {}
         if metric == 'mAP':
@@ -241,10 +240,9 @@ class CustomDataset(Dataset):
                 gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
             for i, num in enumerate(proposal_nums):
                 for j, iou in enumerate(iou_thr):
-                    eval_results['recall@{}@{}'.format(num, iou)] = recalls[i,
-                                                                            j]
+                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
             if recalls.shape[1] > 1:
                 ar = recalls.mean(axis=1)
                 for i, num in enumerate(proposal_nums):
-                    eval_results['AR@{}'.format(num)] = ar[i]
+                    eval_results[f'AR@{num}'] = ar[i]
         return eval_results
diff --git a/mmdet/datasets/pipelines/compose.py b/mmdet/datasets/pipelines/compose.py
index 92526f03..9c69705e 100644
--- a/mmdet/datasets/pipelines/compose.py
+++ b/mmdet/datasets/pipelines/compose.py
@@ -31,6 +31,6 @@ class Compose(object):
         format_string = self.__class__.__name__ + '('
         for t in self.transforms:
             format_string += '\n'
-            format_string += '    {0}'.format(t)
+            format_string += f'    {t}'
         format_string += '\n)'
         return format_string
diff --git a/mmdet/datasets/pipelines/formating.py b/mmdet/datasets/pipelines/formating.py
index 718fe5ab..fe8cbcc5 100644
--- a/mmdet/datasets/pipelines/formating.py
+++ b/mmdet/datasets/pipelines/formating.py
@@ -25,8 +25,7 @@ def to_tensor(data):
     elif isinstance(data, float):
         return torch.FloatTensor([data])
     else:
-        raise TypeError('type {} cannot be converted to tensor.'.format(
-            type(data)))
+        raise TypeError(f'type {type(data)} cannot be converted to tensor.')
 
 
 @PIPELINES.register_module
@@ -41,7 +40,7 @@ class ToTensor(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(keys={})'.format(self.keys)
+        return self.__class__.__name__ + f'(keys={self.keys})'
 
 
 @PIPELINES.register_module
@@ -59,7 +58,7 @@ class ImageToTensor(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(keys={})'.format(self.keys)
+        return self.__class__.__name__ + f'(keys={self.keys})'
 
 
 @PIPELINES.register_module
@@ -75,8 +74,8 @@ class Transpose(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(keys={}, order={})'.format(
-            self.keys, self.order)
+        return self.__class__.__name__ + \
+            f'(keys={self.keys}, order={self.order})'
 
 
 @PIPELINES.register_module
@@ -95,7 +94,7 @@ class ToDataContainer(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(fields={})'.format(self.fields)
+        return self.__class__.__name__ + f'(fields={self.fields})'
 
 
 @PIPELINES.register_module
@@ -188,8 +187,8 @@ class Collect(object):
         return data
 
     def __repr__(self):
-        return self.__class__.__name__ + '(keys={}, meta_keys={})'.format(
-            self.keys, self.meta_keys)
+        return self.__class__.__name__ + \
+            f'(keys={self.keys}, meta_keys={self.meta_keys})'
 
 
 @PIPELINES.register_module
@@ -221,4 +220,4 @@ class WrapFieldsToLists(object):
         return results
 
     def __repr__(self):
-        return '{}()'.format(self.__class__.__name__)
+        return f'{self.__class__.__name__}()'
diff --git a/mmdet/datasets/pipelines/instaboost.py b/mmdet/datasets/pipelines/instaboost.py
index e4d24128..5460eb6e 100644
--- a/mmdet/datasets/pipelines/instaboost.py
+++ b/mmdet/datasets/pipelines/instaboost.py
@@ -93,5 +93,5 @@ class InstaBoost(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += ('(cfg={}, aug_ratio={})').format(self.cfg, self.aug_ratio)
+        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
         return repr_str
diff --git a/mmdet/datasets/pipelines/loading.py b/mmdet/datasets/pipelines/loading.py
index ba555f6a..4e75b0d4 100644
--- a/mmdet/datasets/pipelines/loading.py
+++ b/mmdet/datasets/pipelines/loading.py
@@ -39,8 +39,8 @@ class LoadImageFromFile(object):
         return results
 
     def __repr__(self):
-        return "{} (to_float32={}, color_type='{}')".format(
-            self.__class__.__name__, self.to_float32, self.color_type)
+        return f'{self.__class__.__name__}(to_float32={self.to_float32}, ' \
+            f"color_type='{self.color_type}')"
 
 
 @PIPELINES.register_module
@@ -80,8 +80,8 @@ class LoadMultiChannelImageFromFiles(object):
         return results
 
     def __repr__(self):
-        return "{} (to_float32={}, color_type='{}')".format(
-            self.__class__.__name__, self.to_float32, self.color_type)
+        return f'{self.__class__.__name__}(to_float32={self.to_float32}, ' \
+            f"color_type='{self.color_type}')"
 
 
 @PIPELINES.register_module
@@ -181,9 +181,10 @@ class LoadAnnotations(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += ('(with_bbox={}, with_label={}, with_mask={},'
-                     ' with_seg={})').format(self.with_bbox, self.with_label,
-                                             self.with_mask, self.with_seg)
+        repr_str += f'(with_bbox={self.with_bbox}, '
+        repr_str += f'with_label={self.with_label}, '
+        repr_str += f'with_mask={self.with_mask}, '
+        repr_str += f'with_seg={self.with_seg})'
         return repr_str
 
 
@@ -198,7 +199,7 @@ class LoadProposals(object):
         if proposals.shape[1] not in (4, 5):
             raise AssertionError(
                 'proposals should have shapes (n, 4) or (n, 5), '
-                'but found {}'.format(proposals.shape))
+                f'but found {proposals.shape}')
         proposals = proposals[:, :4]
 
         if self.num_max_proposals is not None:
@@ -211,5 +212,5 @@ class LoadProposals(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(num_max_proposals={})'.format(
-            self.num_max_proposals)
+        return self.__class__.__name__ + \
+            f'(num_max_proposals={self.num_max_proposals})'
diff --git a/mmdet/datasets/pipelines/test_aug.py b/mmdet/datasets/pipelines/test_aug.py
index a2b7230d..bcf1b55f 100644
--- a/mmdet/datasets/pipelines/test_aug.py
+++ b/mmdet/datasets/pipelines/test_aug.py
@@ -33,6 +33,6 @@ class MultiScaleFlipAug(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += '(transforms={}, img_scale={}, flip={})'.format(
-            self.transforms, self.img_scale, self.flip)
+        repr_str += f'(transforms={self.transforms}, '
+        repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
         return repr_str
diff --git a/mmdet/datasets/pipelines/transforms.py b/mmdet/datasets/pipelines/transforms.py
index a6f12acc..2857d329 100644
--- a/mmdet/datasets/pipelines/transforms.py
+++ b/mmdet/datasets/pipelines/transforms.py
@@ -177,11 +177,10 @@ class Resize(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += ('(img_scale={}, multiscale_mode={}, ratio_range={}, '
-                     'keep_ratio={})').format(self.img_scale,
-                                              self.multiscale_mode,
-                                              self.ratio_range,
-                                              self.keep_ratio)
+        repr_str += f'(img_scale={self.img_scale}, '
+        repr_str += f'multiscale_mode={self.multiscale_mode}, '
+        repr_str += f'ratio_range={self.ratio_range}, '
+        repr_str += f'keep_ratio={self.keep_ratio})'
         return repr_str
 
 
@@ -222,8 +221,7 @@ class RandomFlip(object):
             flipped[..., 1::4] = h - bboxes[..., 3::4]
             flipped[..., 3::4] = h - bboxes[..., 1::4]
         else:
-            raise ValueError(
-                'Invalid flipping direction "{}"'.format(direction))
+            raise ValueError(f"Invalid flipping direction '{direction}'")
         return flipped
 
     def __call__(self, results):
@@ -252,8 +250,7 @@ class RandomFlip(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(flip_ratio={})'.format(
-            self.flip_ratio)
+        return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
 
 
 @PIPELINES.register_module
@@ -306,8 +303,9 @@ class Pad(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += '(size={}, size_divisor={}, pad_val={})'.format(
-            self.size, self.size_divisor, self.pad_val)
+        repr_str += f'(size={self.size}, '
+        repr_str += f'size_divisor={self.size_divisor}, '
+        repr_str += f'pad_val={self.pad_val})'
         return repr_str
 
 
@@ -336,8 +334,7 @@ class Normalize(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += '(mean={}, std={}, to_rgb={})'.format(
-            self.mean, self.std, self.to_rgb)
+        repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
         return repr_str
 
 
@@ -399,8 +396,7 @@ class RandomCrop(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(crop_size={})'.format(
-            self.crop_size)
+        return self.__class__.__name__ + f'(crop_size={self.crop_size})'
 
 
 @PIPELINES.register_module
@@ -422,8 +418,7 @@ class SegRescale(object):
         return results
 
     def __repr__(self):
-        return self.__class__.__name__ + '(scale_factor={})'.format(
-            self.scale_factor)
+        return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
 
 
 @PIPELINES.register_module
@@ -511,12 +506,12 @@ class PhotoMetricDistortion(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += ('(brightness_delta={}, contrast_range={}, '
-                     'saturation_range={}, hue_delta={})').format(
-                         self.brightness_delta,
-                         (self.contrast_lower, self.contrast_upper),
-                         (self.saturation_lower, self.saturation_upper),
-                         self.hue_delta)
+        repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
+        repr_str += 'contrast_range='
+        repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
+        repr_str += 'saturation_range='
+        repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
+        repr_str += f'hue_delta={self.hue_delta})'
         return repr_str
 
 
@@ -584,10 +579,9 @@ class Expand(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += '(mean={}, to_rgb={}, ratio_range={}, ' \
-                    'seg_ignore_label={})'.format(
-                        self.mean, self.to_rgb, self.ratio_range,
-                        self.seg_ignore_label)
+        repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
+        repr_str += f'ratio_range={self.ratio_range}, '
+        repr_str += f'seg_ignore_label={self.seg_ignore_label})'
         return repr_str
 
 
@@ -677,8 +671,8 @@ class MinIoURandomCrop(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += '(min_ious={}, min_crop_size={})'.format(
-            self.min_ious, self.min_crop_size)
+        repr_str += f'(min_ious={self.min_ious}, '
+        repr_str += f'min_crop_size={self.min_crop_size})'
         return repr_str
 
 
@@ -700,8 +694,8 @@ class Corrupt(object):
 
     def __repr__(self):
         repr_str = self.__class__.__name__
-        repr_str += '(corruption={}, severity={})'.format(
-            self.corruption, self.severity)
+        repr_str += f'(corruption={self.corruption}, '
+        repr_str += f'severity={self.severity})'
         return repr_str
 
 
@@ -777,8 +771,7 @@ class Albu(object):
             obj_cls = obj_type
         else:
             raise TypeError(
-                'type must be a str or valid type, but got {}'.format(
-                    type(obj_type)))
+                f'type must be a str or valid type, but got {type(obj_type)}')
 
         if 'transforms' in args:
             args['transforms'] = [
@@ -866,6 +859,5 @@ class Albu(object):
         return results
 
     def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += '(transforms={})'.format(self.transforms)
+        repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
         return repr_str
diff --git a/mmdet/datasets/voc.py b/mmdet/datasets/voc.py
index 24065b9f..3c24c19b 100644
--- a/mmdet/datasets/voc.py
+++ b/mmdet/datasets/voc.py
@@ -32,7 +32,7 @@ class VOCDataset(XMLDataset):
             metric = metric[0]
         allowed_metrics = ['mAP', 'recall']
         if metric not in allowed_metrics:
-            raise KeyError('metric {} is not supported'.format(metric))
+            raise KeyError(f'metric {metric} is not supported')
         annotations = [self.get_ann_info(i) for i in range(len(self))]
         eval_results = {}
         if metric == 'mAP':
@@ -57,10 +57,9 @@ class VOCDataset(XMLDataset):
                 gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
             for i, num in enumerate(proposal_nums):
                 for j, iou in enumerate(iou_thr):
-                    eval_results['recall@{}@{}'.format(num, iou)] = recalls[i,
-                                                                            j]
+                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
             if recalls.shape[1] > 1:
                 ar = recalls.mean(axis=1)
                 for i, num in enumerate(proposal_nums):
-                    eval_results['AR@{}'.format(num)] = ar[i]
+                    eval_results[f'AR@{num}'] = ar[i]
         return eval_results
diff --git a/mmdet/datasets/wider_face.py b/mmdet/datasets/wider_face.py
index 9bf9f8f9..5f3e53d7 100644
--- a/mmdet/datasets/wider_face.py
+++ b/mmdet/datasets/wider_face.py
@@ -23,9 +23,9 @@ class WIDERFaceDataset(XMLDataset):
         data_infos = []
         img_ids = mmcv.list_from_file(ann_file)
         for img_id in img_ids:
-            filename = '{}.jpg'.format(img_id)
+            filename = f'{img_id}.jpg'
             xml_path = osp.join(self.img_prefix, 'Annotations',
-                                '{}.xml'.format(img_id))
+                                f'{img_id}.xml')
             tree = ET.parse(xml_path)
             root = tree.getroot()
             size = root.find('size')
diff --git a/mmdet/datasets/xml_style.py b/mmdet/datasets/xml_style.py
index 40cef1b0..5b6214eb 100644
--- a/mmdet/datasets/xml_style.py
+++ b/mmdet/datasets/xml_style.py
@@ -20,9 +20,9 @@ class XMLDataset(CustomDataset):
         data_infos = []
         img_ids = mmcv.list_from_file(ann_file)
         for img_id in img_ids:
-            filename = 'JPEGImages/{}.jpg'.format(img_id)
+            filename = f'JPEGImages/{img_id}.jpg'
             xml_path = osp.join(self.img_prefix, 'Annotations',
-                                '{}.xml'.format(img_id))
+                                f'{img_id}.xml')
             tree = ET.parse(xml_path)
             root = tree.getroot()
             size = root.find('size')
@@ -40,7 +40,7 @@ class XMLDataset(CustomDataset):
         for data_info in self.data_infos:
             img_id = data_info['id']
             xml_path = osp.join(self.img_prefix, 'Annotations',
-                                '{}.xml'.format(img_id))
+                                f'{img_id}.xml')
             tree = ET.parse(xml_path)
             root = tree.getroot()
             for obj in root.findall('object'):
@@ -53,8 +53,7 @@ class XMLDataset(CustomDataset):
 
     def get_ann_info(self, idx):
         img_id = self.data_infos[idx]['id']
-        xml_path = osp.join(self.img_prefix, 'Annotations',
-                            '{}.xml'.format(img_id))
+        xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
         tree = ET.parse(xml_path)
         root = tree.getroot()
         bboxes = []
diff --git a/mmdet/models/anchor_heads/anchor_head.py b/mmdet/models/anchor_heads/anchor_head.py
index 9158c218..83f33dcf 100644
--- a/mmdet/models/anchor_heads/anchor_head.py
+++ b/mmdet/models/anchor_heads/anchor_head.py
@@ -67,7 +67,7 @@ class AnchorHead(nn.Module):
             self.cls_out_channels = num_classes + 1
 
         if self.cls_out_channels <= 0:
-            raise ValueError('num_classes={} is too small'.format(num_classes))
+            raise ValueError(f'num_classes={num_classes} is too small')
         self.reg_decoded_bbox = reg_decoded_bbox
 
         self.background_label = (
diff --git a/mmdet/models/backbones/hrnet.py b/mmdet/models/backbones/hrnet.py
index ee0c6250..5ce5eb9a 100644
--- a/mmdet/models/backbones/hrnet.py
+++ b/mmdet/models/backbones/hrnet.py
@@ -43,18 +43,18 @@ class HRModule(nn.Module):
     def _check_branches(self, num_branches, num_blocks, in_channels,
                         num_channels):
         if num_branches != len(num_blocks):
-            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
-                num_branches, len(num_blocks))
+            error_msg = f'NUM_BRANCHES({num_branches}) ' \
+                f'!= NUM_BLOCKS({len(num_blocks)})'
             raise ValueError(error_msg)
 
         if num_branches != len(num_channels):
-            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
-                num_branches, len(num_channels))
+            error_msg = f'NUM_BRANCHES({num_branches}) ' \
+                f'!= NUM_CHANNELS({len(num_channels)})'
             raise ValueError(error_msg)
 
         if num_branches != len(in_channels):
-            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
-                num_branches, len(in_channels))
+            error_msg = f'NUM_BRANCHES({num_branches}) ' \
+                f'!= NUM_INCHANNELS({len(in_channels)})'
             raise ValueError(error_msg)
 
     def _make_one_branch(self,
diff --git a/mmdet/models/backbones/resnet.py b/mmdet/models/backbones/resnet.py
index ee219bd1..b541b4e0 100644
--- a/mmdet/models/backbones/resnet.py
+++ b/mmdet/models/backbones/resnet.py
@@ -218,7 +218,7 @@ class Bottleneck(nn.Module):
                 plugin,
                 in_channels=in_channels,
                 postfix=plugin.pop('postfix', ''))
-            assert not hasattr(self, name), 'duplicate plugin {}'.format(name)
+            assert not hasattr(self, name), f'duplicate plugin {name}'
             self.add_module(name, layer)
             plugin_names.append(name)
         return plugin_names
@@ -362,7 +362,7 @@ class ResNet(nn.Module):
                  zero_init_residual=True):
         super(ResNet, self).__init__()
         if depth not in self.arch_settings:
-            raise KeyError('invalid depth {} for resnet'.format(depth))
+            raise KeyError(f'invalid depth {depth} for resnet')
         self.depth = depth
         self.base_channels = base_channels
         self.num_stages = num_stages
@@ -417,7 +417,7 @@ class ResNet(nn.Module):
                 dcn=dcn,
                 plugins=stage_plugins)
             self.inplanes = planes * self.block.expansion
-            layer_name = 'layer{}'.format(i + 1)
+            layer_name = f'layer{i + 1}'
             self.add_module(layer_name, res_layer)
             self.res_layers.append(layer_name)
 
@@ -548,7 +548,7 @@ class ResNet(nn.Module):
                         param.requires_grad = False
 
         for i in range(1, self.frozen_stages + 1):
-            m = getattr(self, 'layer{}'.format(i))
+            m = getattr(self, f'layer{i}')
             m.eval()
             for param in m.parameters():
                 param.requires_grad = False
diff --git a/mmdet/models/detectors/base.py b/mmdet/models/detectors/base.py
index ce48002d..936eeba0 100644
--- a/mmdet/models/detectors/base.py
+++ b/mmdet/models/detectors/base.py
@@ -79,19 +79,17 @@ class BaseDetector(nn.Module, metaclass=ABCMeta):
 
     def init_weights(self, pretrained=None):
         if pretrained is not None:
-            print_log('load model from: {}'.format(pretrained), logger='root')
+            print_log(f'load model from: {pretrained}', logger='root')
 
     async def aforward_test(self, *, img, img_metas, **kwargs):
         for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
             if not isinstance(var, list):
-                raise TypeError('{} must be a list, but got {}'.format(
-                    name, type(var)))
+                raise TypeError(f'{name} must be a list, but got {type(var)}')
 
         num_augs = len(img)
         if num_augs != len(img_metas):
-            raise ValueError(
-                'num of augmentations ({}) != num of image metas ({})'.format(
-                    len(img), len(img_metas)))
+            raise ValueError(f'num of augmentations ({len(img)}) '
+                             f'!= num of image metas ({len(img_metas)})')
         # TODO: remove the restriction of samples_per_gpu == 1 when prepared
         samples_per_gpu = img[0].size(0)
         assert samples_per_gpu == 1
@@ -113,14 +111,12 @@ class BaseDetector(nn.Module, metaclass=ABCMeta):
         """
         for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
             if not isinstance(var, list):
-                raise TypeError('{} must be a list, but got {}'.format(
-                    name, type(var)))
+                raise TypeError(f'{name} must be a list, but got {type(var)}')
 
         num_augs = len(imgs)
         if num_augs != len(img_metas):
-            raise ValueError(
-                'num of augmentations ({}) != num of image meta ({})'.format(
-                    len(imgs), len(img_metas)))
+            raise ValueError(f'num of augmentations ({len(imgs)}) '
+                             f'!= num of image meta ({len(img_metas)})')
         # TODO: remove the restriction of samples_per_gpu == 1 when prepared
         samples_per_gpu = imgs[0].size(0)
         assert samples_per_gpu == 1
@@ -175,7 +171,7 @@ class BaseDetector(nn.Module, metaclass=ABCMeta):
         else:
             raise TypeError(
                 'dataset must be a valid dataset name or a sequence'
-                ' of class names, not {}'.format(type(dataset)))
+                f' of class names, not {type(dataset)}')
 
         for img, img_meta in zip(imgs, img_metas):
             h, w, _ = img_meta['img_shape']
diff --git a/mmdet/models/detectors/fast_rcnn.py b/mmdet/models/detectors/fast_rcnn.py
index 3bdc2686..062607a2 100644
--- a/mmdet/models/detectors/fast_rcnn.py
+++ b/mmdet/models/detectors/fast_rcnn.py
@@ -36,14 +36,12 @@ class FastRCNN(TwoStageDetector):
         """
         for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
             if not isinstance(var, list):
-                raise TypeError('{} must be a list, but got {}'.format(
-                    name, type(var)))
+                raise TypeError(f'{name} must be a list, but got {type(var)}')
 
         num_augs = len(imgs)
         if num_augs != len(img_metas):
-            raise ValueError(
-                'num of augmentations ({}) != num of image meta ({})'.format(
-                    len(imgs), len(img_metas)))
+            raise ValueError(f'num of augmentations ({len(imgs)}) '
+                             f'!= num of image metas ({len(img_metas)})')
         # TODO: remove the restriction of samples_per_gpu == 1 when prepared
         samples_per_gpu = imgs[0].size(0)
         assert samples_per_gpu == 1
diff --git a/mmdet/models/mask_heads/fcn_mask_head.py b/mmdet/models/mask_heads/fcn_mask_head.py
index 62849a58..abe915e7 100644
--- a/mmdet/models/mask_heads/fcn_mask_head.py
+++ b/mmdet/models/mask_heads/fcn_mask_head.py
@@ -38,9 +38,9 @@ class FCNMaskHead(nn.Module):
                 None, 'deconv', 'nearest', 'bilinear', 'carafe'
         ]:
             raise ValueError(
-                'Invalid upsample method {}, accepted methods '
-                'are "deconv", "nearest", "bilinear", "carafe"'.format(
-                    self.upsample_cfg['type']))
+                f'Invalid upsample method {self.upsample_cfg["type"]}, '
+                'accepted methods are "deconv", "nearest", "bilinear", '
+                '"carafe"')
         self.num_convs = num_convs
         # WARN: roi_feat_size is reserved and not used
         self.roi_feat_size = _pair(roi_feat_size)
diff --git a/mmdet/models/necks/fpn.py b/mmdet/models/necks/fpn.py
index 54daa8ca..7b5596ca 100644
--- a/mmdet/models/necks/fpn.py
+++ b/mmdet/models/necks/fpn.py
@@ -45,7 +45,7 @@ class FPN(nn.Module):
         >>> self = FPN(in_channels, 11, len(in_channels)).eval()
         >>> outputs = self.forward(inputs)
         >>> for i in range(len(outputs)):
-        ...     print('outputs[{}].shape = {!r}'.format(i, outputs[i].shape))
+        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
         outputs[0].shape = torch.Size([1, 11, 340, 340])
         outputs[1].shape = torch.Size([1, 11, 170, 170])
         outputs[2].shape = torch.Size([1, 11, 84, 84])
diff --git a/mmdet/models/roi_heads/cascade_roi_head.py b/mmdet/models/roi_heads/cascade_roi_head.py
index 6e42eb13..af7dca14 100644
--- a/mmdet/models/roi_heads/cascade_roi_head.py
+++ b/mmdet/models/roi_heads/cascade_roi_head.py
@@ -243,7 +243,7 @@ class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
                                                     rcnn_train_cfg)
 
             for name, value in bbox_results['loss_bbox'].items():
-                losses['s{}.{}'.format(i, name)] = (
+                losses[f's{i}.{name}'] = (
                     value * lw if 'loss' in name else value)
 
             # mask head forward and loss
@@ -254,7 +254,7 @@ class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
                 # TODO: Support empty tensor input. #2280
                 if mask_results['loss_mask'] is not None:
                     for name, value in mask_results['loss_mask'].items():
-                        losses['s{}.{}'.format(i, name)] = (
+                        losses[f's{i}.{name}'] = (
                             value * lw if 'loss' in name else value)
 
             # refine bboxes
diff --git a/mmdet/models/roi_heads/htc_roi_head.py b/mmdet/models/roi_heads/htc_roi_head.py
index 73426e38..c3eae5fc 100644
--- a/mmdet/models/roi_heads/htc_roi_head.py
+++ b/mmdet/models/roi_heads/htc_roi_head.py
@@ -250,7 +250,7 @@ class HybridTaskCascadeRoIHead(CascadeRoIHead):
             roi_labels = bbox_results['bbox_targets'][0]
 
             for name, value in bbox_results['loss_bbox'].items():
-                losses['s{}.{}'.format(i, name)] = (
+                losses[f's{i}.{name}'] = (
                     value * lw if 'loss' in name else value)
 
             # mask head forward and loss
@@ -280,7 +280,7 @@ class HybridTaskCascadeRoIHead(CascadeRoIHead):
                     i, x, sampling_results, gt_masks, rcnn_train_cfg,
                     semantic_feat)
                 for name, value in mask_results['loss_mask'].items():
-                    losses['s{}.{}'.format(i, name)] = (
+                    losses[f's{i}.{name}'] = (
                         value * lw if 'loss' in name else value)
 
             # refine bboxes (same as Cascade R-CNN)
diff --git a/mmdet/models/shared_heads/res_layer.py b/mmdet/models/shared_heads/res_layer.py
index ded192d3..cfff143d 100644
--- a/mmdet/models/shared_heads/res_layer.py
+++ b/mmdet/models/shared_heads/res_layer.py
@@ -43,7 +43,7 @@ class ResLayer(nn.Module):
             with_cp=with_cp,
             norm_cfg=self.norm_cfg,
             dcn=dcn)
-        self.add_module('layer{}'.format(stage + 1), res_layer)
+        self.add_module(f'layer{stage + 1}', res_layer)
 
     def init_weights(self, pretrained=None):
         if isinstance(pretrained, str):
@@ -60,7 +60,7 @@ class ResLayer(nn.Module):
 
     @auto_fp16()
     def forward(self, x):
-        res_layer = getattr(self, 'layer{}'.format(self.stage + 1))
+        res_layer = getattr(self, f'layer{self.stage + 1}')
         out = res_layer(x)
         return out
 
diff --git a/mmdet/ops/activation.py b/mmdet/ops/activation.py
index 013c5bb1..53b72b2a 100644
--- a/mmdet/ops/activation.py
+++ b/mmdet/ops/activation.py
@@ -28,7 +28,7 @@ def build_activation_layer(cfg):
 
     layer_type = cfg_.pop('type')
     if layer_type not in activation_cfg:
-        raise KeyError('Unrecognized activation type {}'.format(layer_type))
+        raise KeyError(f'Unrecognized activation type {layer_type}')
     else:
         activation = activation_cfg[layer_type]
         if activation is None:
diff --git a/mmdet/ops/carafe/grad_check.py b/mmdet/ops/carafe/grad_check.py
index 60b79d37..9ddb2398 100644
--- a/mmdet/ops/carafe/grad_check.py
+++ b/mmdet/ops/carafe/grad_check.py
@@ -39,9 +39,10 @@ for i in range(loop_num):
     torch.cuda.synchronize()
     time_backward += timer.since_last_check()
     bar.update()
-print('\nCARAFE time forward: {} ms/iter | time backward: {} ms/iter'.format(
-    (time_forward + 1e-3) * 1e3 / loop_num,
-    (time_backward + 1e-3) * 1e3 / loop_num))
+forward_speed = (time_forward + 1e-3) * 1e3 / loop_num
+backward_speed = (time_backward + 1e-3) * 1e3 / loop_num
+print(f'\nCARAFE time forward: {forward_speed} '
+      f'ms/iter | time backward: {backward_speed} ms/iter')
 
 time_naive_forward = 0
 time_naive_backward = 0
@@ -55,6 +56,7 @@ for i in range(loop_num):
     torch.cuda.synchronize()
     time_naive_backward += timer.since_last_check()
     bar.update()
-print('\nCARAFE naive time forward: {} ms/iter | time backward: {} ms/iter'.
-      format((time_naive_forward + 1e-3) * 1e3 / loop_num,
-             (time_naive_backward + 1e-3) * 1e3 / loop_num))
+forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num
+backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num
+print('\nCARAFE naive time forward: '
+      f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
diff --git a/mmdet/ops/conv.py b/mmdet/ops/conv.py
index a5efb837..8316001d 100644
--- a/mmdet/ops/conv.py
+++ b/mmdet/ops/conv.py
@@ -30,7 +30,7 @@ def build_conv_layer(cfg, *args, **kwargs):
 
     layer_type = cfg_.pop('type')
     if layer_type not in conv_cfg:
-        raise KeyError('Unrecognized norm type {}'.format(layer_type))
+        raise KeyError(f'Unrecognized conv type {layer_type}')
     else:
         conv_layer = conv_cfg[layer_type]
 
diff --git a/mmdet/ops/dcn/deform_conv.py b/mmdet/ops/dcn/deform_conv.py
index 36ab443a..29ba43d1 100644
--- a/mmdet/ops/dcn/deform_conv.py
+++ b/mmdet/ops/dcn/deform_conv.py
@@ -25,9 +25,8 @@ class DeformConvFunction(Function):
                 deformable_groups=1,
                 im2col_step=64):
         if input is not None and input.dim() != 4:
-            raise ValueError(
-                'Expected 4D tensor as input, got {}D tensor instead.'.format(
-                    input.dim()))
+            raise ValueError(f'Expected 4D tensor as input, '
+                             f'got {input.dim()}D tensor instead.')
         ctx.stride = _pair(stride)
         ctx.padding = _pair(padding)
         ctx.dilation = _pair(dilation)
@@ -106,9 +105,8 @@ class DeformConvFunction(Function):
             stride_ = stride[d]
             output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
         if not all(map(lambda s: s > 0, output_size)):
-            raise ValueError(
-                'convolution input is too small (output would be {})'.format(
-                    'x'.join(map(str, output_size))))
+            raise ValueError('convolution input is too small (output would be '
+                             f'{"x".join(map(str, output_size))})')
         return output_size
 
 
@@ -205,11 +203,10 @@ class DeformConv(nn.Module):
 
         assert not bias
         assert in_channels % groups == 0, \
-            'in_channels {} cannot be divisible by groups {}'.format(
-                in_channels, groups)
+            f'in_channels {in_channels} is not divisible by groups {groups}'
         assert out_channels % groups == 0, \
-            'out_channels {} cannot be divisible by groups {}'.format(
-                out_channels, groups)
+            f'out_channels {out_channels} is not divisible ' \
+            f'by groups {groups}'
 
         self.in_channels = in_channels
         self.out_channels = out_channels
@@ -314,8 +311,8 @@ class DeformConvPack(DeformConv):
 
         if version is not None and version > 1:
             print_log(
-                'DeformConvPack {} is upgraded to version 2.'.format(
-                    prefix.rstrip('.')),
+                f'DeformConvPack {prefix.rstrip(".")} is upgraded to '
+                'version 2.',
                 logger='root')
 
         super()._load_from_state_dict(state_dict, prefix, local_metadata,
@@ -437,8 +434,8 @@ class ModulatedDeformConvPack(ModulatedDeformConv):
 
         if version is not None and version > 1:
             print_log(
-                'ModulatedDeformConvPack {} is upgraded to version 2.'.format(
-                    prefix.rstrip('.')),
+                f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to '
+                'version 2.',
                 logger='root')
 
         super()._load_from_state_dict(state_dict, prefix, local_metadata,
diff --git a/mmdet/ops/nms/nms_wrapper.py b/mmdet/ops/nms/nms_wrapper.py
index c48ec5cb..ed4ee612 100644
--- a/mmdet/ops/nms/nms_wrapper.py
+++ b/mmdet/ops/nms/nms_wrapper.py
@@ -39,12 +39,11 @@ def nms(dets, iou_thr, device_id=None):
         dets_th = dets
     elif isinstance(dets, np.ndarray):
         is_numpy = True
-        device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
+        device = 'cpu' if device_id is None else f'cuda:{device_id}'
         dets_th = torch.from_numpy(dets).to(device)
     else:
-        raise TypeError(
-            'dets must be either a Tensor or numpy array, but got {}'.format(
-                type(dets)))
+        raise TypeError('dets must be either a Tensor or numpy array, '
+                        f'but got {type(dets)}')
 
     # execute cpu or cuda nms
     if dets_th.shape[0] == 0:
@@ -96,13 +95,12 @@ def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
         is_tensor = False
         dets_t = torch.from_numpy(dets)
     else:
-        raise TypeError(
-            'dets must be either a Tensor or numpy array, but got {}'.format(
-                type(dets)))
+        raise TypeError('dets must be either a Tensor or numpy array, '
+                        f'but got {type(dets)}')
 
     method_codes = {'linear': 1, 'gaussian': 2}
     if method not in method_codes:
-        raise ValueError('Invalid method for SoftNMS: {}'.format(method))
+        raise ValueError(f'Invalid method for SoftNMS: {method}')
     results = nms_ext.soft_nms(dets_t, iou_thr, method_codes[method], sigma,
                                min_score)
 
diff --git a/mmdet/ops/norm.py b/mmdet/ops/norm.py
index d5687cbd..99fc9311 100644
--- a/mmdet/ops/norm.py
+++ b/mmdet/ops/norm.py
@@ -30,7 +30,7 @@ def build_norm_layer(cfg, num_features, postfix=''):
 
     layer_type = cfg_.pop('type')
     if layer_type not in norm_cfg:
-        raise KeyError('Unrecognized norm type {}'.format(layer_type))
+        raise KeyError(f'Unrecognized norm type {layer_type}')
     else:
         abbr, norm_layer = norm_cfg[layer_type]
         if norm_layer is None:
diff --git a/mmdet/ops/plugin.py b/mmdet/ops/plugin.py
index a39a3123..a104c1f5 100644
--- a/mmdet/ops/plugin.py
+++ b/mmdet/ops/plugin.py
@@ -29,7 +29,7 @@ def build_plugin_layer(cfg, postfix='', **kwargs):
 
     layer_type = cfg_.pop('type')
     if layer_type not in plugin_cfg:
-        raise KeyError('Unrecognized plugin type {}'.format(layer_type))
+        raise KeyError(f'Unrecognized plugin type {layer_type}')
     else:
         abbr, plugin_layer = plugin_cfg[layer_type]
 
diff --git a/mmdet/ops/roi_align/roi_align.py b/mmdet/ops/roi_align/roi_align.py
index 441be14e..27be883b 100644
--- a/mmdet/ops/roi_align/roi_align.py
+++ b/mmdet/ops/roi_align/roi_align.py
@@ -144,9 +144,11 @@ class RoIAlign(nn.Module):
                              self.sample_num, self.aligned)
 
     def __repr__(self):
+        indent_str = '\n    '
         format_str = self.__class__.__name__
-        format_str += '(out_size={}, spatial_scale={}, sample_num={}'.format(
-            self.out_size, self.spatial_scale, self.sample_num)
-        format_str += ', use_torchvision={}, aligned={})'.format(
-            self.use_torchvision, self.aligned)
+        format_str += f'({indent_str}out_size={self.out_size},'
+        format_str += f'{indent_str}spatial_scale={self.spatial_scale},'
+        format_str += f'{indent_str}sample_num={self.sample_num},'
+        format_str += f'{indent_str}use_torchvision={self.use_torchvision},'
+        format_str += f'{indent_str}aligned={self.aligned})'
         return format_str
diff --git a/mmdet/ops/roi_pool/roi_pool.py b/mmdet/ops/roi_pool/roi_pool.py
index 5f52805a..13c2708b 100644
--- a/mmdet/ops/roi_pool/roi_pool.py
+++ b/mmdet/ops/roi_pool/roi_pool.py
@@ -69,7 +69,7 @@ class RoIPool(nn.Module):
 
     def __repr__(self):
         format_str = self.__class__.__name__
-        format_str += '(out_size={}, spatial_scale={}'.format(
-            self.out_size, self.spatial_scale)
-        format_str += ', use_torchvision={})'.format(self.use_torchvision)
+        format_str += f'(out_size={self.out_size}, '
+        format_str += f'spatial_scale={self.spatial_scale}, '
+        format_str += f'use_torchvision={self.use_torchvision})'
         return format_str
diff --git a/mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py b/mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py
index 62e584e6..0715af38 100644
--- a/mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py
+++ b/mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py
@@ -49,6 +49,6 @@ class SigmoidFocalLoss(nn.Module):
         return loss.sum()
 
     def __repr__(self):
-        tmpstr = self.__class__.__name__ + '(gamma={}, alpha={})'.format(
-            self.gamma, self.alpha)
+        tmpstr = self.__class__.__name__
+        tmpstr += f'(gamma={self.gamma}, alpha={self.alpha})'
         return tmpstr
diff --git a/mmdet/ops/upsample.py b/mmdet/ops/upsample.py
index 5501dc9a..2e405a03 100644
--- a/mmdet/ops/upsample.py
+++ b/mmdet/ops/upsample.py
@@ -69,7 +69,7 @@ def build_upsample_layer(cfg):
 
     layer_type = cfg_.pop('type')
     if layer_type not in upsample_cfg:
-        raise KeyError('Unrecognized upsample type {}'.format(layer_type))
+        raise KeyError(f'Unrecognized upsample type {layer_type}')
     else:
         upsample = upsample_cfg[layer_type]
         if upsample is None:
diff --git a/mmdet/utils/collect_env.py b/mmdet/utils/collect_env.py
index f84cd5b5..455383f0 100644
--- a/mmdet/utils/collect_env.py
+++ b/mmdet/utils/collect_env.py
@@ -27,7 +27,7 @@ def collect_env():
             try:
                 nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                 nvcc = subprocess.check_output(
-                    '"{}" -V | tail -n1'.format(nvcc), shell=True)
+                    f'"{nvcc}" -V | tail -n1', shell=True)
                 nvcc = nvcc.decode('utf-8').strip()
             except subprocess.SubprocessError:
                 nvcc = 'Not Available'
@@ -60,4 +60,4 @@ def collect_env():
 
 if __name__ == '__main__':
     for name, val in collect_env().items():
-        print('{}: {}'.format(name, val))
+        print(f'{name}: {val}')
diff --git a/mmdet/utils/contextmanagers.py b/mmdet/utils/contextmanagers.py
index 3e81268a..11eb2e5d 100644
--- a/mmdet/utils/contextmanagers.py
+++ b/mmdet/utils/contextmanagers.py
@@ -85,7 +85,7 @@ async def completed(trace_name='',
             stream_times_ms = ''
             for i, stream in enumerate(streams):
                 elapsed_time = start.elapsed_time(end_events[i])
-                stream_times_ms += ' {} {:.2f} ms'.format(stream, elapsed_time)
+                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
             logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                         stream_times_ms)
 
diff --git a/mmdet/utils/flops_counter.py b/mmdet/utils/flops_counter.py
index df2163fd..fb484e30 100644
--- a/mmdet/utils/flops_counter.py
+++ b/mmdet/utils/flops_counter.py
@@ -125,7 +125,7 @@ def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
         return ', '.join([
             flops_to_string(
                 accumulated_flops_cost, units=units, precision=precision),
-            '{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
+            f'{accumulated_flops_cost / total_flops:.3%} MACs',
             self.original_extra_repr()
         ])
 
diff --git a/mmdet/utils/logger.py b/mmdet/utils/logger.py
index 3e6a1396..825ee02e 100644
--- a/mmdet/utils/logger.py
+++ b/mmdet/utils/logger.py
@@ -63,4 +63,4 @@ def print_log(msg, logger=None, level=logging.INFO):
     elif logger != 'silent':
         raise TypeError(
             'logger should be either a logging.Logger object, "root", '
-            '"silent" or None, but got {}'.format(logger))
+            f'"silent" or None, but got {logger}')
diff --git a/mmdet/utils/profiling.py b/mmdet/utils/profiling.py
index 2448ba05..76428968 100644
--- a/mmdet/utils/profiling.py
+++ b/mmdet/utils/profiling.py
@@ -35,7 +35,6 @@ if sys.version_info >= (3, 7):
             end.synchronize()
             cpu_time = (cpu_end - cpu_start) * 1000
             gpu_time = start.elapsed_time(end)
-            msg = '{} {} cpu_time {:.2f} ms '.format(trace_name, name,
-                                                     cpu_time)
-            msg += 'gpu_time {:.2f} ms stream {}'.format(gpu_time, stream)
+            msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
+            msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
             print(msg, end_stream)
diff --git a/mmdet/utils/util_mixins.py b/mmdet/utils/util_mixins.py
index e938ec75..f00d4f94 100644
--- a/mmdet/utils/util_mixins.py
+++ b/mmdet/utils/util_mixins.py
@@ -20,8 +20,8 @@ Example:
     ...        return self.name
     >>> s1 = Student('Alice')
     >>> s2 = Student('Bob')
-    >>> print('s1 = {}'.format(s1))
-    >>> print('s2 = {}'.format(s2))
+    >>> print(f's1 = {s1}')
+    >>> print(f's2 = {s2}')
     s1 = <Student(Alice)>
     s2 = <Student(Bob)>
 
@@ -33,7 +33,7 @@ Example:
     ...    def __len__(self):
     ...        return len(self.data)
     >>> g = Group([1, 2, 3])
-    >>> print('g = {}'.format(g))
+    >>> print(f'g = {g}')
     g = <Group(3)>
 
 """
@@ -83,13 +83,13 @@ class NiceRepr(object):
         else:
             # In all other cases force the subclass to overload __nice__
             raise NotImplementedError(
-                'Define the __nice__ method for {!r}'.format(self.__class__))
+                f'Define the __nice__ method for {self.__class__!r}')
 
     def __repr__(self):
         try:
             nice = self.__nice__()
             classname = self.__class__.__name__
-            return '<{0}({1}) at {2}>'.format(classname, nice, hex(id(self)))
+            return f'<{classname}({nice}) at {hex(id(self))}>'
         except NotImplementedError as ex:
             warnings.warn(str(ex), category=RuntimeWarning)
             return object.__repr__(self)
@@ -98,7 +98,7 @@ class NiceRepr(object):
         try:
             classname = self.__class__.__name__
             nice = self.__nice__()
-            return '<{0}({1})>'.format(classname, nice)
+            return f'<{classname}({nice})>'
         except NotImplementedError as ex:
             warnings.warn(str(ex), category=RuntimeWarning)
             return object.__repr__(self)
diff --git a/setup.py b/setup.py
index e70a5311..390d08a1 100755
--- a/setup.py
+++ b/setup.py
@@ -20,9 +20,9 @@ MINOR = 1
 PATCH = 0
 SUFFIX = ''
 if PATCH != '':
-    SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
+    SHORT_VERSION = f'{MAJOR}.{MINOR}.{PATCH}{SUFFIX}'
 else:
-    SHORT_VERSION = '{}.{}{}'.format(MAJOR, MINOR, SUFFIX)
+    SHORT_VERSION = f'{MAJOR}.{MINOR}{SUFFIX}'
 
 version_file = 'mmdet/version.py'
 
@@ -103,12 +103,12 @@ def make_cuda_ext(name, module, sources, sources_cuda=[]):
         ]
         sources += sources_cuda
     else:
-        print('Compiling {} without CUDA'.format(name))
+        print(f'Compiling {name} without CUDA')
         extension = CppExtension
         # raise EnvironmentError('CUDA is required to compile MMDetection!')
 
     return extension(
-        name='{}.{}'.format(module, name),
+        name=f'{module}.{name}',
         sources=[os.path.join(*module.split('.'), p) for p in sources],
         define_macros=define_macros,
         extra_compile_args=extra_compile_args)
diff --git a/tests/async_benchmark.py b/tests/async_benchmark.py
index d63c56bf..eb76c36b 100644
--- a/tests/async_benchmark.py
+++ b/tests/async_benchmark.py
@@ -39,13 +39,13 @@ async def main():
     if not os.path.exists(checkpoint_file):
         url = ('https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection'
                '/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth')
-        print('Downloading {} ...'.format(url))
+        print(f'Downloading {url} ...')
         local_filename, _ = urllib.request.urlretrieve(url)
         os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
         shutil.move(local_filename, checkpoint_file)
-        print('Saved as {}'.format(checkpoint_file))
+        print(f'Saved as {checkpoint_file}')
     else:
-        print('Using existing checkpoint {}'.format(checkpoint_file))
+        print(f'Using existing checkpoint {checkpoint_file}')
 
     device = 'cuda:0'
     model = init_detector(
diff --git a/tests/test_backbone.py b/tests/test_backbone.py
index 2cc0ac35..6f3d1f9d 100644
--- a/tests/test_backbone.py
+++ b/tests/test_backbone.py
@@ -342,7 +342,7 @@ def test_resnet_backbone():
         for param in layer.parameters():
             assert param.requires_grad is False
     for i in range(1, frozen_stages + 1):
-        layer = getattr(model, 'layer{}'.format(i))
+        layer = getattr(model, f'layer{i}')
         for mod in layer.modules():
             if isinstance(mod, _BatchNorm):
                 assert mod.training is False
@@ -358,7 +358,7 @@ def test_resnet_backbone():
     for param in model.stem.parameters():
         assert param.requires_grad is False
     for i in range(1, frozen_stages + 1):
-        layer = getattr(model, 'layer{}'.format(i))
+        layer = getattr(model, f'layer{i}')
         for mod in layer.modules():
             if isinstance(mod, _BatchNorm):
                 assert mod.training is False
diff --git a/tests/test_config.py b/tests/test_config.py
index 68cb2fad..7ee543d2 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -29,14 +29,14 @@ def test_config_build_detector():
     from mmdet.models import build_detector
 
     config_dpath = _get_config_directory()
-    print('Found config_dpath = {!r}'.format(config_dpath))
+    print(f'Found config_dpath = {config_dpath!r}')
 
     import glob
     config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
     config_fpaths = [p for p in config_fpaths if p.find('_base_') == -1]
     config_names = [relpath(p, config_dpath) for p in config_fpaths]
 
-    print('Using {} config files'.format(len(config_names)))
+    print(f'Using {len(config_names)} config files')
 
     for config_fname in config_names:
         config_fpath = join(config_dpath, config_fname)
@@ -45,7 +45,7 @@ def test_config_build_detector():
         config_mod.model
         config_mod.train_cfg
         config_mod.test_cfg
-        print('Building detector, config_fpath = {!r}'.format(config_fpath))
+        print(f'Building detector, config_fpath = {config_fpath!r}')
 
         # Remove pretrained keys to allow for testing in an offline environment
         if 'pretrained' in config_mod.model:
@@ -87,7 +87,7 @@ def test_config_data_pipeline():
     import numpy as np
 
     config_dpath = _get_config_directory()
-    print('Found config_dpath = {!r}'.format(config_dpath))
+    print(f'Found config_dpath = {config_dpath!r}')
 
     # Only tests a representative subset of configurations
     # TODO: test pipelines using Albu, current Albu throw None given empty GT
@@ -117,7 +117,7 @@ def test_config_data_pipeline():
             masks = PolygonMasks(masks, h, w)
         return masks
 
-    print('Using {} config files'.format(len(config_names)))
+    print(f'Using {len(config_names)} config files')
 
     for config_fname in config_names:
         config_fpath = join(config_dpath, config_fname)
@@ -131,10 +131,9 @@ def test_config_data_pipeline():
         train_pipeline = Compose(config_mod.train_pipeline)
         test_pipeline = Compose(config_mod.test_pipeline)
 
-        print(
-            'Building data pipeline, config_fpath = {!r}'.format(config_fpath))
+        print(f'Building data pipeline, config_fpath = {config_fpath!r}')
 
-        print('Test training data pipeline: \n{!r}'.format(train_pipeline))
+        print(f'Test training data pipeline: \n{train_pipeline!r}')
         img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
         if loading_pipeline.get('to_float32', False):
             img = img.astype(np.float32)
@@ -154,7 +153,7 @@ def test_config_data_pipeline():
         output_results = train_pipeline(results)
         assert output_results is not None
 
-        print('Test testing data pipeline: \n{!r}'.format(test_pipeline))
+        print(f'Test testing data pipeline: \n{test_pipeline!r}')
         results = dict(
             filename='test_img.png',
             img=img,
@@ -170,8 +169,8 @@ def test_config_data_pipeline():
         assert output_results is not None
 
         # test empty GT
-        print('Test empty GT with training data pipeline: \n{!r}'.format(
-            train_pipeline))
+        print('Test empty GT with training data pipeline: '
+              f'\n{train_pipeline!r}')
         results = dict(
             filename='test_img.png',
             img=img,
@@ -187,8 +186,7 @@ def test_config_data_pipeline():
         output_results = train_pipeline(results)
         assert output_results is not None
 
-        print('Test empty GT with testing data pipeline: \n{!r}'.format(
-            test_pipeline))
+        print(f'Test empty GT with testing data pipeline: \n{test_pipeline!r}')
         results = dict(
             filename='test_img.png',
             img=img,
diff --git a/tests/test_heads.py b/tests/test_heads.py
index 923b9bc1..64a74194 100644
--- a/tests/test_heads.py
+++ b/tests/test_heads.py
@@ -334,7 +334,7 @@ def test_refine_boxes():
             n_img = demokw['n_img']
             rng = demokw['rng']
 
-            print('Test refine_boxes case: {!r}'.format(demokw))
+            print(f'Test refine_boxes case: {demokw!r}')
             tup = _demodata_refine_boxes(n_roi, n_img, rng=rng)
             rois, labels, bbox_preds, pos_is_gts, img_metas = tup
             bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
@@ -343,7 +343,7 @@ def test_refine_boxes():
             assert sum(map(len, bboxes_list)) <= n_roi
             assert all(b.shape[1] == 4 for b in bboxes_list)
         except Exception:
-            print('Test failed with demokw={!r}'.format(demokw))
+            print(f'Test failed with demokw={demokw!r}')
             raise
 
 
diff --git a/tests/test_nms.py b/tests/test_nms.py
index e99af88e..a2285dec 100644
--- a/tests/test_nms.py
+++ b/tests/test_nms.py
@@ -57,7 +57,7 @@ def test_nms_device_and_dtypes_gpu():
                           [35.2, 11.7, 39.7, 15.7, 0.3]])
 
     for device_id in range(torch.cuda.device_count()):
-        print('Run NMS on device_id = {!r}'.format(device_id))
+        print(f'Run NMS on device_id = {device_id!r}')
         # GPU can handle float32 but not float64
         dets = base_dets.astype(np.float32)
         supressed, inds = nms(dets, iou_thr, device_id)
diff --git a/tests/test_wrappers.py b/tests/test_wrappers.py
index e60f93ab..1ae38f70 100644
--- a/tests/test_wrappers.py
+++ b/tests/test_wrappers.py
@@ -171,7 +171,7 @@ def test_nn_op_forward_called():
     torch.__version__ = '1.4.1'
 
     for m in ['Conv2d', 'ConvTranspose2d', 'MaxPool2d']:
-        with patch('torch.nn.{}.forward'.format(m)) as nn_module_forward:
+        with patch(f'torch.nn.{m}.forward') as nn_module_forward:
             # randn input
             x_empty = torch.randn(0, 3, 10, 10)
             wrapper = eval(m)(3, 2, 1)
diff --git a/tools/analyze_logs.py b/tools/analyze_logs.py
index 0608a75c..89edc101 100644
--- a/tools/analyze_logs.py
+++ b/tools/analyze_logs.py
@@ -9,8 +9,7 @@ import seaborn as sns
 
 def cal_train_time(log_dicts, args):
     for i, log_dict in enumerate(log_dicts):
-        print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i],
-                                                    '-' * 5))
+        print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
         all_times = []
         for epoch in log_dict.keys():
             if args.include_outliers:
@@ -22,12 +21,12 @@ def cal_train_time(log_dicts, args):
         slowest_epoch = epoch_ave_time.argmax()
         fastest_epoch = epoch_ave_time.argmin()
         std_over_epoch = epoch_ave_time.std()
-        print('slowest epoch {}, average time is {:.4f}'.format(
-            slowest_epoch + 1, epoch_ave_time[slowest_epoch]))
-        print('fastest epoch {}, average time is {:.4f}'.format(
-            fastest_epoch + 1, epoch_ave_time[fastest_epoch]))
-        print('time std over epochs is {:.4f}'.format(std_over_epoch))
-        print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
+        print(f'slowest epoch {slowest_epoch + 1}, '
+              f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
+        print(f'fastest epoch {fastest_epoch + 1}, '
+              f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
+        print(f'time std over epochs is {std_over_epoch:.4f}')
+        print(f'average iter time: {np.mean(all_times):.4f} s/iter')
         print()
 
 
@@ -41,7 +40,7 @@ def plot_curve(log_dicts, args):
         legend = []
         for json_log in args.json_logs:
             for metric in args.keys:
-                legend.append('{}_{}'.format(json_log, metric))
+                legend.append(f'{json_log}_{metric}')
     assert len(legend) == (len(args.json_logs) * len(args.keys))
     metrics = args.keys
 
@@ -49,11 +48,10 @@ def plot_curve(log_dicts, args):
     for i, log_dict in enumerate(log_dicts):
         epochs = list(log_dict.keys())
         for j, metric in enumerate(metrics):
-            print('plot curve of {}, metric is {}'.format(
-                args.json_logs[i], metric))
+            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
             if metric not in log_dict[epochs[0]]:
-                raise KeyError('{} does not contain metric {}'.format(
-                    args.json_logs[i], metric))
+                raise KeyError(
+                    f'{args.json_logs[i]} does not contain metric {metric}')
 
             if 'mAP' in metric:
                 xs = np.arange(1, max(epochs) + 1)
@@ -86,7 +84,7 @@ def plot_curve(log_dicts, args):
     if args.out is None:
         plt.show()
     else:
-        print('save curve to: {}'.format(args.out))
+        print(f'save curve to: {args.out}')
         plt.savefig(args.out)
         plt.cla()
 
diff --git a/tools/coco_error_analysis.py b/tools/coco_error_analysis.py
index 6aeadadb..fba96caf 100644
--- a/tools/coco_error_analysis.py
+++ b/tools/coco_error_analysis.py
@@ -37,7 +37,7 @@ def makeplot(rs, ps, outDir, class_name, iou_type):
                 ps_curve[k],
                 ps_curve[k + 1],
                 color=cs[k],
-                label=str('[{:.3f}'.format(aps[k]) + ']' + types[k]))
+                label=f'[{aps[k]:.3f}]{types[k]}')
         plt.xlabel('recall')
         plt.ylabel('precision')
         plt.xlim(0, 1.)
@@ -45,14 +45,13 @@ def makeplot(rs, ps, outDir, class_name, iou_type):
         plt.title(figure_tile)
         plt.legend()
         # plt.show()
-        fig.savefig(outDir + '/{}.png'.format(figure_tile))
+        fig.savefig(outDir + f'/{figure_tile}.png')
         plt.close(fig)
 
 
 def analyze_individual_category(k, cocoDt, cocoGt, catId, iou_type):
     nm = cocoGt.loadCats(catId)[0]
-    print('--------------analyzing {}-{}---------------'.format(
-        k + 1, nm['name']))
+    print(f'--------------analyzing {k + 1}-{nm["name"]}---------------')
     ps_ = {}
     dt = copy.deepcopy(cocoDt)
     nm = cocoGt.loadCats(catId)[0]
@@ -107,7 +106,7 @@ def analyze_results(res_file, ann_file, res_types, out_dir):
 
     directory = os.path.dirname(out_dir + '/')
     if not os.path.exists(directory):
-        print('-------------create {}-----------------'.format(out_dir))
+        print(f'-------------create {out_dir}-----------------')
         os.makedirs(directory)
 
     cocoGt = COCO(ann_file)
@@ -117,8 +116,7 @@ def analyze_results(res_file, ann_file, res_types, out_dir):
         res_out_dir = out_dir + '/' + res_type + '/'
         res_directory = os.path.dirname(res_out_dir)
         if not os.path.exists(res_directory):
-            print(
-                '-------------create {}-----------------'.format(res_out_dir))
+            print(f'-------------create {res_out_dir}-----------------')
             os.makedirs(res_directory)
         iou_type = res_type
         cocoEval = COCOeval(
@@ -138,8 +136,7 @@ def analyze_results(res_file, ann_file, res_types, out_dir):
             analyze_results = pool.starmap(analyze_individual_category, args)
         for k, catId in enumerate(catIds):
             nm = cocoGt.loadCats(catId)[0]
-            print('--------------saving {}-{}---------------'.format(
-                k + 1, nm['name']))
+            print(f'--------------saving {k + 1}-{nm["name"]}---------------')
             analyze_result = analyze_results[k]
             assert k == analyze_result[0]
             ps_supercategory = analyze_result[1]['ps_supercategory']
diff --git a/tools/convert_datasets/cityscapes.py b/tools/convert_datasets/cityscapes.py
index ae4fe3a3..86ef84fa 100644
--- a/tools/convert_datasets/cityscapes.py
+++ b/tools/convert_datasets/cityscapes.py
@@ -19,8 +19,8 @@ def collect_files(img_dir, gt_dir):
         segm_file = gt_dir + img_file[
             len(img_dir):-len(suffix)] + 'gtFine_labelIds.png'
         files.append((img_file, inst_file, segm_file))
-    assert len(files), 'No images found in {}'.format(img_dir)
-    print('Loaded {} images from {}'.format(len(files), img_dir))
+    assert len(files), f'No images found in {img_dir}'
+    print(f'Loaded {len(files)} images from {img_dir}')
 
     return files
 
@@ -138,7 +138,7 @@ def main():
         test='instancesonly_filtered_gtFine_test.json')
 
     for split, json_name in set_name.items():
-        print('Converting {} into {}'.format(split, json_name))
+        print(f'Converting {split} into {json_name}')
         with mmcv.Timer(
                 print_tmpl='It took {}s to convert Cityscapes annotation'):
             files = collect_files(
diff --git a/tools/convert_datasets/pascal_voc.py b/tools/convert_datasets/pascal_voc.py
index 4c30c0fb..307c93cb 100644
--- a/tools/convert_datasets/pascal_voc.py
+++ b/tools/convert_datasets/pascal_voc.py
@@ -70,20 +70,18 @@ def cvt_annotations(devkit_path, years, split, out_file):
     annotations = []
     for year in years:
         filelist = osp.join(devkit_path,
-                            'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
+                            f'VOC{year}/ImageSets/Main/{split}.txt')
         if not osp.isfile(filelist):
-            print('filelist does not exist: {}, skip voc{} {}'.format(
-                filelist, year, split))
+            print(f'filelist does not exist: {filelist}, '
+                  f'skip voc{year} {split}')
             return
         img_names = mmcv.list_from_file(filelist)
         xml_paths = [
-            osp.join(devkit_path,
-                     'VOC{}/Annotations/{}.xml'.format(year, img_name))
+            osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')
             for img_name in img_names
         ]
         img_paths = [
-            'VOC{}/JPEGImages/{}.jpg'.format(year, img_name)
-            for img_name in img_names
+            f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names
         ]
         part_annotations = mmcv.track_progress(parse_xml,
                                                list(zip(xml_paths, img_paths)))
@@ -115,8 +113,8 @@ def main():
     if '2007' in years and '2012' in years:
         years.append(['2007', '2012'])
     if not years:
-        raise IOError('The devkit path {} contains neither "VOC2007" nor '
-                      '"VOC2012" subfolder'.format(devkit_path))
+        raise IOError(f'The devkit path {devkit_path} contains neither '
+                      '"VOC2007" nor "VOC2012" subfolder')
     for year in years:
         if year == '2007':
             prefix = 'voc07'
@@ -126,12 +124,12 @@ def main():
             prefix = 'voc0712'
         for split in ['train', 'val', 'trainval']:
             dataset_name = prefix + '_' + split
-            print('processing {} ...'.format(dataset_name))
+            print(f'processing {dataset_name} ...')
             cvt_annotations(devkit_path, year, split,
                             osp.join(out_dir, dataset_name + '.pkl'))
         if not isinstance(year, list):
             dataset_name = prefix + '_test'
-            print('processing {} ...'.format(dataset_name))
+            print(f'processing {dataset_name} ...')
             cvt_annotations(devkit_path, year, 'test',
                             osp.join(out_dir, dataset_name + '.pkl'))
     print('Done!')
diff --git a/tools/detectron2pytorch.py b/tools/detectron2pytorch.py
index 0a90ad17..961e6f57 100644
--- a/tools/detectron2pytorch.py
+++ b/tools/detectron2pytorch.py
@@ -48,27 +48,21 @@ def convert(src, dst, depth):
     for i in range(1, len(block_nums) + 1):
         for j in range(block_nums[i - 1]):
             if j == 0:
-                convert_conv_fc(blobs, state_dict,
-                                'res{}_{}_branch1'.format(i + 1, j),
-                                'layer{}.{}.downsample.0'.format(i, j),
-                                converted_names)
-                convert_bn(blobs, state_dict,
-                           'res{}_{}_branch1_bn'.format(i + 1, j),
-                           'layer{}.{}.downsample.1'.format(i, j),
-                           converted_names)
+                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
+                                f'layer{i}.{j}.downsample.0', converted_names)
+                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
+                           f'layer{i}.{j}.downsample.1', converted_names)
             for k, letter in enumerate(['a', 'b', 'c']):
                 convert_conv_fc(blobs, state_dict,
-                                'res{}_{}_branch2{}'.format(i + 1, j, letter),
-                                'layer{}.{}.conv{}'.format(i, j, k + 1),
-                                converted_names)
+                                f'res{i + 1}_{j}_branch2{letter}',
+                                f'layer{i}.{j}.conv{k + 1}', converted_names)
                 convert_bn(blobs, state_dict,
-                           'res{}_{}_branch2{}_bn'.format(i + 1, j, letter),
-                           'layer{}.{}.bn{}'.format(i, j,
-                                                    k + 1), converted_names)
+                           f'res{i + 1}_{j}_branch2{letter}_bn',
+                           f'layer{i}.{j}.bn{k + 1}', converted_names)
     # check if all layers are converted
     for key in blobs:
         if key not in converted_names:
-            print('Not Convert: {}'.format(key))
+            print(f'Not Convert: {key}')
     # save checkpoint
     checkpoint = dict()
     checkpoint['state_dict'] = state_dict
diff --git a/tools/get_flops.py b/tools/get_flops.py
index 6c9cb234..1ecd8757 100644
--- a/tools/get_flops.py
+++ b/tools/get_flops.py
@@ -44,8 +44,8 @@ def main():
 
     flops, params = get_model_complexity_info(model, input_shape)
     split_line = '=' * 30
-    print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
-        split_line, input_shape, flops, params))
+    print(f'{split_line}\nInput shape: {input_shape}\n'
+          f'Flops: {flops}\nParams: {params}\n{split_line}')
     print('!!!Please be cautious if you use the results in papers. '
           'You may need to check if all ops are supported and verify that the '
           'flops computation is correct.')
diff --git a/tools/publish_model.py b/tools/publish_model.py
index a049f176..b638d25c 100644
--- a/tools/publish_model.py
+++ b/tools/publish_model.py
@@ -22,7 +22,7 @@ def process_checkpoint(in_file, out_file):
     # add the code here.
     torch.save(checkpoint, out_file)
     sha = subprocess.check_output(['sha256sum', out_file]).decode()
-    final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8])
+    final_file = out_file[:-len('.pth')] + f'-{sha[:8]}.pth'
     subprocess.Popen(['mv', out_file, final_file])
 
 
diff --git a/tools/pytorch2onnx.py b/tools/pytorch2onnx.py
index 4f3cf2d8..e26213bb 100644
--- a/tools/pytorch2onnx.py
+++ b/tools/pytorch2onnx.py
@@ -51,7 +51,7 @@ def export_onnx_model(model, inputs, passes):
     if passes is not None:
         all_passes = optimizer.get_available_passes()
         assert all(p in all_passes for p in passes), \
-            'Only {} are supported'.format(all_passes)
+            f'Only {all_passes} are supported'
     onnx_model = optimizer.optimize(onnx_model, passes)
     return onnx_model
 
@@ -108,7 +108,7 @@ def main():
     else:
         raise NotImplementedError(
             'ONNX conversion is currently not supported with '
-            '{}'.format(model.__class__.__name__))
+            f'{model.__class__.__name__}')
 
     input_data = torch.empty((1, *input_shape),
                              dtype=next(model.parameters()).dtype,
@@ -117,7 +117,7 @@ def main():
     onnx_model = export_onnx_model(model, (input_data, ), args.passes)
     # Print a human readable representation of the graph
     onnx.helper.printable_graph(onnx_model.graph)
-    print('saving model in {}'.format(args.out))
+    print(f'saving model in {args.out}')
     onnx.save(onnx_model, args.out)
 
 
diff --git a/tools/robustness_eval.py b/tools/robustness_eval.py
index 1ff3ac07..a1b4ce88 100644
--- a/tools/robustness_eval.py
+++ b/tools/robustness_eval.py
@@ -8,14 +8,12 @@ import numpy as np
 def print_coco_results(results):
 
     def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):
-        iStr = ' {:<18} {} @[ IoU={:<9} | \
-        area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
-
         titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
         typeStr = '(AP)' if ap == 1 else '(AR)'
-        iouStr = '{:0.2f}:{:0.2f}'.format(.5, .95) \
-            if iouThr is None else '{:0.2f}'.format(iouThr)
-        print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, result))
+        iouStr = '0.50:0.95' if iouThr is None else f'{iouThr:0.2f}'
+        iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | '
+        iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}'
+        print(iStr)
 
     stats = np.zeros((12, ))
     stats[0] = _print(results[0], 1)
@@ -81,33 +80,30 @@ def get_coco_style_results(filename,
         mPC = np.mean(results[:, 1:, :], axis=(0, 1))
     rPC = mPC / P
 
-    print('\nmodel: {}'.format(osp.basename(filename)))
+    print(f'\nmodel: {osp.basename(filename)}')
     if metric is None:
         if 'P' in prints:
-            print('Performance on Clean Data [P] ({})'.format(task))
+            print(f'Performance on Clean Data [P] ({task})')
             print_coco_results(P)
         if 'mPC' in prints:
-            print('Mean Performance under Corruption [mPC] ({})'.format(task))
+            print(f'Mean Performance under Corruption [mPC] ({task})')
             print_coco_results(mPC)
         if 'rPC' in prints:
-            print('Realtive Performance under Corruption [rPC] ({})'.format(
-                task))
+            print(f'Relative Performance under Corruption [rPC] ({task})')
             print_coco_results(rPC)
     else:
         if 'P' in prints:
-            print('Performance on Clean Data [P] ({})'.format(task))
+            print(f'Performance on Clean Data [P] ({task})')
             for metric_i, metric_name in enumerate(metrics):
-                print('{:5} =  {:0.3f}'.format(metric_name, P[metric_i]))
+                print(f'{metric_name:5} =  {P[metric_i]:0.3f}')
         if 'mPC' in prints:
-            print('Mean Performance under Corruption [mPC] ({})'.format(task))
+            print(f'Mean Performance under Corruption [mPC] ({task})')
             for metric_i, metric_name in enumerate(metrics):
-                print('{:5} =  {:0.3f}'.format(metric_name, mPC[metric_i]))
+                print(f'{metric_name:5} =  {mPC[metric_i]:0.3f}')
         if 'rPC' in prints:
-            print('Relative Performance under Corruption [rPC] ({})'.format(
-                task))
+            print(f'Relative Performance under Corruption [rPC] ({task})')
             for metric_i, metric_name in enumerate(metrics):
-                print('{:5} => {:0.1f} %'.format(metric_name,
-                                                 rPC[metric_i] * 100))
+                print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %')
 
     return results
 
@@ -143,17 +139,15 @@ def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
         mPC = np.mean(results[:, 1:, :], axis=(0, 1))
     rPC = mPC / P
 
-    print('\nmodel: {}'.format(osp.basename(filename)))
+    print(f'\nmodel: {osp.basename(filename)}')
     if 'P' in prints:
-        print('{:48} = {:0.3f}'.format('Performance on Clean Data [P] in AP50',
-                                       np.mean(P)))
+        print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}')
     if 'mPC' in prints:
-        print('{:48} = {:0.3f}'.format(
-            'Mean Performance under Corruption [mPC] in AP50', np.mean(mPC)))
+        print('Mean Performance under Corruption [mPC] in AP50 = '
+              f'{np.mean(mPC):0.3f}')
     if 'rPC' in prints:
-        print('{:48} = {:0.1f}'.format(
-            'Realtive Performance under Corruption [rPC] in %',
-            np.mean(rPC) * 100))
+        print('Relative Performance under Corruption [rPC] in % = '
+              f'{np.mean(rPC) * 100:0.1f}')
 
     return np.mean(results, axis=2, keepdims=True)
 
diff --git a/tools/test.py b/tools/test.py
index c5602977..55a4f283 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -157,7 +157,7 @@ def main():
     rank, _ = get_dist_info()
     if rank == 0:
         if args.out:
-            print('\nwriting results to {}'.format(args.out))
+            print(f'\nwriting results to {args.out}')
             mmcv.dump(outputs, args.out)
         kwargs = {} if args.options is None else args.options
         if args.format_only:
diff --git a/tools/test_robustness.py b/tools/test_robustness.py
index e6b19db4..ecc366f5 100644
--- a/tools/test_robustness.py
+++ b/tools/test_robustness.py
@@ -152,7 +152,7 @@ def collect_results(result_part, size, tmpdir=None):
     else:
         mmcv.mkdir_or_exist(tmpdir)
     # dump the part result to the dir
-    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
+    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
     dist.barrier()
     # collect all parts
     if rank != 0:
@@ -161,7 +161,7 @@ def collect_results(result_part, size, tmpdir=None):
         # load results of all parts from tmp dir
         part_list = []
         for i in range(world_size):
-            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
+            part_file = osp.join(tmpdir, f'part_{i}.pkl')
             part_list.append(mmcv.load(part_file))
         # sort the results
         ordered_results = []
@@ -333,8 +333,7 @@ def main():
                 test_data_cfg['pipeline'].insert(1, corruption_trans)
 
             # print info
-            print('\nTesting {} at severity {}'.format(corruption,
-                                                       corruption_severity))
+            print(f'\nTesting {corruption} at severity {corruption_severity}')
 
             # build the dataloader
             # TODO: support multiple images per gpu
@@ -396,8 +395,7 @@ def main():
                                 is supported for pascal voc')
                 else:
                     if eval_types:
-                        print('Starting evaluate {}'.format(
-                            ' and '.join(eval_types)))
+                        print(f'Start evaluating {" and ".join(eval_types)}')
                         if eval_types == ['proposal_fast']:
                             result_file = args.out
                         else:
@@ -406,10 +404,9 @@ def main():
                                     outputs, args.out)
                             else:
                                 for name in outputs[0]:
-                                    print('\nEvaluating {}'.format(name))
+                                    print(f'\nEvaluating {name}')
                                     outputs_ = [out[name] for out in outputs]
-                                    result_file = args.out
-                                    + '.{}'.format(name)
+                                    result_file = args.out + f'.{name}'
                                     result_files = dataset.results2json(
                                         outputs_, result_file)
                         eval_results = coco_eval_with_return(
diff --git a/tools/train.py b/tools/train.py
index 5cae2f69..ad187299 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -98,7 +98,7 @@ def main():
     mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
     # init the logger before other steps
     timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
-    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
+    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
     logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
 
     # init the meta dict to record some important information such as
@@ -106,21 +106,20 @@ def main():
     meta = dict()
     # log env info
     env_info_dict = collect_env()
-    env_info = '\n'.join([('{}: {}'.format(k, v))
-                          for k, v in env_info_dict.items()])
+    env_info = '\n'.join(f'{k}: {v}' for k, v in env_info_dict.items())
     dash_line = '-' * 60 + '\n'
     logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                 dash_line)
     meta['env_info'] = env_info
 
     # log some basic info
-    logger.info('Distributed training: {}'.format(distributed))
-    logger.info('Config:\n{}'.format(cfg.text))
+    logger.info(f'Distributed training: {distributed}')
+    logger.info(f'Config:\n{cfg.text}')
 
     # set random seeds
     if args.seed is not None:
-        logger.info('Set random seed to {}, deterministic: {}'.format(
-            args.seed, args.deterministic))
+        logger.info(f'Set random seed to {args.seed}, '
+                    f'deterministic: {args.deterministic}')
         set_random_seed(args.seed, deterministic=args.deterministic)
     cfg.seed = args.seed
     meta['seed'] = args.seed
diff --git a/tools/upgrade_model_version.py b/tools/upgrade_model_version.py
index 00bcdf44..3239dbeb 100644
--- a/tools/upgrade_model_version.py
+++ b/tools/upgrade_model_version.py
@@ -21,7 +21,7 @@ def convert(in_file, out_file):
         m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
         if m is not None:
             param = m.groups()[1]
-            new_key = key.replace(param, 'conv.{}'.format(param))
+            new_key = key.replace(param, f'conv.{param}')
             out_state_dict[new_key] = val
             continue
 
-- 
GitLab
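
The whole patch applies the same few mechanical conversion rules. As a quick
reference, the following is a minimal, self-contained sketch of each recurring
pattern; the names cfg, score, path and idx are hypothetical, chosen only for
illustration, and are not identifiers from mmdet:

    # Recurring .format -> f-string patterns, shown on hypothetical names.
    cfg = {'type': 'bilinear'}
    score, path, idx = 0.87654, '/tmp/img.png', 3

    # Plain positional substitution:
    assert 'layer{}'.format(idx) == f'layer{idx}'

    # Format specs after the colon carry over unchanged:
    assert '{:.2f} ms'.format(score) == f'{score:.2f} ms'

    # Conversion flags such as !r carry over as well:
    assert 'path = {!r}'.format(path) == f'path = {path!r}'

    # A subscript inside a single-quoted f-string needs double quotes:
    assert 'type {}'.format(cfg['type']) == f'type {cfg["type"]}'

    # Long messages are split via implicit concatenation of adjacent
    # literals; only pieces containing placeholders need the f prefix:
    msg = ('num of augmentations (2) '
           f'!= num of image metas ({idx})')
    print(msg)

One note on the quoting rule: before Python 3.12, an f-string expression may
not reuse the quote character that delimits the string, which is why
subscripts and joins inside the single-quoted strings in this patch switch to
double quotes, e.g. {self.upsample_cfg["type"]} and {" and ".join(eval_types)}.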