From 42f5b6c68698f5bf5de97b92120b00616f4fbabe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Haian=20Huang=28=E6=B7=B1=E5=BA=A6=E7=9C=B8=29?=
 <1286304229@qq.com>
Date: Mon, 11 Jan 2021 11:34:06 +0800
Subject: [PATCH] Add Tag to config (#4426)

---
 configs/albu_example/README.md        | 2 ++
 configs/atss/README.md                | 2 ++
 configs/carafe/README.md              | 2 ++
 configs/cascade_rcnn/README.md        | 2 ++
 configs/cascade_rpn/README.md         | 2 ++
 configs/centripetalnet/README.md      | 2 ++
 configs/cityscapes/README.md          | 2 ++
 configs/cornernet/README.md           | 2 ++
 configs/dcn/README.md                 | 2 ++
 configs/deepfashion/README.md         | 2 ++
 configs/detectors/README.md           | 2 ++
 configs/detr/README.md                | 2 ++
 configs/double_heads/README.md        | 2 ++
 configs/dynamic_rcnn/README.md        | 2 ++
 configs/empirical_attention/README.md | 2 ++
 configs/fast_rcnn/README.md           | 2 ++
 configs/faster_rcnn/README.md         | 2 ++
 configs/fcos/README.md                | 2 ++
 configs/foveabox/README.md            | 2 ++
 configs/fp16/README.md                | 2 ++
 configs/free_anchor/README.md         | 2 ++
 configs/fsaf/README.md                | 2 ++
 configs/gcnet/README.md               | 2 ++
 configs/gfl/README.md                 | 2 ++
 configs/ghm/README.md                 | 2 ++
 configs/gn+ws/README.md               | 2 ++
 configs/gn/README.md                  | 2 ++
 configs/grid_rcnn/README.md           | 2 ++
 configs/groie/README.md               | 2 ++
 configs/guided_anchoring/README.md    | 2 ++
 configs/hrnet/README.md               | 2 ++
 configs/htc/README.md                 | 2 ++
 configs/instaboost/README.md          | 2 ++
 configs/legacy_1.x/README.md          | 2 ++
 configs/libra_rcnn/README.md          | 2 ++
 configs/lvis/README.md                | 2 ++
 configs/mask_rcnn/README.md           | 2 ++
 configs/ms_rcnn/README.md             | 2 ++
 configs/nas_fcos/README.md            | 2 ++
 configs/nas_fpn/README.md             | 2 ++
 configs/paa/README.md                 | 2 ++
 configs/pafpn/README.md               | 2 ++
 configs/pascal_voc/README.md          | 2 ++
 configs/pisa/README.md                | 2 ++
 configs/point_rend/README.md          | 2 ++
 configs/regnet/README.md              | 2 ++
 configs/reppoints/README.md           | 2 ++
 configs/res2net/README.md             | 2 ++
 configs/resnest/README.md             | 2 ++
 configs/retinanet/README.md           | 2 ++
 configs/rpn/README.md                 | 2 ++
 configs/sabl/README.md                | 2 ++
 configs/scratch/README.md             | 2 ++
 configs/ssd/README.md                 | 2 ++
 configs/tridentnet/README.md          | 2 ++
 configs/vfnet/README.md               | 2 ++
 configs/wider_face/README.md          | 2 ++
 configs/yolact/README.md              | 2 ++
 configs/yolo/README.md                | 2 ++
 59 files changed, 118 insertions(+)

diff --git a/configs/albu_example/README.md b/configs/albu_example/README.md
index 25e8a074..eb360810 100644
--- a/configs/albu_example/README.md
+++ b/configs/albu_example/README.md
@@ -1,5 +1,7 @@
 # Albu Example
 
+[OTHERS]
+
 ## Results and Models
 
 | Backbone  | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
diff --git a/configs/atss/README.md b/configs/atss/README.md
index 99f57165..4ba91500 100644
--- a/configs/atss/README.md
+++ b/configs/atss/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{zhang2019bridging,
   title   =  {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection},
diff --git a/configs/carafe/README.md b/configs/carafe/README.md
index 0cb57258..d9ca6644 100644
--- a/configs/carafe/README.md
+++ b/configs/carafe/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide config files to reproduce the object detection & instance segmentation results in the ICCV 2019 Oral paper for [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188).
 
 ```
diff --git a/configs/cascade_rcnn/README.md b/configs/cascade_rcnn/README.md
index 74c99066..15e6191a 100644
--- a/configs/cascade_rcnn/README.md
+++ b/configs/cascade_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{Cai_2019,
    title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation},
diff --git a/configs/cascade_rpn/README.md b/configs/cascade_rpn/README.md
index 8618ea36..2b0c6de8 100644
--- a/configs/cascade_rpn/README.md
+++ b/configs/cascade_rpn/README.md
@@ -1,5 +1,7 @@
 # Cascade RPN
 
+[ALGORITHM]
+
 We provide the code for reproducing experiment results of [Cascade RPN](https://arxiv.org/abs/1909.06720).
 
 ```
diff --git a/configs/centripetalnet/README.md b/configs/centripetalnet/README.md
index ca502e5d..18631da0 100644
--- a/configs/centripetalnet/README.md
+++ b/configs/centripetalnet/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @InProceedings{Dong_2020_CVPR,
 author = {Dong, Zhiwei and Li, Guoxuan and Liao, Yue and Wang, Fei and Ren, Pengju and Qian, Chen},
diff --git a/configs/cityscapes/README.md b/configs/cityscapes/README.md
index 51f43d41..1d4323cb 100644
--- a/configs/cityscapes/README.md
+++ b/configs/cityscapes/README.md
@@ -1,5 +1,7 @@
 # Cityscapes Dataset
 
+[DATASET]
+
 ## Common settings
 
 - All baselines were trained using 8 GPU with a batch size of 8 (1 images per GPU) using the [linear scaling rule](https://arxiv.org/abs/1706.02677) to scale the learning rate.
diff --git a/configs/cornernet/README.md b/configs/cornernet/README.md
index 65a7eda2..51e5e7a5 100644
--- a/configs/cornernet/README.md
+++ b/configs/cornernet/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{law2018cornernet,
   title={Cornernet: Detecting objects as paired keypoints},
diff --git a/configs/dcn/README.md b/configs/dcn/README.md
index 9c42f94d..9b86ef1a 100644
--- a/configs/dcn/README.md
+++ b/configs/dcn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```none
 @inproceedings{dai2017deformable,
   title={Deformable Convolutional Networks},
diff --git a/configs/deepfashion/README.md b/configs/deepfashion/README.md
index fa31cca8..8959aa0b 100644
--- a/configs/deepfashion/README.md
+++ b/configs/deepfashion/README.md
@@ -1,5 +1,7 @@
 # DeepFashion
 
+[DATASET]
+
 [MMFashion](https://github.com/open-mmlab/mmfashion) develops "fashion parsing and segmentation" module
 based on the dataset
 [DeepFashion-Inshop](https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E?usp=sharing).
diff --git a/configs/detectors/README.md b/configs/detectors/README.md
index 103c1cb4..f1964172 100644
--- a/configs/detectors/README.md
+++ b/configs/detectors/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide the config files for [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/pdf/2006.02334.pdf).
 
 ```BibTeX
diff --git a/configs/detr/README.md b/configs/detr/README.md
index 77633f92..711a308a 100644
--- a/configs/detr/README.md
+++ b/configs/detr/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide the config files for DETR: [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872).
 
 ```BibTeX
diff --git a/configs/double_heads/README.md b/configs/double_heads/README.md
index 6c031d0b..3ad4f49c 100644
--- a/configs/double_heads/README.md
+++ b/configs/double_heads/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{wu2019rethinking,
     title={Rethinking Classification and Localization for Object Detection},
diff --git a/configs/dynamic_rcnn/README.md b/configs/dynamic_rcnn/README.md
index d3237747..ffdc42dc 100644
--- a/configs/dynamic_rcnn/README.md
+++ b/configs/dynamic_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```
 @article{DynamicRCNN,
     author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen},
diff --git a/configs/empirical_attention/README.md b/configs/empirical_attention/README.md
index ed151178..f9782d7e 100644
--- a/configs/empirical_attention/README.md
+++ b/configs/empirical_attention/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{zhu2019empirical,
   title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks},
diff --git a/configs/fast_rcnn/README.md b/configs/fast_rcnn/README.md
index 1c9da507..c756507a 100644
--- a/configs/fast_rcnn/README.md
+++ b/configs/fast_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{girshick2015fast,
   title={Fast r-cnn},
diff --git a/configs/faster_rcnn/README.md b/configs/faster_rcnn/README.md
index 6b957ac5..d43fc6da 100644
--- a/configs/faster_rcnn/README.md
+++ b/configs/faster_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{Ren_2017,
    title={Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks},
diff --git a/configs/fcos/README.md b/configs/fcos/README.md
index 84b3fbfa..d252e1e1 100644
--- a/configs/fcos/README.md
+++ b/configs/fcos/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{tian2019fcos,
   title={FCOS: Fully Convolutional One-Stage Object Detection},
diff --git a/configs/foveabox/README.md b/configs/foveabox/README.md
index 7b69178d..91a43c97 100644
--- a/configs/foveabox/README.md
+++ b/configs/foveabox/README.md
@@ -1,5 +1,7 @@
 # FoveaBox: Beyond Anchor-based Object Detector
 
+[ALGORITHM]
+
 FoveaBox is an accurate, flexible and completely anchor-free object detection system for object detection framework, as presented in our paper [https://arxiv.org/abs/1904.03797](https://arxiv.org/abs/1904.03797):
 Different from previous anchor-based methods, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object.
 
diff --git a/configs/fp16/README.md b/configs/fp16/README.md
index bca4fb9c..3128a2a6 100644
--- a/configs/fp16/README.md
+++ b/configs/fp16/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{micikevicius2017mixed,
   title={Mixed precision training},
diff --git a/configs/free_anchor/README.md b/configs/free_anchor/README.md
index 0cbb7afe..6d6474c9 100644
--- a/configs/free_anchor/README.md
+++ b/configs/free_anchor/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{zhang2019freeanchor,
   title   =  {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection},
diff --git a/configs/fsaf/README.md b/configs/fsaf/README.md
index a07fe648..42468c8b 100644
--- a/configs/fsaf/README.md
+++ b/configs/fsaf/README.md
@@ -1,5 +1,7 @@
 # Feature Selective Anchor-Free Module for Single-Shot Object Detection
 
+[ALGORITHM]
+
 FSAF is an anchor-free method published in CVPR2019 ([https://arxiv.org/pdf/1903.00621.pdf](https://arxiv.org/pdf/1903.00621.pdf)).
 Actually it is equivalent to the anchor-based method with only one anchor at each feature map position in each FPN level.
 And this is how we implemented it.
diff --git a/configs/gcnet/README.md b/configs/gcnet/README.md
index 0fe0fc10..0ef8db73 100644
--- a/configs/gcnet/README.md
+++ b/configs/gcnet/README.md
@@ -7,6 +7,8 @@ We provide config files to reproduce the results in the paper for
 
 ## Introduction
 
+[ALGORITHM]
+
 **GCNet** is initially described in [arxiv](https://arxiv.org/abs/1904.11492). Via absorbing advantages of Non-Local Networks (NLNet) and Squeeze-Excitation Networks (SENet),  GCNet provides a simple, fast and effective approach for global context modeling, which generally outperforms both NLNet and SENet on major benchmarks for various recognition tasks.
 
 ## Citing GCNet
diff --git a/configs/gfl/README.md b/configs/gfl/README.md
index 7ca72cc5..53ae22b7 100644
--- a/configs/gfl/README.md
+++ b/configs/gfl/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide config files to reproduce the object detection results in the paper [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388)
 
 ```latex
diff --git a/configs/ghm/README.md b/configs/ghm/README.md
index 9f8d702c..6a70bcd4 100644
--- a/configs/ghm/README.md
+++ b/configs/ghm/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```
 @inproceedings{li2019gradient,
   title={Gradient Harmonized Single-stage Detector},
diff --git a/configs/gn+ws/README.md b/configs/gn+ws/README.md
index 1487ae86..988fb13e 100644
--- a/configs/gn+ws/README.md
+++ b/configs/gn+ws/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```
 @article{weightstandardization,
   author    = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille},
diff --git a/configs/gn/README.md b/configs/gn/README.md
index d6db55ea..48ee7ad3 100644
--- a/configs/gn/README.md
+++ b/configs/gn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{wu2018group,
   title={Group Normalization},
diff --git a/configs/grid_rcnn/README.md b/configs/grid_rcnn/README.md
index 96b598f8..a1e83525 100644
--- a/configs/grid_rcnn/README.md
+++ b/configs/grid_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{lu2019grid,
   title={Grid r-cnn},
diff --git a/configs/groie/README.md b/configs/groie/README.md
index 05385618..37e5de34 100644
--- a/configs/groie/README.md
+++ b/configs/groie/README.md
@@ -11,6 +11,8 @@ on COCO object detection.
 
 ## Introduction
 
+[ALGORITHM]
+
 This paper is motivated by the need to overcome to the limitations of existing
 RoI extractors which select only one (the best) layer from FPN.
 
diff --git a/configs/guided_anchoring/README.md b/configs/guided_anchoring/README.md
index e8b415b7..4c3c86cd 100644
--- a/configs/guided_anchoring/README.md
+++ b/configs/guided_anchoring/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide config files to reproduce the results in the CVPR 2019 paper for [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278).
 
 ```latex
diff --git a/configs/hrnet/README.md b/configs/hrnet/README.md
index dd1d6a8f..c6e974f2 100644
--- a/configs/hrnet/README.md
+++ b/configs/hrnet/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[BACKBONE]
+
 ```latex
 @inproceedings{SunXLW19,
   title={Deep High-Resolution Representation Learning for Human Pose Estimation},
diff --git a/configs/htc/README.md b/configs/htc/README.md
index d0fa59d9..13f6a6a5 100644
--- a/configs/htc/README.md
+++ b/configs/htc/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518).
 
 ```latex
diff --git a/configs/instaboost/README.md b/configs/instaboost/README.md
index 1017fb96..5ab74a1a 100644
--- a/configs/instaboost/README.md
+++ b/configs/instaboost/README.md
@@ -1,5 +1,7 @@
 # InstaBoost for MMDetection
 
+[ALGORITHM]
+
 Configs in this directory is the implementation for ICCV2019 paper "InstaBoost: Boosting Instance Segmentation Via Probability Map Guided Copy-Pasting" and provided by the authors of the paper. InstaBoost is a data augmentation method for object detection and instance segmentation. The paper has been released on [`arXiv`](https://arxiv.org/abs/1908.07801).
 
 ```latex
diff --git a/configs/legacy_1.x/README.md b/configs/legacy_1.x/README.md
index ae751d61..024624c7 100644
--- a/configs/legacy_1.x/README.md
+++ b/configs/legacy_1.x/README.md
@@ -1,5 +1,7 @@
 # Legacy Configs in MMDetection V1.x
 
+[OTHERS]
+
 Configs in this directory implement the legacy configs used by MMDetection V1.x and its model zoos.
 
 To help users convert their models from V1.x to MMDetection V2.0, we provide v1.x configs to inference the converted v1.x models.
diff --git a/configs/libra_rcnn/README.md b/configs/libra_rcnn/README.md
index 7f19d4a9..1f28087f 100644
--- a/configs/libra_rcnn/README.md
+++ b/configs/libra_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide config files to reproduce the results in the CVPR 2019 paper [Libra R-CNN](https://arxiv.org/pdf/1904.02701.pdf).
 
 ```
diff --git a/configs/lvis/README.md b/configs/lvis/README.md
index a7d7850b..32768030 100644
--- a/configs/lvis/README.md
+++ b/configs/lvis/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[DATASET]
+
 ```latex
 @inproceedings{gupta2019lvis,
   title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation},
diff --git a/configs/mask_rcnn/README.md b/configs/mask_rcnn/README.md
index d65f1707..fd1dc5bc 100644
--- a/configs/mask_rcnn/README.md
+++ b/configs/mask_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{He_2017,
    title={Mask R-CNN},
diff --git a/configs/ms_rcnn/README.md b/configs/ms_rcnn/README.md
index f71fda8d..c19dee36 100644
--- a/configs/ms_rcnn/README.md
+++ b/configs/ms_rcnn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```
 @inproceedings{huang2019msrcnn,
     title={Mask Scoring R-CNN},
diff --git a/configs/nas_fcos/README.md b/configs/nas_fcos/README.md
index 420121fc..05ac996a 100644
--- a/configs/nas_fcos/README.md
+++ b/configs/nas_fcos/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{wang2019fcos,
   title={Nas-fcos: Fast neural architecture search for object detection},
diff --git a/configs/nas_fpn/README.md b/configs/nas_fpn/README.md
index d5faecf7..81f25b2a 100644
--- a/configs/nas_fpn/README.md
+++ b/configs/nas_fpn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{ghiasi2019fpn,
   title={Nas-fpn: Learning scalable feature pyramid architecture for object detection},
diff --git a/configs/paa/README.md b/configs/paa/README.md
index 38abe0ba..df974ce7 100644
--- a/configs/paa/README.md
+++ b/configs/paa/README.md
@@ -1,5 +1,7 @@
 # Probabilistic Anchor Assignment with IoU Prediction for Object Detection
 
+[ALGORITHM]
+
 ## Results and Models
 
 We provide config files to reproduce the object detection results in the
diff --git a/configs/pafpn/README.md b/configs/pafpn/README.md
index 0d3ab9e1..03227e26 100644
--- a/configs/pafpn/README.md
+++ b/configs/pafpn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```
 @inproceedings{liu2018path,
   author = {Shu Liu and
diff --git a/configs/pascal_voc/README.md b/configs/pascal_voc/README.md
index c7cb1dce..fb100bed 100644
--- a/configs/pascal_voc/README.md
+++ b/configs/pascal_voc/README.md
@@ -1,5 +1,7 @@
 # PASCAL VOC Dataset
 
+[DATASET]
+
 ## Results and Models
 
 | Architecture | Backbone  | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
diff --git a/configs/pisa/README.md b/configs/pisa/README.md
index b03ac7ad..2ab689e2 100644
--- a/configs/pisa/README.md
+++ b/configs/pisa/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{cao2019prime,
   title={Prime sample attention in object detection},
diff --git a/configs/point_rend/README.md b/configs/point_rend/README.md
index 0120185f..af5ded18 100644
--- a/configs/point_rend/README.md
+++ b/configs/point_rend/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @InProceedings{kirillov2019pointrend,
   title={{PointRend}: Image Segmentation as Rendering},
diff --git a/configs/regnet/README.md b/configs/regnet/README.md
index a3d332cd..226b41dc 100644
--- a/configs/regnet/README.md
+++ b/configs/regnet/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[BACKBONE]
+
 We implement RegNetX and RegNetY models in detection systems and provide their first results on Mask R-CNN, Faster R-CNN and RetinaNet.
 
 The pre-trained modles are converted from [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md).
diff --git a/configs/reppoints/README.md b/configs/reppoints/README.md
index 0c22aa84..2ab22cd8 100644
--- a/configs/reppoints/README.md
+++ b/configs/reppoints/README.md
@@ -7,6 +7,8 @@ We provide code support and configuration files to reproduce the results in the
 
 ## Introduction
 
+[ALGORITHM]
+
 **RepPoints**, initially described in [arXiv](https://arxiv.org/abs/1904.11490), is a new representation method for visual objects, on which visual understanding tasks are typically centered. Visual object representation, aiming at both geometric description and appearance feature extraction, is conventionally achieved by `bounding box + RoIPool (RoIAlign)`. The bounding box representation is convenient to use; however, it provides only a rectangular localization of objects that lacks geometric precision and may consequently degrade feature quality. Our new representation, RepPoints, models objects by a `point set` instead of a `bounding box`, which learns to adaptively position themselves over an object in a manner that circumscribes the object’s `spatial extent` and enables `semantically aligned feature extraction`. This richer and more flexible representation maintains the convenience of bounding boxes while facilitating various visual understanding applications. This repo demonstrated the effectiveness of RepPoints for COCO object detection.
 
 Another feature of this repo is the demonstration of an `anchor-free detector`, which can be as effective as state-of-the-art anchor-based detection methods. The anchor-free detector can utilize either `bounding box` or `RepPoints` as the basic object representation.
diff --git a/configs/res2net/README.md b/configs/res2net/README.md
index 3275fdfb..a8d7fa5c 100644
--- a/configs/res2net/README.md
+++ b/configs/res2net/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[BACKBONE]
+
 We propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
 
 |    Backbone     |Params. | GFLOPs  | top-1 err. | top-5 err. |
diff --git a/configs/resnest/README.md b/configs/resnest/README.md
index a26584af..d34d1c27 100644
--- a/configs/resnest/README.md
+++ b/configs/resnest/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[BACKBONE]
+
 ```latex
 @article{zhang2020resnest,
 title={ResNeSt: Split-Attention Networks},
diff --git a/configs/retinanet/README.md b/configs/retinanet/README.md
index ffb7b9f9..6b665342 100644
--- a/configs/retinanet/README.md
+++ b/configs/retinanet/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{lin2017focal,
   title={Focal loss for dense object detection},
diff --git a/configs/rpn/README.md b/configs/rpn/README.md
index 09aff132..4f6f712c 100644
--- a/configs/rpn/README.md
+++ b/configs/rpn/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @inproceedings{ren2015faster,
   title={Faster r-cnn: Towards real-time object detection with region proposal networks},
diff --git a/configs/sabl/README.md b/configs/sabl/README.md
index 85c3c57d..34b8367d 100644
--- a/configs/sabl/README.md
+++ b/configs/sabl/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 We provide config files to reproduce the object detection results in the ECCV 2020 Spotlight paper for [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260).
 
 ```latex
diff --git a/configs/scratch/README.md b/configs/scratch/README.md
index 18f63820..d9a472f1 100644
--- a/configs/scratch/README.md
+++ b/configs/scratch/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[OTHERS]
+
 ```latex
 @article{he2018rethinking,
   title={Rethinking imagenet pre-training},
diff --git a/configs/ssd/README.md b/configs/ssd/README.md
index e0d17744..51262d68 100644
--- a/configs/ssd/README.md
+++ b/configs/ssd/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @article{Liu_2016,
    title={SSD: Single Shot MultiBox Detector},
diff --git a/configs/tridentnet/README.md b/configs/tridentnet/README.md
index 4ec4a8f5..5b327a3c 100644
--- a/configs/tridentnet/README.md
+++ b/configs/tridentnet/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```
 @InProceedings{li2019scale,
   title={Scale-Aware Trident Networks for Object Detection},
diff --git a/configs/vfnet/README.md b/configs/vfnet/README.md
index f5cc22ec..d1a94d15 100644
--- a/configs/vfnet/README.md
+++ b/configs/vfnet/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 **VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367).
 
 <div align="center">
diff --git a/configs/wider_face/README.md b/configs/wider_face/README.md
index 6eced922..63de2fbe 100644
--- a/configs/wider_face/README.md
+++ b/configs/wider_face/README.md
@@ -1,5 +1,7 @@
 # WIDER Face Dataset
 
+[DATASET]
+
 To use the WIDER Face dataset you need to download it
 and extract to the `data/WIDERFace` folder. Annotation in the VOC format
 can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git).
diff --git a/configs/yolact/README.md b/configs/yolact/README.md
index fea128e3..92b13779 100644
--- a/configs/yolact/README.md
+++ b/configs/yolact/README.md
@@ -1,5 +1,7 @@
 # **Y**ou **O**nly **L**ook **A**t **C**oefficien**T**s
 
+[ALGORITHM]
+
 ```
     ██╗   ██╗ ██████╗ ██╗      █████╗  ██████╗████████╗
     ╚██╗ ██╔╝██╔═══██╗██║     ██╔══██╗██╔════╝╚══██╔═══╝
diff --git a/configs/yolo/README.md b/configs/yolo/README.md
index 7f0c88c5..1f539c6f 100644
--- a/configs/yolo/README.md
+++ b/configs/yolo/README.md
@@ -2,6 +2,8 @@
 
 ## Introduction
 
+[ALGORITHM]
+
 ```latex
 @misc{redmon2018yolov3,
     title={YOLOv3: An Incremental Improvement},
-- 
GitLab