Lin Sui / detectron2

Commit 07f26afb ("update"), authored Oct 10, 2021 by suilin0432
Parent: 6edca3e6
Changed files: 4
detectron2/config/defaults.py
@@ -675,7 +675,7 @@ _C.MODEL.RESNETS.DEFORM_MODULATED = False
_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
# Parameter settings added for ResNeSt
_C.MODEL.RESNETS.STRIDE_IN_1X1 = False
# _C.MODEL.RESNETS.STRIDE_IN_1X1 = False
# Apply deep stem
_C.MODEL.RESNETS.DEEP_STEM = True
# Apply avg after conv2 in the BottleBlock
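For orientation, a minimal sketch (not part of this commit) of how such yacs-style flags are declared and later consumed; build_stem_kind is a hypothetical consumer, and the comments restate the usual meaning of these detectron2/ResNeSt options:

from detectron2.config import CfgNode as CN

_C = CN()
_C.MODEL = CN()
_C.MODEL.RESNETS = CN()
_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1   # number of groups for deformable conv
_C.MODEL.RESNETS.STRIDE_IN_1X1 = False   # False places the stride in the 3x3 conv (torchvision/ResNeSt style)
_C.MODEL.RESNETS.DEEP_STEM = True        # stacked 3x3 convs in the stem instead of a single 7x7

def build_stem_kind(cfg):
    # Hypothetical consumer of the flag added above.
    return "deep stem (3x 3x3 convs)" if cfg.MODEL.RESNETS.DEEP_STEM else "standard 7x7 stem"

print(build_stem_kind(_C))  # "deep stem (3x 3x3 convs)"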
detectron2/data/datasets/builtin.py
@@ -27,7 +27,7 @@ from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances, register_coco_instances_wsl
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc, register_pascal_voc_wsl, register_pascal_voc_wsl_top1, register_pascal_voc_wsl_thres, register_pascal_voc_wsl_contain, register_pascal_voc_wsl_contain_total, register_pascal_voc_wsl_mist, register_pascal_voc_wsl_mist_contain, register_pascal_voc_wsl_contain_all, register_pascal_voc_wsl_contain_w2f, register_pascal_voc_wsl_oicr_contain, register_pascal_voc_wsl_oicr_contain_all, register_pascal_voc_wsl_w2f_overlap, register_pascal_voc_wsl_contain_all_adaptive
from .pascal_voc import register_pascal_voc, register_pascal_voc_wsl, register_pascal_voc_wsl_top1, register_pascal_voc_wsl_thres, register_pascal_voc_wsl_contain, register_pascal_voc_wsl_contain_total, register_pascal_voc_wsl_mist, register_pascal_voc_wsl_mist_contain, register_pascal_voc_wsl_contain_all, register_pascal_voc_wsl_contain_w2f, register_pascal_voc_wsl_oicr_contain, register_pascal_voc_wsl_oicr_contain_all, register_pascal_voc_wsl_w2f_overlap, register_pascal_voc_wsl_contain_all_adaptive, register_pascal_voc_wsl_contain_adaptive, register_pascal_voc_wsl_contain_keep, register_pascal_voc_wsl_teach_iter
# ==== Predefined datasets and splits for COCO ==========
@@ -133,6 +133,8 @@ COCO_WSL = {
"coco_2014_valminusminival_casd_wsl_contain"
:
(
"coco/val2014"
,
"coco/annotations/casd_valminusminival2014_wsl_contain.json"
),
"coco_2014_train_casd_wsl_contain_adaptive"
:
(
"coco/train2014"
,
"coco/annotations/casd_train2014_wsl_contain_adaptive.json"
),
"coco_2014_valminusminival_casd_wsl_contain_adaptive"
:
(
"coco/val2014"
,
"coco/annotations/casd_valminusminival2014_wsl_contain_adaptive.json"
),
"coco_2014_train_casd_wsl_topone"
:
(
"coco/train2014"
,
"coco/annotations/casd_train2014_wsl_topone.json"
),
"coco_2014_valminusminival_casd_wsl_topone"
:
(
"coco/val2014"
,
"coco/annotations/casd_valminusminival2014_wsl_topone.json"
),
}
# 注册弱监督的打好 pgt 标记的数据集
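Each COCO_WSL entry pairs an image root with a pseudo-label JSON. The fork registers them through register_coco_instances_wsl, whose signature is not shown in this diff; as a hedged stand-in, the upstream register_coco_instances API (name, metadata, json_file, image_root) illustrates the pattern:

import os
from detectron2.data.datasets import register_coco_instances

_root = os.getenv("DETECTRON2_DATASETS", "datasets")
COCO_WSL_EXAMPLE = {
    "coco_2014_train_casd_wsl_topone": ("coco/train2014", "coco/annotations/casd_train2014_wsl_topone.json"),
}
for name, (image_root, json_file) in COCO_WSL_EXAMPLE.items():
    # metadata dict is left empty in this sketch; builtin.py normally fills it from its COCO metadata helper
    register_coco_instances(name, {}, os.path.join(_root, json_file), os.path.join(_root, image_root))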
@@ -307,6 +309,57 @@ def register_all_pascal_voc_wsl_thres(root):
        register_pascal_voc_wsl_thres(name, os.path.join(root, dirname), split, year)
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_wsl_teach_iter1(root):
    SPLITS = [
        ("voc_2007_train_wsl_teach_iter1", "VOC2007", "train"),
        ("voc_2007_val_wsl_teach_iter1", "VOC2007", "val")
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc_wsl_teach_iter(name, os.path.join(root, dirname), split, year, "1")
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_wsl_contain_keep_01(root):
    SPLITS = [
        ("voc_2007_train_wsl_contain_keep01", "VOC2007", "train"),
        ("voc_2007_val_wsl_contain_keep01", "VOC2007", "val")
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc_wsl_contain_keep(name, os.path.join(root, dirname), split, year, "01")
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_wsl_contain_keep_015(root):
    SPLITS = [
        ("voc_2007_train_wsl_contain_keep015", "VOC2007", "train"),
        ("voc_2007_val_wsl_contain_keep015", "VOC2007", "val")
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc_wsl_contain_keep(name, os.path.join(root, dirname), split, year, "015")
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_wsl_contain_keep_025(root):
    SPLITS = [
        ("voc_2007_train_wsl_contain_keep025", "VOC2007", "train"),
        ("voc_2007_val_wsl_contain_keep025", "VOC2007", "val")
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc_wsl_contain_keep(name, os.path.join(root, dirname), split, year, "025")
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_wsl_contain_keep_03(root):
    SPLITS = [
        ("voc_2007_train_wsl_contain_keep03", "VOC2007", "train"),
        ("voc_2007_val_wsl_contain_keep03", "VOC2007", "val")
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc_wsl_contain_keep(name, os.path.join(root, dirname), split, year, "03")
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_wsl_contain_075(root):
    SPLITS = [
        ("voc_2007_train_wsl_contain_075", "VOC2007", "train"),
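The five contain_keep wrappers above differ only in the threshold suffix they forward; an equivalent parameterized helper (an editor's sketch, not in the commit) would be:

import os
from detectron2.data import MetadataCatalog

def register_all_pascal_voc_wsl_contain_keep_any(root, suffix):
    # suffix is the keep-threshold tag used above: "01", "015", "025", or "03"
    SPLITS = [
        ("voc_2007_train_wsl_contain_keep" + suffix, "VOC2007", "train"),
        ("voc_2007_val_wsl_contain_keep" + suffix, "VOC2007", "val"),
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        # register_pascal_voc_wsl_contain_keep is the registrar imported at the top of builtin.py
        register_pascal_voc_wsl_contain_keep(name, os.path.join(root, dirname), split, year, suffix)
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"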
@@ -417,6 +470,16 @@ def register_all_pascal_voc_wsl_contain_all_adaptive(root):
        register_pascal_voc_wsl_contain_all_adaptive(name, os.path.join(root, dirname), split, year)
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_wsl_contain_adaptive(root):
    SPLITS = [
        ("voc_2007_train_wsl_contain_adaptive", "VOC2007", "train"),
        ("voc_2007_val_wsl_contain_adaptive", "VOC2007", "val")
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc_wsl_contain_adaptive(name, os.path.join(root, dirname), split, year)
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"

def register_all_pascal_voc_w2f(root):
    SPLITS = [
        ("voc_2007_train_wsl_w2f", "VOC2007", "train"),
@@ -482,4 +545,10 @@ if __name__.endswith(".builtin"):
    register_all_pascal_voc_wsl_oicr_contain(_root)
    register_all_pascal_voc_wsl_oicr_contain_all(_root)
    register_all_pascal_voc_wsl_contain_all_adaptive(_root)
    register_all_pascal_voc_wsl_contain_adaptive(_root)
    register_all_pascal_voc_wsl_contain_keep_01(_root)
    register_all_pascal_voc_wsl_contain_keep_015(_root)
    register_all_pascal_voc_wsl_contain_keep_025(_root)
    register_all_pascal_voc_wsl_contain_keep_03(_root)
    register_all_pascal_voc_wsl_teach_iter1(_root)
    register_all_ade20k(_root)
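Importing detectron2.data.datasets triggers all of these registration calls, with _root taken from the DETECTRON2_DATASETS environment variable. A hedged usage sketch of what becomes available afterwards (it assumes the matching VOC files and WSL JSONs exist on disk):

from detectron2.data import DatasetCatalog, MetadataCatalog
import detectron2.data.datasets  # noqa: F401  (runs the registrations in builtin.py)

dicts = DatasetCatalog.get("voc_2007_train_wsl_teach_iter1")  # lazily loads the teach_iter1 pseudo labels
meta = MetadataCatalog.get("voc_2007_train_wsl_teach_iter1")
print(len(dicts), meta.evaluator_type)  # number of images, "pascal_voc"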
detectron2/data/datasets/pascal_voc.py
@@ -12,7 +12,7 @@ from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager

__all__ = ["load_voc_instances", "register_pascal_voc", "register_pascal_voc_wsl", "register_pascal_voc_wsl_top1", "register_pascal_voc_wsl_thres", "register_pascal_voc_wsl_mist", "register_pascal_voc_wsl_mist_contain", "register_pascal_voc_wsl_contain_all", "register_pascal_voc_wsl_contain_w2f", "register_pascal_voc_wsl_oicr_contain", "register_pascal_voc_wsl_oicr_contain_all", "register_pascal_voc_wsl_w2f_overlap", "register_pascal_voc_wsl_contain_all_adaptive"]
__all__ = ["load_voc_instances", "register_pascal_voc", "register_pascal_voc_wsl", "register_pascal_voc_wsl_top1", "register_pascal_voc_wsl_thres", "register_pascal_voc_wsl_mist", "register_pascal_voc_wsl_mist_contain", "register_pascal_voc_wsl_contain_all", "register_pascal_voc_wsl_contain_w2f", "register_pascal_voc_wsl_oicr_contain", "register_pascal_voc_wsl_oicr_contain_all", "register_pascal_voc_wsl_w2f_overlap", "register_pascal_voc_wsl_contain_all_adaptive", "register_pascal_voc_wsl_contain_adaptive", "register_pascal_voc_wsl_contain_keep", "register_pascal_voc_wsl_teach_iter"]
# fmt: off
@@ -33,6 +33,7 @@ def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], T
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names
    """
    # print(1)
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=np.str)
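A side note on compatibility, not part of the commit: np.str is a deprecated alias that was removed in NumPy 1.24, so on newer NumPy the same read would use the builtin str dtype, e.g.:

import numpy as np

with open("datasets/VOC2007/ImageSets/Main/train.txt") as f:  # assumed dataset layout
    fileids = np.loadtxt(f, dtype=str)  # equivalent to dtype=np.str on older NumPy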
@@ -89,7 +90,7 @@ def load_voc_instances_wsl(dirname: str, split: str, class_names: Union[List[str
    # Get the image ids for the corresponding dataset split (train, val, test)
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=np.str)
    # print(2)
    # For the single-input file
    # print("load from {}/single_voc07_wsl_{}_contain.json".format(dirname, split))
    # annotation_wsl = json.load(open(
@@ -470,6 +471,163 @@ def load_voc_instances_wsl_contain(dirname: str, split: str, thres, class_names:
        dicts.append(r)
    return dicts

def load_voc_instances_wsl_contain_keep(dirname: str, split: str, thres, class_names: Union[List[str], Tuple[str, ...]]):
    # Get the image ids for the corresponding dataset split (train, val, test)
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=np.str)
    # For the single-input file
    # print("load from {}/single_voc07_wsl_{}_contain.json".format(dirname, split))
    # annotation_wsl = json.load(open(
    #     "{}/single_voc07_wsl_{}_contain.json".format(dirname, split), "r"
    # ))
    # Load the annotations; the WSL prediction results are saved in JSON format
    if "07" in dirname:
        annotation_wsl = json.load(open("{}/voc07_wsl_{}_contain_keep{}.json".format(dirname, split, thres), "r"))
    elif "12" in dirname:
        annotation_wsl = json.load(open("{}/casd_voc12_wsl_{}_contain_keep{}.json".format(dirname, split, thres), "r"))
    else:
        assert False, "Wrong dirname: {}".format(dirname)
    multi_class_labels = None
    if "multi_label" in annotation_wsl:
        multi_class_labels = annotation_wsl.pop("multi_label")
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        anno = annotation_wsl[str(int(fileid))]
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        if not os.path.isfile(anno_file):
            with Image.open(jpeg_file) as img:
                width, height = img.size
            r = {"file_name": jpeg_file, "image_id": fileid, "height": height, "width": width}
            instances = []
            for obj in anno:
                bbox = obj["bbox"]
                bbox = [int(i) for i in bbox]  # The predicted bbox coordinates are floats; convert them to an int list
                category_id = obj["category_id"] - 1  # The index was shifted by +1 when saving for TIDE statistics, so subtract 1 here
                instances.append({"category_id": category_id, "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS})
            r["annotations"] = instances
            if multi_class_labels is not None:
                r["multi_label"] = multi_class_labels[str(int(fileid))]
            dicts.append(r)
            continue
        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)
        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        # The gt information here is taken from annotation_wsl, not the real gt parsed from the anno file
        for obj in anno:
            bbox = obj["bbox"]
            bbox = [int(i) for i in bbox]
            category_id = obj["category_id"] - 1
            instances.append({"category_id": category_id, "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS})
        r["annotations"] = instances
        if multi_class_labels is not None:
            r["multi_label"] = multi_class_labels[str(int(fileid))]
        dicts.append(r)
    return dicts

def load_voc_instances_wsl_teach_iter(dirname: str, split: str, thres, class_names: Union[List[str], Tuple[str, ...]]):
    # Get the image ids for the corresponding dataset split (train, val, test)
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=np.str)
    # For the single-input file
    # print("load from {}/single_voc07_wsl_{}_contain.json".format(dirname, split))
    # annotation_wsl = json.load(open(
    #     "{}/single_voc07_wsl_{}_contain.json".format(dirname, split), "r"
    # ))
    # Load the annotations; the WSL prediction results are saved in JSON format
    if "07" in dirname:
        annotation_wsl = json.load(open("{}/voc07_wsl_{}_teach_iter{}.json".format(dirname, split, thres), "r"))
    elif "12" in dirname:
        annotation_wsl = json.load(open("{}/casd_voc12_wsl_{}_teach_iter{}.json".format(dirname, split, thres), "r"))
    else:
        assert False, "Wrong dirname: {}".format(dirname)
    multi_class_labels = None
    if "multi_label" in annotation_wsl:
        multi_class_labels = annotation_wsl.pop("multi_label")
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        if str(int(fileid)) not in annotation_wsl:
            continue
        anno = annotation_wsl[str(int(fileid))]
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        if not os.path.isfile(anno_file):
            with Image.open(jpeg_file) as img:
                width, height = img.size
            r = {"file_name": jpeg_file, "image_id": fileid, "height": height, "width": width}
            instances = []
            for obj in anno:
                bbox = obj["bbox"]
                bbox = [int(i) for i in bbox]  # The predicted bbox coordinates are floats; convert them to an int list
                category_id = obj["category_id"] - 1  # The index was shifted by +1 when saving for TIDE statistics, so subtract 1 here
                instances.append({"category_id": category_id, "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS})
            r["annotations"] = instances
            if multi_class_labels is not None:
                r["multi_label"] = multi_class_labels[str(int(fileid))]
            dicts.append(r)
            continue
        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)
        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        # The gt information here is taken from annotation_wsl, not the real gt parsed from the anno file
        for obj in anno:
            bbox = obj["bbox"]
            bbox = [int(i) for i in bbox]
            category_id = obj["category_id"] - 1
            instances.append({"category_id": category_id, "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS})
        r["annotations"] = instances
        if multi_class_labels is not None:
            r["multi_label"] = multi_class_labels[str(int(fileid))]
        dicts.append(r)
    return dicts

def load_voc_instances_wsl_contain_all(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    # Get the image ids for the corresponding dataset split (train, val, test)
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
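All of these loaders return a list of detectron2-style dataset dicts. A sketch of one record they produce, with made-up values; the multi_label key is only attached when the WSL JSON contains a "multi_label" entry:

from detectron2.structures import BoxMode

record = {
    "file_name": "datasets/VOC2007/JPEGImages/000005.jpg",  # made-up example path
    "image_id": "000005",
    "height": 375,
    "width": 500,
    "annotations": [
        {"category_id": 8, "bbox": [262, 210, 323, 338], "bbox_mode": BoxMode.XYXY_ABS},
    ],
}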
@@ -626,6 +784,85 @@ def load_voc_instances_wsl_contain_all_adaptive(dirname: str, split: str, class_
        dicts.append(r)
    return dicts

def load_voc_instances_wsl_contain_adaptive(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    # Get the image ids for the corresponding dataset split (train, val, test)
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=np.str)
    # For the single-input file
    # print("load from {}/single_voc07_wsl_{}_contain.json".format(dirname, split))
    # annotation_wsl = json.load(open(
    #     "{}/single_voc07_wsl_{}_contain.json".format(dirname, split), "r"
    # ))
    # Load the annotations; the WSL prediction results are saved in JSON format
    if "07" in dirname:
        annotation_wsl = json.load(open("{}/voc07_wsl_{}_contain_adaptive.json".format(dirname, split), "r"))
    elif "12" in dirname:
        annotation_wsl = json.load(open("{}/casd_voc12_wsl_{}_contain_adaptive.json".format(dirname, split), "r"))
    else:
        assert False, "Wrong dirname: {}".format(dirname)
    multi_class_labels = None
    if "multi_label" in annotation_wsl:
        multi_class_labels = annotation_wsl.pop("multi_label")
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        if str(int(fileid)) not in annotation_wsl:
            continue
        anno = annotation_wsl[str(int(fileid))]
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        if not os.path.isfile(anno_file):
            with Image.open(jpeg_file) as img:
                width, height = img.size
            r = {"file_name": jpeg_file, "image_id": fileid, "height": height, "width": width}
            instances = []
            for obj in anno:
                bbox = obj["bbox"]
                bbox = [int(i) for i in bbox]  # The predicted bbox coordinates are floats; convert them to an int list
                category_id = obj["category_id"] - 1  # The index was shifted by +1 when saving for TIDE statistics, so subtract 1 here
                instances.append({"category_id": category_id, "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS})
            r["annotations"] = instances
            if multi_class_labels is not None:
                r["multi_label"] = multi_class_labels[str(int(fileid))]
            dicts.append(r)
            continue
        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)
        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        # The gt information here is taken from annotation_wsl, not the real gt parsed from the anno file
        for obj in anno:
            bbox = obj["bbox"]
            bbox = [int(i) for i in bbox]
            category_id = obj["category_id"] - 1
            instances.append({"category_id": category_id, "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS})
        r["annotations"] = instances
        if multi_class_labels is not None:
            r["multi_label"] = multi_class_labels[str(int(fileid))]
        dicts.append(r)
    return dicts

def load_voc_instances_wsl_w2f(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    # Get the image ids for the corresponding dataset split (train, val, test)
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
@@ -1069,6 +1306,18 @@ def register_pascal_voc_wsl_contain(name, dirname, split, year, thres, class_nam
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )

def register_pascal_voc_wsl_contain_keep(name, dirname, split, year, thres, class_names=CLASS_NAMES):
    DatasetCatalog.register(name, lambda: load_voc_instances_wsl_contain_keep(dirname, split, thres, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )

def register_pascal_voc_wsl_teach_iter(name, dirname, split, year, thres, class_names=CLASS_NAMES):
    DatasetCatalog.register(name, lambda: load_voc_instances_wsl_teach_iter(dirname, split, thres, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )

def register_pascal_voc_wsl_contain_total(name, dirname, split, year, class_names=CLASS_NAMES):
    DatasetCatalog.register(name, lambda: load_voc_instances_wsl_contain_total(dirname, split, class_names))
    MetadataCatalog.get(name).set(
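A hedged sketch of registering one of the new datasets by hand; the registration only stores a lambda, so the JSON (here voc07_wsl_train_teach_iter1.json under the assumed dirname) is read only when the catalog entry is first fetched:

from detectron2.data import DatasetCatalog

register_pascal_voc_wsl_teach_iter(
    "voc_2007_train_wsl_teach_iter1",  # dataset name
    "datasets/VOC2007",                # dirname (assumed layout)
    "train",                           # split
    2007,                              # year
    "1",                               # iteration tag formatted into the JSON filename
)
dicts = DatasetCatalog.get("voc_2007_train_wsl_teach_iter1")  # now load_voc_instances_wsl_teach_iter runs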
@@ -1112,6 +1361,12 @@ def register_pascal_voc_wsl_contain_all_adaptive(name, dirname, split, year, cla
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )

def register_pascal_voc_wsl_contain_adaptive(name, dirname, split, year, class_names=CLASS_NAMES):
    DatasetCatalog.register(name, lambda: load_voc_instances_wsl_contain_adaptive(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )

def register_pascal_voc_wsl_contain_w2f(name, dirname, split, year, class_names=CLASS_NAMES):
    DatasetCatalog.register(name, lambda: load_voc_instances_wsl_w2f(dirname, split, class_names))
    MetadataCatalog.get(name).set(
detectron2/evaluation/pascal_voc_evaluation.py
@@ -29,6 +29,7 @@ class PascalVOCDetectionEvaluator(DatasetEvaluator):
"""
def
__init__
(
self
,
dataset_name
):
# PS: 之所以会出现使用不同数据集结果 test 结果相同的原因好像是这里会根据 annotation 文件重新读一遍 gt 信息, 而不是用 dataset 中记录的
"""
Args:
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
...
...
@@ -77,25 +78,34 @@ class PascalVOCDetectionEvaluator(DatasetEvaluator):
        if not comm.is_main_process():
            return

        # Record the detection results only when testing on a single GPU
        # import json
        import json
        # # print(all_predictions)
        # if len(all_predictions) == 1:
        #     all_prediction = all_predictions[0]
        #     print("???")
        #     tide_result = []
        #     for cls_id, class_name in enumerate(self._class_names):
        #         lines = all_prediction[cls_id]
        #         for line in lines:
        #             message = line.split(" ")
        #             tide_result.append(
        #                 {
        #                     "image_id": int(message[0]),
        #                     "category_id": cls_id+1,
        #                     "score": float(message[1]),
        #                     "bbox": [float(message[2]), float(message[3]), float(message[4]), float(message[5])]
        #                 }
        #             )
        #     json.dump(tide_result, open("/mnt/data3/suilin/wsod/visual/{}.json".format(self._dataset_name), "w"))
        if len(all_predictions) == 1:
            all_prediction = all_predictions[0]
        else:
            all_prediction = {}
            for p in all_predictions:
                for key in list(p.keys()):
                    if key not in all_prediction:
                        all_prediction[key] = p[key]
                    else:
                        all_prediction[key] = all_prediction[key] + p[key]
        tide_result = []
        for cls_id, class_name in enumerate(self._class_names):
            lines = all_prediction[cls_id]
            for line in lines:
                message = line.split(" ")
                tide_result.append(
                    {
                        "image_id": int(message[0]),
                        "category_id": cls_id + 1,
                        "score": float(message[1]),
                        "bbox": [float(message[2]), float(message[3]), float(message[4]), float(message[5])]
                    }
                )
        # json.dump(tide_result, open("/mnt/data3/suilin/wsod/visual/{}.json".format(self._dataset_name), "w"))
        # json.dump(tide_result, open("/home/suil/codes/github/vis/{}_teach_iter1.json".format(self._dataset_name), "w"))
        # else:
        #     pass
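The per-class prediction lines aggregated above follow the "<image_id> <score> <x1> <y1> <x2> <y2>" layout that the parsing loop assumes; a small sketch with made-up values:

line = "000123 0.874 48.0 35.5 231.9 180.2"  # made-up detection line
message = line.split(" ")
entry = {
    "image_id": int(message[0]),
    "category_id": 1,                          # 1-based class index, as in the loop above
    "score": float(message[1]),
    "bbox": [float(v) for v in message[2:6]],
}
print(entry)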
@@ -478,4 +488,4 @@ def voc_eval_corloc(detpath, annopath, imagesetfile, classname, ovthresh=0.5, us
        else:
            F.append(image_ids[d])
    return 1.0 * len(T) / npos_im
\ No newline at end of file
    return 1.0 * len(T) / npos_im
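The ratio returned here reads as a CorLoc score: assuming T collects the images where the class was correctly localized and npos_im counts the images that contain the class, a toy check gives:

T = ["000012", "000017", "000023"]  # made-up ids of correctly localized images
npos_im = 5                         # images that actually contain the class
corloc = 1.0 * len(T) / npos_im
print(corloc)  # 0.6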