Commit b9ab69b9 authored by suilin0432

update swin fzln (frozen LayerNorm)

parent 19402617
......@@ -699,6 +699,7 @@ _C.MODEL.SWINT.WINDOW_SIZE = 7
_C.MODEL.SWINT.MLP_RATIO = 4
_C.MODEL.SWINT.DROP_PATH_RATE = 0.2
_C.MODEL.SWINT.APE = False
_C.MODEL.SWINT.FROZEN_LN = False
_C.MODEL.FPN.TOP_LEVELS = 2
_C.SOLVER.OPTIMIZER = "SGD"
......
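A minimal usage sketch of the new flag, assuming the standard detectron2/YACS config flow (the cfg bootstrapping below is illustrative, not part of this commit): MODEL.SWINT.FROZEN_LN defaults to False and is flipped on per experiment.

    # Illustrative only: enabling the new flag on a YACS config node.
    from yacs.config import CfgNode as CN

    _C = CN()
    _C.MODEL = CN()
    _C.MODEL.SWINT = CN()
    _C.MODEL.SWINT.FROZEN_LN = False  # default added by this commit

    cfg = _C.clone()
    cfg.MODEL.SWINT.FROZEN_LN = True  # freeze all LayerNorm parameters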
......@@ -10,7 +10,6 @@ from detectron2.utils import comm, env
from .wrappers import BatchNorm2d
class FrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
......
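For contrast with the LayerNorm freezing added below: batch norm cannot be frozen by requires_grad alone, because its running statistics also change in train mode, hence a dedicated module like the FrozenBatchNorm2d referenced above. A minimal sketch of the idea (simplified; an assumption, not the detectron2 implementation itself):

    import torch
    import torch.nn as nn

    class FrozenBatchNorm2dSketch(nn.Module):
        def __init__(self, num_features, eps=1e-5):
            super().__init__()
            # Statistics and affine terms live in buffers, so they are
            # never returned by parameters() and never trained.
            self.register_buffer("weight", torch.ones(num_features))
            self.register_buffer("bias", torch.zeros(num_features))
            self.register_buffer("running_mean", torch.zeros(num_features))
            self.register_buffer("running_var", torch.ones(num_features))
            self.eps = eps

        def forward(self, x):
            # Same affine transform in train and eval; nothing updates.
            scale = self.weight * (self.running_var + self.eps).rsqrt()
            shift = self.bias - self.running_mean * scale
            return x * scale.view(1, -1, 1, 1) + shift.view(1, -1, 1, 1)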
......@@ -479,6 +479,7 @@ class SwinTransformer(Backbone):
ape=False,
patch_norm=True,
frozen_stages=-1,
frozen_ln=False,
use_checkpoint=False,
out_features=None):
super(SwinTransformer, self).__init__()
......@@ -489,6 +490,7 @@ class SwinTransformer(Backbone):
self.ape = ape
self.patch_norm = patch_norm
self.frozen_stages = frozen_stages
self.frozen_ln = frozen_ln
self.out_features = out_features
......@@ -550,6 +552,15 @@ class SwinTransformer(Backbone):
self.add_module(layer_name, layer)
self._freeze_stages()
if self.frozen_ln:
self._freeze_ln()
def _freeze_ln(self):
print("FrozenLN enabled !!!")
for name, value in self.named_parameters():
if "norm" in name:
# LayerNorm behaves identically in training and inference, so it is
# enough to stop updating its gradients.
value.requires_grad = False
def _freeze_stages(self):
if self.frozen_stages >= 0:
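A self-contained sketch of what _freeze_ln does, on a toy module (illustrative only; Block and its dimensions are assumptions, not code from this commit):

    import torch.nn as nn

    class Block(nn.Module):
        def __init__(self, dim=96):
            super().__init__()
            self.norm1 = nn.LayerNorm(dim)
            self.fc = nn.Linear(dim, dim)

    model = nn.Sequential(Block(), Block())

    # Freeze every parameter whose name contains "norm", mirroring _freeze_ln.
    for name, value in model.named_parameters():
        if "norm" in name:
            value.requires_grad = False

    # Only the Linear weights and biases remain trainable.
    trainable = [n for n, p in model.named_parameters() if p.requires_grad]
    print(trainable)  # ['0.fc.weight', '0.fc.bias', '1.fc.weight', '1.fc.bias']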
......@@ -651,6 +662,7 @@ def build_swint_backbone(cfg, input_shape):
ape=cfg.MODEL.SWINT.APE,
patch_norm=True,
frozen_stages=cfg.MODEL.BACKBONE.FREEZE_AT,
frozen_ln=cfg.MODEL.SWINT.FROZEN_LN,
out_features=out_features
)
......
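Downstream, the frozen parameters take effect when the optimizer is built over trainable parameters only, the usual detectron2-style pattern matching SOLVER.OPTIMIZER = "SGD" above (a sketch; build_optimizer here is an assumption, not this repo's solver code):

    import torch

    def build_optimizer(model, lr=0.01, momentum=0.9):
        # Only parameters left with requires_grad=True reach the optimizer,
        # so frozen LayerNorm weights get neither updates nor momentum buffers.
        params = [p for p in model.parameters() if p.requires_grad]
        return torch.optim.SGD(params, lr=lr, momentum=momentum)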