Commit 34a33491 authored by suilin0432

add

parent c2578884
@@ -663,4 +663,18 @@ _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
# Use False for DeformableV1.
_C.MODEL.RESNETS.DEFORM_MODULATED = False
# Number of groups in deformable conv.
_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
\ No newline at end of file
_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
# Config options added for ResNeSt
_C.MODEL.RESNETS.STRIDE_IN_1X1 = False
# Apply deep stem
_C.MODEL.RESNETS.DEEP_STEM = True
# Apply an average pool after conv2 in the bottleneck block (AVD)
# When AVD is True, STRIDE_IN_1X1 should be False
_C.MODEL.RESNETS.AVD = True
# Apply avg_down (average pooling) in the downsampling layer of the residual shortcut path
_C.MODEL.RESNETS.AVG_DOWN = True
# Radix in ResNeSt
_C.MODEL.RESNETS.RADIX = 2
# Bottleneck width in ResNeSt
_C.MODEL.RESNETS.BOTTLENECK_WIDTH = 64
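The comment above flags a coupling between AVD and STRIDE_IN_1X1. As a minimal, purely illustrative sketch (assuming the usual detectron2 pattern of cloning this config node; the variable name cfg and the values are placeholders), the new keys could be set together like this:

cfg = _C.clone()
cfg.MODEL.RESNETS.DEEP_STEM = True        # use the deep multi-conv stem instead of a single 7x7 conv
cfg.MODEL.RESNETS.AVD = True              # average pooling after conv2 in each bottleneck block
cfg.MODEL.RESNETS.STRIDE_IN_1X1 = False   # must stay False while AVD is True
cfg.MODEL.RESNETS.AVG_DOWN = True         # average pooling in the residual-path downsampling layer
cfg.MODEL.RESNETS.RADIX = 2               # number of splits within each cardinal group
cfg.MODEL.RESNETS.BOTTLENECK_WIDTH = 64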
@@ -14,6 +14,7 @@ from .resnet import (
from .regnet import RegNet, build_regnet_backbone
from .vgg16 import build_vgg_backbone
from .vgg_torch import vgg16_bn, build_vgg16
from .resnest import build_resnest_backbone, build_resnest_fpn_backbone, ResNeSt
__all__ = [k for k in globals().keys() if not k.startswith("_")]
# TODO can expose more resnet blocks after careful consideration
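A hedged usage sketch for the newly exported builder, assuming it follows detectron2's standard backbone-builder signature (cfg, input_shape); cfg here stands for a fully populated config and is not defined in this snippet:

import torch
from detectron2.layers import ShapeSpec
# Hypothetical call; input_shape describes the 3-channel input images.
backbone = build_resnest_fpn_backbone(cfg, ShapeSpec(channels=3))
features = backbone(torch.randn(1, 3, 224, 224))  # dict of named FPN feature maps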
"""Split-Attention"""
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Module, Linear, BatchNorm2d, ReLU
from torch.nn.modules.utils import _pair
from detectron2.layers import (
Conv2d,
get_norm,
)
__all__ = ['SplAtConv2d', 'SplAtConv2d_dcn']
class DropBlock2D(object):
    """Placeholder: DropBlock regularization is not implemented in this port."""
    def __init__(self, *args, **kwargs):
        raise NotImplementedError
class SplAtConv2d(Module):
"""Split-Attention Conv2d
"""
def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias=True,
radix=2, reduction_factor=4,
rectify=False, rectify_avg=False, norm=None,
dropblock_prob=0.0, **kwargs):
super(SplAtConv2d, self).__init__()
padding = _pair(padding)
self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
self.rectify_avg = rectify_avg
inter_channels = max(in_channels*radix//reduction_factor, 32)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.dropblock_prob = dropblock_prob
if self.rectify:
from rfconv import RFConv2d
self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs)
else:
self.conv = Conv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
groups=groups*radix, bias=bias, **kwargs)
self.use_bn = norm is not None
if self.use_bn:
self.bn0 = get_norm(norm, channels*radix)
self.relu = ReLU(inplace=True)
self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
if self.use_bn:
self.bn1 = get_norm(norm, inter_channels)
self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality)
if dropblock_prob > 0.0:
self.dropblock = DropBlock2D(dropblock_prob, 3)
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn0(x)
if self.dropblock_prob > 0.0:
x = self.dropblock(x)
x = self.relu(x)
batch, rchannel = x.shape[:2]
        if self.radix > 1:
            # split the radix groups along the channel axis and sum them to feed
            # the channel-attention branch
            splited = torch.split(x, rchannel//self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = x
        # global average pooling -> fc1 -> fc2 yields the per-split attention logits
        gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
if self.use_bn:
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            # weight each split by its attention map and sum the weighted splits
            attens = torch.split(atten, rchannel//self.radix, dim=1)
            out = sum([att*split for (att, split) in zip(attens, splited)])
else:
out = atten * x
return out.contiguous()
class rSoftMax(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
        if self.radix > 1:
            # softmax over the radix dimension, computed within each cardinal group
            x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
            x = F.softmax(x, dim=1)
            x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
class SplAtConv2d_dcn(Module):
"""Split-Attention Conv2d with dcn
"""
def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias=True,
radix=2, reduction_factor=4,
rectify=False, rectify_avg=False, norm=None,
dropblock_prob=0.0,
deform_conv_op=None,
deformable_groups=1,
deform_modulated=False,
**kwargs):
super(SplAtConv2d_dcn, self).__init__()
self.deform_modulated = deform_modulated
padding = _pair(padding)
self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
self.rectify_avg = rectify_avg
inter_channels = max(in_channels*radix//reduction_factor, 32)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.dropblock_prob = dropblock_prob
if self.rectify:
from rfconv import RFConv2d
self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs)
else:
self.conv = deform_conv_op(in_channels, channels*radix, kernel_size, stride, padding[0], dilation,
groups=groups*radix, bias=bias, deformable_groups=deformable_groups, **kwargs)
self.use_bn = norm is not None
if self.use_bn:
self.bn0 = get_norm(norm, channels*radix)
self.relu = ReLU(inplace=True)
self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
if self.use_bn:
self.bn1 = get_norm(norm, inter_channels)
self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality)
if dropblock_prob > 0.0:
self.dropblock = DropBlock2D(dropblock_prob, 3)
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, x, offset_input):
        if self.deform_modulated:
            # modulated DCN: offset_input packs x-offsets, y-offsets and a modulation mask
            offset_x, offset_y, mask = torch.chunk(offset_input, 3, dim=1)
            offset = torch.cat((offset_x, offset_y), dim=1)
            mask = mask.sigmoid()
            x = self.conv(x, offset, mask)
else:
x = self.conv(x, offset_input)
if self.use_bn:
x = self.bn0(x)
if self.dropblock_prob > 0.0:
x = self.dropblock(x)
x = self.relu(x)
batch, rchannel = x.shape[:2]
if self.radix > 1:
splited = torch.split(x, rchannel//self.radix, dim=1)
gap = sum(splited)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
if self.use_bn:
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
if self.radix > 1:
attens = torch.split(atten, rchannel//self.radix, dim=1)
out = sum([att*split for (att, split) in zip(attens, splited)])
else:
out = atten * x
return out.contiguous()
\ No newline at end of file
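As a quick, hypothetical sanity check of SplAtConv2d above: with radix=2 the internal grouped conv produces channels*radix feature maps, but the attention-weighted sum of the splits brings the output back to channels (128 in this sketch).

import torch
conv = SplAtConv2d(64, 128, 3, stride=(1, 1), padding=(1, 1),
                   groups=1, bias=False, radix=2, norm="BN")
y = conv(torch.randn(2, 64, 56, 56))
print(y.shape)  # expected: torch.Size([2, 128, 56, 56])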
import torch
import sys
# Source checkpoint path and converted-output path are taken from the command line.
input = sys.argv[1]
output = sys.argv[2]

obj = torch.load(input, map_location="cpu")

newmodel = {}
# detectron2-style checkpoint wrapper; "matching_heuristics" lets the checkpointer
# apply its key-name matching heuristics when these weights are loaded.
save_model = {
    "matching_heuristics": True,
    "__author__": "regnet"
}
stem_dict = {
"0"
}
for k in list(obj.keys()):