File models/attn_can_adcrowdnet.py copied from file models/can_adcrowdnet.py (similarity 94%) (mode: 100644) (index e022339..1f95223) |
... |
... |
import collections |
5 |
5 |
import torch.nn.functional as F |
import torch.nn.functional as F |
6 |
6 |
import os |
import os |
7 |
7 |
from .deform_conv_v2 import DeformConv2d |
from .deform_conv_v2 import DeformConv2d |
|
8 |
|
from .squeeze_and_excitation import ChannelSpatialSELayer |
8 |
9 |
# from dcn.modules.deform_conv import DeformConvPack, ModulatedDeformConvPack |
# from dcn.modules.deform_conv import DeformConvPack, ModulatedDeformConvPack |
9 |
10 |
|
|
10 |
11 |
|
|
11 |
|
class CanAdcrowdNet(nn.Module): |
|
|
12 |
|
class AttnCanAdcrowdNet(nn.Module): |
12 |
13 |
def __init__(self, load_weights=False): |
def __init__(self, load_weights=False): |
13 |
|
super(CanAdcrowdNet, self).__init__() |
|
|
14 |
|
super(AttnCanAdcrowdNet, self).__init__() |
14 |
15 |
self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512] |
self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512] |
15 |
16 |
self.frontend = make_layers(self.frontend_feat) |
self.frontend = make_layers(self.frontend_feat) |
|
17 |
|
|
|
18 |
|
self.csSE = ChannelSpatialSELayer(num_channels=512, reduction_ratio=1) |
|
19 |
|
|
16 |
20 |
self.concat_filter_layer = nn.Conv2d(1024, 512, kernel_size=3, padding=2, dilation=2) |
self.concat_filter_layer = nn.Conv2d(1024, 512, kernel_size=3, padding=2, dilation=2) |
17 |
21 |
|
|
18 |
22 |
self.deform_conv_1_3 = DeformConv2d(512, 256, kernel_size=3, stride=1, padding=1) |
self.deform_conv_1_3 = DeformConv2d(512, 256, kernel_size=3, stride=1, padding=1) |
|
... |
... |
class CanAdcrowdNet(nn.Module): |
50 |
54 |
|
|
51 |
55 |
def forward(self, x): |
def forward(self, x): |
52 |
56 |
fv = self.frontend(x) |
fv = self.frontend(x) |
|
57 |
|
|
|
58 |
|
# concurrent spatial and channel squeeze & excitation |
|
59 |
|
fv = self.csSE(fv) |
|
60 |
|
|
53 |
61 |
# S=1 |
# S=1 |
54 |
62 |
ave1 = nn.functional.adaptive_avg_pool2d(fv, (1, 1)) |
ave1 = nn.functional.adaptive_avg_pool2d(fv, (1, 1)) |
55 |
63 |
ave1 = self.conv1_1(ave1) |
ave1 = self.conv1_1(ave1) |
File models/squeeze_and_excitation.py added (mode: 100644) (index 0000000..95d3f4a) |
|
1 |
|
""" |
|
2 |
|
Squeeze and Excitation Module |
|
3 |
|
***************************** |
|
4 |
|
|
|
5 |
|
Collection of squeeze and excitation classes where each can be inserted as a block into a neural network architechture |
|
6 |
|
|
|
7 |
|
1. `Channel Squeeze and Excitation <https://arxiv.org/abs/1709.01507>`_ |
|
8 |
|
2. `Spatial Squeeze and Excitation <https://arxiv.org/abs/1803.02579>`_ |
|
9 |
|
3. `Channel and Spatial Squeeze and Excitation <https://arxiv.org/abs/1803.02579>`_ |
|
10 |
|
|
|
11 |
|
""" |
|
12 |
|
|
|
13 |
|
from enum import Enum |
|
14 |
|
|
|
15 |
|
import torch |
|
16 |
|
import torch.nn as nn |
|
17 |
|
import torch.nn.functional as F |
|
18 |
|
|
|
19 |
|
|
|
20 |
|
class ChannelSELayer(nn.Module):
    """Channel Squeeze-and-Excitation (cSE) block.

    Re-implementation of the SE block described in
    *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*:
    globally average-pool each channel, push the pooled vector through a
    two-layer bottleneck MLP, and rescale the input channels by the
    resulting sigmoid gates.
    """

    def __init__(self, num_channels, reduction_ratio=2):
        """
        :param num_channels: number of input channels
        :param reduction_ratio: factor by which num_channels is reduced
            inside the bottleneck MLP
        """
        super(ChannelSELayer, self).__init__()
        reduced_channels = num_channels // reduction_ratio
        self.reduction_ratio = reduction_ratio
        self.fc1 = nn.Linear(num_channels, reduced_channels, bias=True)
        self.fc2 = nn.Linear(reduced_channels, num_channels, bias=True)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :return: recalibrated tensor of the same shape
        """
        batch_size, num_channels, _, _ = input_tensor.size()

        # Squeeze: global average over the spatial dimensions.
        squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(dim=2)

        # Excitation: bottleneck MLP -> per-channel gates in (0, 1).
        gates = self.sigmoid(self.fc2(self.relu(self.fc1(squeeze_tensor))))

        # Broadcast the gates over H and W and rescale the input.
        return input_tensor * gates.view(batch_size, num_channels, 1, 1)
|
58 |
|
|
|
59 |
|
|
|
60 |
|
class SpatialSELayer(nn.Module):
    """Spatial Squeeze-and-Excitation (sSE) block.

    Re-implementation of the SE block -- squeezing channel-wise and exciting
    spatially -- described in *Roy et al., Concurrent Spatial and Channel
    Squeeze & Excitation in Fully Convolutional Networks, MICCAI 2018*.
    """

    def __init__(self, num_channels):
        """
        :param num_channels: number of input channels
        """
        super(SpatialSELayer, self).__init__()
        self.conv = nn.Conv2d(num_channels, 1, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_tensor, weights=None):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :param weights: optional externally supplied conv weights (few-shot
            learning); averaged over dim 0 and used in place of self.conv
        :return: output_tensor of the same shape as input_tensor
        """
        batch_size, channel, height, width = input_tensor.size()

        # Squeeze: a 1x1 convolution collapses the channel dim to one map.
        if weights is None:
            projection = self.conv(input_tensor)
        else:
            kernel = torch.mean(weights, dim=0).view(1, channel, 1, 1)
            projection = F.conv2d(input_tensor, kernel)

        # Excitation: per-pixel gate in (0, 1), broadcast over channels.
        gate = self.sigmoid(projection).view(batch_size, 1, height, width)
        return input_tensor * gate
|
99 |
|
|
|
100 |
|
|
|
101 |
|
class ChannelSpatialSELayer(nn.Module):
    """Concurrent spatial and channel Squeeze-and-Excitation (scSE).

    Re-implementation of *Roy et al., Concurrent Spatial and Channel Squeeze
    & Excitation in Fully Convolutional Networks, MICCAI 2018,
    arXiv:1803.02579*: the input is recalibrated by both the channel (cSE)
    and spatial (sSE) branches and the two results are fused element-wise.
    """

    def __init__(self, num_channels, reduction_ratio=2):
        """
        :param num_channels: number of input channels
        :param reduction_ratio: factor by which num_channels is reduced
            inside the cSE branch
        """
        super(ChannelSpatialSELayer, self).__init__()
        self.cSE = ChannelSELayer(num_channels, reduction_ratio)
        self.sSE = SpatialSELayer(num_channels)

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :return: output_tensor of the same shape as input_tensor
        """
        channel_branch = self.cSE(input_tensor)
        spatial_branch = self.sSE(input_tensor)
        # Fuse the two recalibrations by taking the element-wise maximum.
        return torch.max(channel_branch, spatial_branch)
|
125 |
|
|
|
126 |
|
|
|
127 |
|
class SELayer(Enum):
    """Closed set of the available SE block types.

    Lets a network type-check which squeeze-and-excitation variant to
    insert when building its layers, e.g.::

        if self.se_block_type == se.SELayer.CSE.value:
            self.SELayer = se.ChannelSpatialSELayer(params['num_filters'])

        elif self.se_block_type == se.SELayer.SSE.value:
            self.SELayer = se.SpatialSELayer(params['num_filters'])

        elif self.se_block_type == se.SELayer.CSSE.value:
            self.SELayer = se.ChannelSpatialSELayer(params['num_filters'])
    """

    NONE = 'NONE'
    CSE = 'CSE'
    SSE = 'SSE'
    CSSE = 'CSSE'