List of commits:
Subject Hash Author Date (UTC)
implement attn_can_adcrowdnet ffd38664a43d861c20cdc225746b9ce2a00260c7 Thai Thien 2020-02-27 15:10:27
WIP: add can-adcrowdnet 5620b83449b31d00a367c8de77e431e19a5ccfb3 Thai Thien 2020-02-25 11:31:38
add readable timestamp viz ae1fdb49ddb9ea77659529dceb7fb87c2790c8dc Thai Thien 2020-02-24 03:49:57
change save name prefix c53a86f30fb8fd4e8f3a409eb67827d56a43ae5c Thai Thien 2020-02-02 10:48:15
training flow that work fb242273e8f696916f9d1ff4bb76b4e5869799ef Thai Thien 2020-02-02 10:42:01
fix the dataloader for shanghaitech 5f2aee9f316e6555e6a70c6ad037a4e6b491867b Thai Thien 2020-02-02 09:19:50
context aware visualize seem ok 1bdb6ffe77ca4e40ef8f299b2506df2266243db4 Thai Thien 2020-02-02 05:07:10
visualize eval context aware network seem ok f3fe45c23dfeab3730624737efabb0b14d23c25b Thai Thien 2020-02-02 04:50:34
visualize_shanghaitech_pacnn_with_perspective run without error 12366a2de2bd60ff4bd36e6132d44e37dedf7462 Thai Thien 2020-02-02 04:21:16
eval context aware network on ShanghaiTechB can run e8c454d2b6d287c830c1286c9a37884b3cfc615f Thai Thien 2020-02-02 04:09:14
import ShanghaiTechDataPath in data_util e81eb56315d44375ff5c0e747d61456601492f8f Thai Thien 2020-02-02 04:04:36
add model_context_aware_network.py 2a36025c001d85afc064c090f4d22987b328977b Thai Thien 2020-02-02 03:46:38
PACNN (TODO: test this) 44d5ae7ec57c760fb4f105dd3e3492148a0cc075 Thai Thien 2020-02-02 03:40:26
add data path 80134de767d0137a663f343e4606bafc57a1bc1f Thai Thien 2020-02-02 03:38:21
test if ShanghaiTech datapath is correct 97ee84944a4393ec3732879b24f614826f8e7798 Thai Thien 2020-02-01 03:57:31
refactor and test ShanghaiTech datapath 9542ebc00f257edc38690180b7a4353794be4019 Thai Thien 2020-02-01 03:53:49
fix the unzip flow b53c5989935335377eb6a88c942713d3eccc5df7 Thai Thien 2020-02-01 03:53:13
data_script run seem ok 67420c08fc1c10a66404d3698994865726a106cd Thai Thien 2020-02-01 03:33:18
add perspective 642d6fff8c9f31e510fda85a7fb631fb855d8a6d Thai Thien 2019-10-06 16:54:44
fix padding with p 86c2fa07822d956a34b3b37e14da485a4249f01b Thai Thien 2019-10-06 02:52:58
Commit ffd38664a43d861c20cdc225746b9ce2a00260c7 - implement attn_can_adcrowdnet
Author: Thai Thien
Author date (UTC): 2020-02-27 15:10
Committer name: Thai Thien
Committer date (UTC): 2020-02-27 15:10
Parent(s): 5620b83449b31d00a367c8de77e431e19a5ccfb3
Signing key:
Tree: 428989c27640eaea34793c1c9132ccc6569ec199
File Lines added Lines deleted
models/attn_can_adcrowdnet.py 10 2
models/squeeze_and_excitation.py 144 0
File models/attn_can_adcrowdnet.py copied from file models/can_adcrowdnet.py (similarity 94%) (mode: 100644) (index e022339..1f95223)
... ... import collections
5 5 import torch.nn.functional as F import torch.nn.functional as F
6 6 import os import os
7 7 from .deform_conv_v2 import DeformConv2d from .deform_conv_v2 import DeformConv2d
8 from .squeeze_and_excitation import ChannelSpatialSELayer
8 9 # from dcn.modules.deform_conv import DeformConvPack, ModulatedDeformConvPack # from dcn.modules.deform_conv import DeformConvPack, ModulatedDeformConvPack
9 10
10 11
11 class CanAdcrowdNet(nn.Module):
12 class AttnCanAdcrowdNet(nn.Module):
12 13 def __init__(self, load_weights=False): def __init__(self, load_weights=False):
13 super(CanAdcrowdNet, self).__init__()
14 super(AttnCanAdcrowdNet, self).__init__()
14 15 self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512] self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
15 16 self.frontend = make_layers(self.frontend_feat) self.frontend = make_layers(self.frontend_feat)
17
18 self.csSE = ChannelSpatialSELayer(num_channels=512, reduction_ratio=1)
19
16 20 self.concat_filter_layer = nn.Conv2d(1024, 512, kernel_size=3, padding=2, dilation=2) self.concat_filter_layer = nn.Conv2d(1024, 512, kernel_size=3, padding=2, dilation=2)
17 21
18 22 self.deform_conv_1_3 = DeformConv2d(512, 256, kernel_size=3, stride=1, padding=1) self.deform_conv_1_3 = DeformConv2d(512, 256, kernel_size=3, stride=1, padding=1)
 
... ... class CanAdcrowdNet(nn.Module):
50 54
51 55 def forward(self, x): def forward(self, x):
52 56 fv = self.frontend(x) fv = self.frontend(x)
57
58 # concurrent spatial and channel squeeze & excitation
59 fv = self.csSE(fv)
60
53 61 # S=1 # S=1
54 62 ave1 = nn.functional.adaptive_avg_pool2d(fv, (1, 1)) ave1 = nn.functional.adaptive_avg_pool2d(fv, (1, 1))
55 63 ave1 = self.conv1_1(ave1) ave1 = self.conv1_1(ave1)
File models/squeeze_and_excitation.py added (mode: 100644) (index 0000000..95d3f4a)
1 """
2 Squeeze and Excitation Module
3 *****************************
4
5 Collection of squeeze and excitation classes where each can be inserted as a block into a neural network architecture
6
7 1. `Channel Squeeze and Excitation <https://arxiv.org/abs/1709.01507>`_
8 2. `Spatial Squeeze and Excitation <https://arxiv.org/abs/1803.02579>`_
9 3. `Channel and Spatial Squeeze and Excitation <https://arxiv.org/abs/1803.02579>`_
10
11 """
12
13 from enum import Enum
14
15 import torch
16 import torch.nn as nn
17 import torch.nn.functional as F
18
19
class ChannelSELayer(nn.Module):
    """
    Channel Squeeze-and-Excitation block.

    Re-implementation of the SE block described in:
    *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*

    Globally average-pools each channel, feeds the pooled vector through a
    two-layer bottleneck MLP, and rescales the input channels by the
    resulting sigmoid gates.
    """

    def __init__(self, num_channels, reduction_ratio=2):
        """
        :param num_channels: number of input channels
        :param reduction_ratio: factor by which num_channels is reduced
            in the bottleneck layer
        """
        super(ChannelSELayer, self).__init__()
        reduced = num_channels // reduction_ratio
        self.reduction_ratio = reduction_ratio
        self.fc1 = nn.Linear(num_channels, reduced, bias=True)
        self.fc2 = nn.Linear(reduced, num_channels, bias=True)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :return: channel-reweighted tensor of the same shape
        """
        batch_size, num_channels = input_tensor.size()[:2]
        # squeeze: global average over the spatial dimensions
        pooled = input_tensor.view(batch_size, num_channels, -1).mean(dim=2)
        # excite: bottleneck MLP followed by per-channel sigmoid gates
        gates = self.sigmoid(self.fc2(self.relu(self.fc1(pooled))))
        return input_tensor * gates.view(batch_size, num_channels, 1, 1)
58
59
class SpatialSELayer(nn.Module):
    """
    Spatial Squeeze-and-Excitation block.

    Re-implementation of the SE block — squeezing spatially and exciting
    channel-wise — described in:
    *Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in
    Fully Convolutional Networks, MICCAI 2018*

    Projects the channel dimension down to a single-channel spatial map via
    a 1x1 convolution, then rescales every spatial location of the input by
    the sigmoid of that map.
    """

    def __init__(self, num_channels):
        """
        :param num_channels: number of input channels
        """
        super(SpatialSELayer, self).__init__()
        self.conv = nn.Conv2d(num_channels, 1, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_tensor, weights=None):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :param weights: optional externally supplied weights for few-shot
            learning; averaged over dim 0 and used as a 1x1-conv kernel
            instead of the learned one
        :return: spatially reweighted tensor of the same shape
        """
        batch_size, num_channels, height, width = input_tensor.size()

        if weights is None:
            # spatial squeeze with the learned 1x1 convolution
            projected = self.conv(input_tensor)
        else:
            # few-shot path: average the provided weights and apply them
            # functionally as a 1x1 convolution
            kernel = torch.mean(weights, dim=0).view(1, num_channels, 1, 1)
            projected = F.conv2d(input_tensor, kernel)

        # spatial excitation: one sigmoid gate per spatial location
        attention = self.sigmoid(projected).view(batch_size, 1, height, width)
        return input_tensor * attention
99
100
class ChannelSpatialSELayer(nn.Module):
    """
    Concurrent spatial and channel Squeeze-and-Excitation (csSE) block.

    Re-implementation of:
    *Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in
    Fully Convolutional Networks, MICCAI 2018, arXiv:1803.02579*

    Runs the channel (cSE) and spatial (sSE) excitations in parallel and
    keeps the element-wise maximum of the two re-weighted tensors.
    """

    def __init__(self, num_channels, reduction_ratio=2):
        """
        :param num_channels: number of input channels
        :param reduction_ratio: channel-reduction factor forwarded to the
            cSE branch
        """
        super(ChannelSpatialSELayer, self).__init__()
        self.cSE = ChannelSELayer(num_channels, reduction_ratio)
        self.sSE = SpatialSELayer(num_channels)

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :return: element-wise maximum of the cSE and sSE outputs
        """
        channel_branch = self.cSE(input_tensor)
        spatial_branch = self.sSE(input_tensor)
        return torch.max(channel_branch, spatial_branch)
125
126
class SELayer(Enum):
    """
    Closed set of SE block types, so that type checking can be added when
    wiring these blocks into a neural network::

        if self.se_block_type == se.SELayer.CSE.value:
            self.SELayer = se.ChannelSpatialSELayer(params['num_filters'])

        elif self.se_block_type == se.SELayer.SSE.value:
            self.SELayer = se.SpatialSELayer(params['num_filters'])

        elif self.se_block_type == se.SELayer.CSSE.value:
            self.SELayer = se.ChannelSpatialSELayer(params['num_filters'])
    """

    NONE = 'NONE'
    CSE = 'CSE'
    SSE = 'SSE'
    CSSE = 'CSSE'
Hints:
Before first commit, do not forget to setup your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main