List of commits:
Subject Hash Author Date (UTC)
debug number not divisible by 8 a568fd7f294a8bd31b3db78437b4b6b51b5b41b9 Thai Thien 2019-09-22 04:36:06
pacnn 967074890d14ab0eefc277801860270a468e8f9f Thai Thien 2019-09-22 03:54:48
wip: pacnn 2192d7c7b449fecf3868877d9cfbc09bb6f7ae98 Thai Thien 2019-09-22 03:44:56
wip: pacnn 37620e5a9bc0f9516ea964ec58d9bdaa1c40ff36 Thai Thien 2019-09-22 03:14:42
fix training flow 2b87b1b26c7296b64493fdc49fedb421b249dfa3 Thai Thien 2019-09-17 18:00:35
dataset script bc5c052f5f956510ab95ef9a45434fd486c57fae Thai Thien 2019-09-16 17:21:13
evaluator ffc5bf8290ae0c469a9a18a2d061cfd1bfeee822 Thai Thien 2019-09-14 04:56:35
some more test for data loader 25173578cde7d4e9fe6c6140d1ee01caa4fcfc32 Thai Thien 2019-09-14 02:51:58
some visualize to debug data loader e4f52007616acf307bddbde79c0fb4f8c649c785 Thai Thien 2019-09-13 17:35:45
wip d7d44cad6774355bdfa45414258763f6c6a0c299 Thai Thien 2019-08-31 16:58:16
commit all 6dad7a58f7dbf9fc288ce9dd3e92be538851c2a7 Thai Thien 2019-08-29 19:10:44
input d1,d2,d3 match fc2a809241f8b6356d964c63d40cbebd55ca5f6c Thai Thien 2019-08-28 17:57:05
WIP 39eab26d061e61dfffbf164dbd5fd878299b7250 thient 2019-08-28 11:09:12
output of de is ok dd770386674df3e0fbebafdfc48a9352bc28967d thient 2019-08-28 10:54:09
code pacnn c49537b5cc91e96e4e35c9338d2c95b9bb41c672 Thai Thien 2019-08-27 16:35:27
crowd counting stuff da9f27a39cba9bdd021b6b5c562f5f7c2be50190 Thai Thien 2019-08-24 18:27:44
seem ok 53fa176c31669a0e89b04adf290cb398f0316c45 Thai Thien 2019-08-24 18:26:31
flow ok ad849681000818dfbcd0c1715c2858aed7236041 Thai Thien 2019-08-24 17:00:02
wip 23c3ec48497782bbc91d829e1c8a682502360ab9 Thai Thien 2019-08-24 14:19:22
work in progress, try to use https://pytorch.org/ignite/quickstart.html 39c824fe8fc2501628ee42c236a844df45521007 Thai Thien 2019-08-24 07:41:46
Commit a568fd7f294a8bd31b3db78437b4b6b51b5b41b9 - debug number not divisible by 8
Author: Thai Thien
Author date (UTC): 2019-09-22 04:36
Committer name: Thai Thien
Committer date (UTC): 2019-09-22 04:36
Parent(s): 967074890d14ab0eefc277801860270a468e8f9f
Signing key:
Tree: 5c73388ffc999be12841cbab2753755c7d4d7861
File Lines added Lines deleted
data_flow.py 2 2
main_pacnn.py 16 11
models/__init__.py 1 1
models/pacnn.py 8 1
models/test_PACNNWithPerspectiveMap.py 26 3
File data_flow.py changed (mode: 100644) (index 7e0162d..924b307)
... ... def load_data_shanghaitech_pacnn(img_path, train=True):
98 98 target1 = cv2.resize(target, (int(target.shape[1] / 8), int(target.shape[0] / 8)), target1 = cv2.resize(target, (int(target.shape[1] / 8), int(target.shape[0] / 8)),
99 99 interpolation=cv2.INTER_CUBIC) * 64 interpolation=cv2.INTER_CUBIC) * 64
100 100 target2 = cv2.resize(target, (int(target.shape[1] / 16), int(target.shape[0] / 16)), target2 = cv2.resize(target, (int(target.shape[1] / 16), int(target.shape[0] / 16)),
101 interpolation=cv2.INTER_CUBIC) * 64 #*2
101 interpolation=cv2.INTER_CUBIC) * 64 *2
102 102 target3 = cv2.resize(target, (int(target.shape[1] / 32), int(target.shape[0] / 32)), target3 = cv2.resize(target, (int(target.shape[1] / 32), int(target.shape[0] / 32)),
103 interpolation=cv2.INTER_CUBIC) * 64 #*4
103 interpolation=cv2.INTER_CUBIC) * 64 *4
104 104
105 105 return img, (target1, target2, target3) return img, (target1, target2, target3)
106 106
File main_pacnn.py changed (mode: 100644) (index 9902b26..449320c)
... ... from crowd_counting_error_metrics import CrowdCountingMeanAbsoluteError, CrowdCo
6 6 import torch import torch
7 7 from torch import nn from torch import nn
8 8 import torch.nn.functional as F import torch.nn.functional as F
9 from models import CSRNet,PACNN
9 from models import CSRNet, PACNN, PACNNWithPerspectiveMap
10 10 import os import os
11 11 import cv2 import cv2
12 12 from torchvision import datasets, transforms from torchvision import datasets, transforms
 
... ... if __name__ == "__main__":
26 26 print(args) print(args)
27 27 DATA_PATH = args.input DATA_PATH = args.input
28 28 DATASET_NAME = "shanghaitech" DATASET_NAME = "shanghaitech"
29 PACNN_PERSPECTIVE_AWARE_MODEL = False
29 30
30 31 # create list # create list
31 32 if DATASET_NAME is "shanghaitech": if DATASET_NAME is "shanghaitech":
 
... ... if __name__ == "__main__":
64 65 batch_size=1, num_workers=4) batch_size=1, num_workers=4)
65 66
66 67 # create model # create model
67 net = PACNN().to(device)
68 net = PACNNWithPerspectiveMap(perspective_aware_mode=PACNN_PERSPECTIVE_AWARE_MODEL).to(device)
68 69 criterion_mse = nn.MSELoss(size_average=False).to(device) criterion_mse = nn.MSELoss(size_average=False).to(device)
69 criterion_ssim = pytorch_ssim.SSIM(window_size=11).to(device)
70 criterion_ssim = pytorch_ssim.SSIM(window_size=5).to(device)
70 71
71 72 optimizer = torch.optim.SGD(net.parameters(), args.lr, optimizer = torch.optim.SGD(net.parameters(), args.lr,
72 73 momentum=args.momentum, momentum=args.momentum,
 
... ... if __name__ == "__main__":
84 85
85 86 # load data # load data
86 87 d1_label, d2_label, d3_label = label d1_label, d2_label, d3_label = label
87 d1_label = d1_label.to(device)
88 d2_label = d2_label.to(device)
89 d3_label = d3_label.to(device)
88 d1_label = d1_label.to(device).unsqueeze(0)
89 d2_label = d2_label.to(device).unsqueeze(0)
90 d3_label = d3_label.to(device).unsqueeze(0)
90 91
91 92 # forward pass # forward pass
92 93
93 d1, d2, d3 = net(train_img.to(device))
94 loss_1 = criterion_mse(d1, d1_label) + criterion_ssim(d1.unsqueeze(0), d1_label.unsqueeze(0))
95 loss_2 = criterion_mse(d2, d2_label) + criterion_ssim(d2.unsqueeze(0), d2_label.unsqueeze(0))
96 loss_3 = criterion_mse(d3, d3_label) + criterion_ssim(d3.unsqueeze(0), d3_label.unsqueeze(0))
97
94 d1, d2, d3, p_s, p, d = net(train_img.to(device))
95 loss_1 = criterion_mse(d1, d1_label) + criterion_ssim(d1, d1_label)
96 loss_2 = criterion_mse(d2, d2_label) + criterion_ssim(d2, d2_label)
97 loss_3 = criterion_mse(d3, d3_label) + criterion_ssim(d3, d3_label)
98 98 loss = loss_1 + loss_2 + loss_3 loss = loss_1 + loss_2 + loss_3
99 if PACNN_PERSPECTIVE_AWARE_MODEL:
100 # TODO: loss for perspective map here
101 pass
102 loss_d = criterion_mse(d, d1_label) + criterion_ssim(d, d1_label)
103 loss += loss_d
99 104 loss.backward() loss.backward()
100 105 optimizer.step() optimizer.step()
101 106 loss_sum += loss.item() loss_sum += loss.item()
File models/__init__.py changed (mode: 100644) (index 48fc576..7424e78)
1 1 from .csrnet import CSRNet from .csrnet import CSRNet
2 from .pacnn import PACNN
2 from .pacnn import PACNN, PACNNWithPerspectiveMap
File models/pacnn.py changed (mode: 100644) (index 82897d0..9e7203e)
... ... class PACNNWithPerspectiveMap(nn.Module):
75 75 de23 = pespective_w_s * de2 + (1 - pespective_w_s)*(de2 + self.up23(de3)) de23 = pespective_w_s * de2 + (1 - pespective_w_s)*(de2 + self.up23(de3))
76 76 de = pespective_w * de1 + (1 - pespective_w)*(de1 + self.up12(de23)) de = pespective_w * de1 + (1 - pespective_w)*(de1 + self.up12(de23))
77 77 else: else:
78 #try:
79 pespective_w_s = None
80 pespective_w = None
78 81 de23 = (de2 + self.up23(de3))/2 de23 = (de2 + self.up23(de3))/2
79 82 de = (de1 + self.up12(de23))/2 de = (de1 + self.up12(de23))/2
80 return de
83 # except Exception as e:
84 # print("EXECEPTION ", e)
85 # print(x.size())
86 # print(de2.size(), de3.size())
87 return de1, de2, de3, pespective_w_s, pespective_w, de
81 88
82 89 def count_param(net): def count_param(net):
83 90 pytorch_total_params = sum(p.numel() for p in net.parameters()) pytorch_total_params = sum(p.numel() for p in net.parameters())
File models/test_PACNNWithPerspectiveMap.py changed (mode: 100644) (index cd3eab6..af8340f)
... ... import torch
4 4
5 5 class TestPACNNWithPerspectiveMap(TestCase): class TestPACNNWithPerspectiveMap(TestCase):
6 6
7 def test_debug_avg_schema_pacnn(self):
8 net = PACNNWithPerspectiveMap()
9 image = torch.rand(1, 3, 330, 512)
10 _, _, _, _, _, density_map = net(image)
11 print(density_map.size())
12
13
7 14 def test_avg_schema_pacnn(self): def test_avg_schema_pacnn(self):
8 15 net = PACNNWithPerspectiveMap() net = PACNNWithPerspectiveMap()
9 16 # image # image
10 17 # batch size, channel, h, w # batch size, channel, h, w
11 image = torch.rand(1, 3, 224, 224)
12 density_map = net(image)
18 image = torch.rand(1, 3, 330, 512)
19 _, _, _, _, _, density_map = net(image)
20 print(density_map.size())
21 image2 = torch.rand(1, 3, 225, 225)
22 _, _, _, _, _, density_map2 = net(image2)
23 print(density_map2.size())
24 image3 = torch.rand(1, 3, 226, 226)
25 _, _, _, _, _, density_map3 = net(image3)
26 print(density_map3.size())
27
28 image = torch.rand(1, 3, 227, 227)
29 _, _, _, _, _, density_map = net(image)
13 30 print(density_map.size()) print(density_map.size())
31 image2 = torch.rand(1, 3, 228, 228)
32 _, _, _, _, _, density_map2 = net(image2)
33 print(density_map2.size())
34 image3 = torch.rand(1, 3, 229, 229)
35 _, _, _, _, _, density_map3 = net(image3)
36 print(density_map3.size())
14 37
15 38 def test_perspective_aware_schema_pacnn(self): def test_perspective_aware_schema_pacnn(self):
16 39 net = PACNNWithPerspectiveMap(perspective_aware_mode=True) net = PACNNWithPerspectiveMap(perspective_aware_mode=True)
17 40 # image # image
18 41 # batch size, channel, h, w # batch size, channel, h, w
19 42 image = torch.rand(1, 3, 224, 224) image = torch.rand(1, 3, 224, 224)
20 density_map = net(image)
43 _, _, _, _, _, density_map = net(image)
21 44 print(density_map.size()) print(density_map.size())
Hints:
Before your first commit, do not forget to set up your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main