List of commits:
Subject | Hash | Author | Date (UTC)
apex not work so well | da8c0dd57297f972201f31d57e66897177922f48 | Thai Thien | 2019-09-24 17:25:59
fix data loader pacnn so it will scale up with correct number of people | 11d55b50d764511f2491291f0208fee0905dec49 | Thai Thien | 2019-09-24 15:40:56
add comet ml | a9d4b89ce594f5e241168ccafdcdf0f150ea0ebb | Thai Thien | 2019-09-23 17:07:58
fix pacnn avg schema | c2140a96886195782e5689c24aeeb4fe7a2db7ad | Thai Thien | 2019-09-22 17:35:01
debug number not divisible by 8 | a568fd7f294a8bd31b3db78437b4b6b51b5b41b9 | Thai Thien | 2019-09-22 04:36:06
pacnn | 967074890d14ab0eefc277801860270a468e8f9f | Thai Thien | 2019-09-22 03:54:48
wip: pacnn | 2192d7c7b449fecf3868877d9cfbc09bb6f7ae98 | Thai Thien | 2019-09-22 03:44:56
wip: pacnn | 37620e5a9bc0f9516ea964ec58d9bdaa1c40ff36 | Thai Thien | 2019-09-22 03:14:42
fix training flow | 2b87b1b26c7296b64493fdc49fedb421b249dfa3 | Thai Thien | 2019-09-17 18:00:35
dataset script | bc5c052f5f956510ab95ef9a45434fd486c57fae | Thai Thien | 2019-09-16 17:21:13
evaluator | ffc5bf8290ae0c469a9a18a2d061cfd1bfeee822 | Thai Thien | 2019-09-14 04:56:35
some more test for data loader | 25173578cde7d4e9fe6c6140d1ee01caa4fcfc32 | Thai Thien | 2019-09-14 02:51:58
some visualize to debug data loader | e4f52007616acf307bddbde79c0fb4f8c649c785 | Thai Thien | 2019-09-13 17:35:45
wip | d7d44cad6774355bdfa45414258763f6c6a0c299 | Thai Thien | 2019-08-31 16:58:16
commit all | 6dad7a58f7dbf9fc288ce9dd3e92be538851c2a7 | Thai Thien | 2019-08-29 19:10:44
input d1,d2,d3 match | fc2a809241f8b6356d964c63d40cbebd55ca5f6c | Thai Thien | 2019-08-28 17:57:05
WIP | 39eab26d061e61dfffbf164dbd5fd878299b7250 | thient | 2019-08-28 11:09:12
output of de is ok | dd770386674df3e0fbebafdfc48a9352bc28967d | thient | 2019-08-28 10:54:09
code pacnn | c49537b5cc91e96e4e35c9338d2c95b9bb41c672 | Thai Thien | 2019-08-27 16:35:27
crowd counting stuff | da9f27a39cba9bdd021b6b5c562f5f7c2be50190 | Thai Thien | 2019-08-24 18:27:44
Commit da8c0dd57297f972201f31d57e66897177922f48 - apex not work so well
Author: Thai Thien
Author date (UTC): 2019-09-24 17:25
Committer name: Thai Thien
Committer date (UTC): 2019-09-24 17:25
Parent(s): 11d55b50d764511f2491291f0208fee0905dec49
Signing key:
Tree: 3bc1550981a3a195bfd04298691260aad4b656e4
File | Lines added | Lines deleted
main_pacnn.py | 51 | 9
File main_pacnn.py changed (mode: 100644) (index a21c0f5..1078b73)
... ... from evaluator import MAECalculator
 
 from model_util import save_checkpoint
 
+import apex
+from apex import amp
+
 if __name__ == "__main__":
     # import comet_ml in the top of your file
 
 
-    MODEL_SAVE_NAME = "dev4"
+    MODEL_SAVE_NAME = "dev5"
     # Add the following code anywhere in your machine learning file
     experiment = Experiment(api_key="S3mM1eMq6NumMxk2QJAXASkUM",
                             project_name="pacnn-dev", workspace="ttpro1995")
 
... ... if __name__ == "__main__":
     optimizer = torch.optim.SGD(net.parameters(), args.lr,
                                 momentum=args.momentum,
                                 weight_decay=args.decay)
+    # Allow Amp to perform casts as required by the opt_level
+    net, optimizer = amp.initialize(net, optimizer, opt_level="O1", enabled=False)
+
     for e in range(10):
         print("start epoch ", e)
         loss_sum = 0
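
Note: the amp.initialize call added above follows NVIDIA Apex's documented automatic mixed precision (AMP) setup, but it is passed enabled=False, which per the Apex documentation turns all subsequent Amp calls into no-ops, so training effectively stays in FP32; this is consistent with the commit subject "apex not work so well". A minimal, self-contained sketch of the same setup pattern follows (the toy model, optimizer, and hyperparameters are illustrative, not taken from main_pacnn.py, and it assumes apex is installed and a CUDA device is available):

import torch
from apex import amp

# Toy stand-ins for the script's PACNN model and SGD optimizer.
model = torch.nn.Linear(8, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)

# opt_level="O1" patches common ops to run in FP16 where it is safe;
# enabled=False makes amp.initialize (and every later amp call) a no-op,
# so the model and optimizer come back effectively unchanged.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", enabled=False)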
 
... ... if __name__ == "__main__":
                 pass
             loss_d = criterion_mse(d, d1_label) + criterion_ssim(d, d1_label)
             loss += loss_d
-            loss.backward()
+            # loss.backward()
+            with amp.scale_loss(loss, optimizer) as scaled_loss:
+                scaled_loss.backward()
             optimizer.step()
+            optimizer.zero_grad()
             loss_sum += loss.item()
             sample += 1
-            optimizer.zero_grad()
             counting += 1
 
             if counting%10 ==0:
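
With Amp wired in, the backward pass is routed through amp.scale_loss, Apex's loss-scaling context manager, and optimizer.zero_grad() now runs right after optimizer.step() instead of after the loss bookkeeping. A hedged sketch of one training step using this idiom, continuing the illustrative model and optimizer from the previous sketch (criterion, inputs, and targets are placeholders, not the repository's variables):

criterion = torch.nn.MSELoss()
inputs = torch.randn(4, 8).cuda()
targets = torch.randn(4, 1).cuda()

loss = criterion(model(inputs), targets)
# Apex scales the loss so FP16 gradients do not underflow, and unscales them
# before control returns, so optimizer.step() sees ordinary gradients.
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
optimizer.step()
optimizer.zero_grad()  # clear gradients immediately after the update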
 
... ... if __name__ == "__main__":
         print("=================================================================")
 
         save_checkpoint({
-            'state_dict': net.state_dict(),
+            'model': net.state_dict(),
+            'optimizer': optimizer.state_dict(),
+            # 'amp': amp.state_dict()
         }, False, MODEL_SAVE_NAME)
 
 
-
-    # evaluate
-
-
+        # after epoch evaluate
+        mae_calculator_d1 = MAECalculator()
+        mae_calculator_d2 = MAECalculator()
+        mae_calculator_d3 = MAECalculator()
+        mae_calculator_final = MAECalculator()
+        with torch.no_grad():
+            for val_img, label in val_loader_pacnn:
+                net.eval()
+                # load data
+                d1_label, d2_label, d3_label = label
+
+                # forward pass
+                d1, d2, d3, p_s, p, d = net(val_img.to(device))
+
+                d1_label = d1_label.to(device)
+                d2_label = d2_label.to(device)
+                d3_label = d3_label.to(device)
+
+                # score
+                mae_calculator_d1.eval(d1.cpu().detach().numpy(), d1_label.cpu().detach().numpy())
+                mae_calculator_d2.eval(d2.cpu().detach().numpy(), d2_label.cpu().detach().numpy())
+                mae_calculator_d3.eval(d3.cpu().detach().numpy(), d3_label.cpu().detach().numpy())
+                mae_calculator_final.eval(d.cpu().detach().numpy(), d1_label.cpu().detach().numpy())
+            print("count ", mae_calculator_d1.count)
+            print("d1_val ", mae_calculator_d1.get_mae())
+            print("d2_val ", mae_calculator_d2.get_mae())
+            print("d3_val ", mae_calculator_d3.get_mae())
+            print("dfinal_val ", mae_calculator_final.get_mae())
+            experiment.log_metric("d1_val", mae_calculator_d1.get_mae())
+            experiment.log_metric("d2_val", mae_calculator_d2.get_mae())
+            experiment.log_metric("d3_val", mae_calculator_d3.get_mae())
+            experiment.log_metric("dfinal_val", mae_calculator_final.get_mae())
+
+
+    #############################################
+    # done training evaluate
     net = PACNNWithPerspectiveMap(PACNN_PERSPECTIVE_AWARE_MODEL).to(device)
     print(net)
 
     best_checkpoint = torch.load(MODEL_SAVE_NAME + "checkpoint.pth.tar")
-    net.load_state_dict(best_checkpoint['state_dict'])
+    net.load_state_dict(best_checkpoint['model'])
 
     # device = "cpu"
     # TODO d1_val 155.97279205322266
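
Finally, the checkpoint layout changes: the network weights move from the 'state_dict' key to 'model', the optimizer state is saved alongside them, and an 'amp' entry is left commented out. A minimal sketch of writing and restoring a checkpoint with these keys is below; it assumes the repository's save_checkpoint helper ultimately calls torch.save on a path like MODEL_SAVE_NAME + "checkpoint.pth.tar", which is what the later torch.load call implies, and it reuses the toy model and optimizer from the earlier sketches in place of the script's net and optimizer:

import torch
from apex import amp  # initialized as in the earlier sketches

MODEL_SAVE_NAME = "dev5"  # mirrors the value set in this commit

# Saving (sketch): same keys as the diff above.
checkpoint = {
    'model': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    # 'amp': amp.state_dict(),  # only meaningful when Amp is enabled
}
torch.save(checkpoint, MODEL_SAVE_NAME + "checkpoint.pth.tar")

# Restoring (sketch): load with the matching keys.
best_checkpoint = torch.load(MODEL_SAVE_NAME + "checkpoint.pth.tar")
model.load_state_dict(best_checkpoint['model'])
optimizer.load_state_dict(best_checkpoint['optimizer'])
# amp.load_state_dict(best_checkpoint['amp'])  # if the 'amp' entry were saved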