List of commits:
Subject Hash Author Date (UTC)
meow 1ad19a22a310992e27a26471feeb37375124d075 tthien 2019-09-29 18:25:43
fix pacnn perspective map 453ece3ccb818889ba895bfc4285f7905d33cba5 Thai Thien 2019-09-25 17:20:33
apex not work so well da8c0dd57297f972201f31d57e66897177922f48 Thai Thien 2019-09-24 17:25:59
fix data loader pacnn so it will scale up with correct number of people 11d55b50d764511f2491291f0208fee0905dec49 Thai Thien 2019-09-24 15:40:56
add comet ml a9d4b89ce594f5e241168ccafdcdf0f150ea0ebb Thai Thien 2019-09-23 17:07:58
fix pacnn avg schema c2140a96886195782e5689c24aeeb4fe7a2db7ad Thai Thien 2019-09-22 17:35:01
debug number not divisible by 8 a568fd7f294a8bd31b3db78437b4b6b51b5b41b9 Thai Thien 2019-09-22 04:36:06
pacnn 967074890d14ab0eefc277801860270a468e8f9f Thai Thien 2019-09-22 03:54:48
wip: pacnn 2192d7c7b449fecf3868877d9cfbc09bb6f7ae98 Thai Thien 2019-09-22 03:44:56
wip: pacnn 37620e5a9bc0f9516ea964ec58d9bdaa1c40ff36 Thai Thien 2019-09-22 03:14:42
fix training flow 2b87b1b26c7296b64493fdc49fedb421b249dfa3 Thai Thien 2019-09-17 18:00:35
dataset script bc5c052f5f956510ab95ef9a45434fd486c57fae Thai Thien 2019-09-16 17:21:13
evaluator ffc5bf8290ae0c469a9a18a2d061cfd1bfeee822 Thai Thien 2019-09-14 04:56:35
some more test for data loader 25173578cde7d4e9fe6c6140d1ee01caa4fcfc32 Thai Thien 2019-09-14 02:51:58
some visualize to debug data loader e4f52007616acf307bddbde79c0fb4f8c649c785 Thai Thien 2019-09-13 17:35:45
wip d7d44cad6774355bdfa45414258763f6c6a0c299 Thai Thien 2019-08-31 16:58:16
commit all 6dad7a58f7dbf9fc288ce9dd3e92be538851c2a7 Thai Thien 2019-08-29 19:10:44
input d1,d2,d3 match fc2a809241f8b6356d964c63d40cbebd55ca5f6c Thai Thien 2019-08-28 17:57:05
WIP 39eab26d061e61dfffbf164dbd5fd878299b7250 thient 2019-08-28 11:09:12
output of de is ok dd770386674df3e0fbebafdfc48a9352bc28967d thient 2019-08-28 10:54:09
Commit 1ad19a22a310992e27a26471feeb37375124d075 - meow
Author: tthien
Author date (UTC): 2019-09-29 18:25
Committer name: tthien
Committer date (UTC): 2019-09-29 18:25
Parent(s): 453ece3ccb818889ba895bfc4285f7905d33cba5
Signing key:
Tree: c011d03d1eb4b527115e109816a40e6d502cc486
File Lines added Lines deleted
main_pacnn.py 25 16
model_util.py 8 2
train_script/train_pacnn_shanghaitechA.sh 1 1
File main_pacnn.py changed (mode: 100644) (index 43b6c3d..b42c5a0)
1 1 from comet_ml import Experiment from comet_ml import Experiment
2 2 from args_util import real_args_parse from args_util import real_args_parse
3 3 from data_flow import get_train_val_list, get_dataloader, create_training_image_list from data_flow import get_train_val_list, get_dataloader, create_training_image_list
4 from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
5 from ignite.metrics import Loss, MeanAbsoluteError, MeanSquaredError
6 4 from crowd_counting_error_metrics import CrowdCountingMeanAbsoluteError, CrowdCountingMeanSquaredError from crowd_counting_error_metrics import CrowdCountingMeanAbsoluteError, CrowdCountingMeanSquaredError
7 5 import torch import torch
8 6 from torch import nn from torch import nn
 
... ... from evaluator import MAECalculator
18 16
19 17 from model_util import save_checkpoint from model_util import save_checkpoint
20 18
21 import apex
22 from apex import amp
19 # import apex
20 # from apex import amp
23 21
24 22 if __name__ == "__main__": if __name__ == "__main__":
25 23 # import comet_ml in the top of your file # import comet_ml in the top of your file
26 24
27 25
28 MODEL_SAVE_NAME = "dev5"
26 MODEL_SAVE_NAME = "dev7"
29 27 # Add the following code anywhere in your machine learning file # Add the following code anywhere in your machine learning file
30 28 experiment = Experiment(api_key="S3mM1eMq6NumMxk2QJAXASkUM", experiment = Experiment(api_key="S3mM1eMq6NumMxk2QJAXASkUM",
31 29 project_name="pacnn-dev2", workspace="ttpro1995") project_name="pacnn-dev2", workspace="ttpro1995")
 
... ... if __name__ == "__main__":
40 38 print(args) print(args)
41 39 DATA_PATH = args.input DATA_PATH = args.input
42 40 DATASET_NAME = "shanghaitech" DATASET_NAME = "shanghaitech"
41 TOTAL_EPOCH = args.epochs
43 42 PACNN_PERSPECTIVE_AWARE_MODEL = True PACNN_PERSPECTIVE_AWARE_MODEL = True
44 43
45 44
 
... ... if __name__ == "__main__":
89 88 momentum=args.momentum, momentum=args.momentum,
90 89 weight_decay=args.decay) weight_decay=args.decay)
91 90 # Allow Amp to perform casts as required by the opt_level # Allow Amp to perform casts as required by the opt_level
92 net, optimizer = amp.initialize(net, optimizer, opt_level="O1", enabled=False)
93
94 for e in range(10):
95 print("start epoch ", e)
91 # net, optimizer = amp.initialize(net, optimizer, opt_level="O1", enabled=False)
92
93 current_save_model_name = ""
94 current_epoch = 0
95 while (current_epoch < TOTAL_EPOCH):
96 experiment.log_current_epoch(current_epoch)
97 current_epoch += 1
98 print("start epoch ", current_epoch)
96 99 loss_sum = 0 loss_sum = 0
97 100 sample = 0 sample = 0
98 101 start_time = time() start_time = time()
 
... ... if __name__ == "__main__":
120 123 pass pass
121 124 loss_d = criterion_mse(d, d1_label) + criterion_ssim(d, d1_label) loss_d = criterion_mse(d, d1_label) + criterion_ssim(d, d1_label)
122 125 loss += loss_d loss += loss_d
123 # loss.backward()
124 with amp.scale_loss(loss, optimizer) as scaled_loss:
125 scaled_loss.backward()
126 loss.backward()
127 # with amp.scale_loss(loss, optimizer) as scaled_loss:
128 # scaled_loss.backward()
126 129 optimizer.step() optimizer.step()
127 130 optimizer.zero_grad() optimizer.zero_grad()
128 131 loss_sum += loss.item() loss_sum += loss.item()
 
... ... if __name__ == "__main__":
135 138 experiment.log_metric("avg_loss_ministep", avg_loss_ministep) experiment.log_metric("avg_loss_ministep", avg_loss_ministep)
136 139 # if counting == 100: # if counting == 100:
137 140 # break # break
141
138 142 end_time = time() end_time = time()
139 143 avg_loss = loss_sum/sample avg_loss = loss_sum/sample
140 144 epoch_time = end_time - start_time epoch_time = end_time - start_time
141 print("==END epoch ", e, " =============================================")
145 print("==END epoch ", current_epoch, " =============================================")
142 146 print(epoch_time, avg_loss, sample) print(epoch_time, avg_loss, sample)
143 147 experiment.log_metric("avg_loss_epoch", avg_loss) experiment.log_metric("avg_loss_epoch", avg_loss)
144 148 print("=================================================================") print("=================================================================")
145 149
146 save_checkpoint({
150 current_save_model_name = save_checkpoint({
147 151 'model': net.state_dict(), 'model': net.state_dict(),
148 152 'optimizer': optimizer.state_dict(), 'optimizer': optimizer.state_dict(),
153 'e': current_epoch,
154 'PACNN_PERSPECTIVE_AWARE_MODEL': PACNN_PERSPECTIVE_AWARE_MODEL
149 155 # 'amp': amp.state_dict() # 'amp': amp.state_dict()
150 }, False, MODEL_SAVE_NAME)
156 }, False, MODEL_SAVE_NAME+"_"+str(current_epoch)+"_")
157
158 experiment.log_asset(current_save_model_name)
151 159
160 # end 1 epoch
152 161
153 162 # after epoch evaluate # after epoch evaluate
154 163 mae_calculator_d1 = MAECalculator() mae_calculator_d1 = MAECalculator()
 
... ... if __name__ == "__main__":
189 198 net = PACNNWithPerspectiveMap(PACNN_PERSPECTIVE_AWARE_MODEL).to(device) net = PACNNWithPerspectiveMap(PACNN_PERSPECTIVE_AWARE_MODEL).to(device)
190 199 print(net) print(net)
191 200
192 best_checkpoint = torch.load(MODEL_SAVE_NAME + "checkpoint.pth.tar")
201 best_checkpoint = torch.load(current_save_model_name)
193 202 net.load_state_dict(best_checkpoint['model']) net.load_state_dict(best_checkpoint['model'])
194 203
195 204 # device = "cpu" # device = "cpu"
File model_util.py changed (mode: 100644) (index 8bb883a..eeb3468)
... ... import h5py
2 2 import torch import torch
3 3 import shutil import shutil
4 4 import numpy as np import numpy as np
5 import os
6
5 7
6 8 def save_net(fname, net): def save_net(fname, net):
7 9 with h5py.File(fname, 'w') as h5f: with h5py.File(fname, 'w') as h5f:
 
... ... def load_net(fname, net):
17 19
18 20
19 21 def save_checkpoint(state, is_best, task_id, filename='checkpoint.pth.tar'): def save_checkpoint(state, is_best, task_id, filename='checkpoint.pth.tar'):
20 torch.save(state, task_id + filename)
22 if not os.path.exists("saved_model"):
23 os.makedirs("saved_model")
24 full_file_name = os.path.join("saved_model", task_id + filename)
25 torch.save(state, full_file_name)
21 26 if is_best: if is_best:
22 shutil.copyfile(task_id + filename, task_id + 'model_best.pth.tar')
27 shutil.copyfile(task_id + filename, task_id + 'model_best.pth.tar')
28 return full_file_name
File train_script/train_pacnn_shanghaitechA.sh changed (mode: 100644) (index 34be054..ced1a20)
1 1 #python /home/tt/project/crowd_counting_framework/main_pacnn.py --input /home/tt/project/crowd_counting_framework/data/ShanghaiTech/part_A #python /home/tt/project/crowd_counting_framework/main_pacnn.py --input /home/tt/project/crowd_counting_framework/data/ShanghaiTech/part_A
2 2
3 python /home/tt/project/crowd_counting_framework/main_pacnn.py --input /home/tt/project/crowd_counting_framework/data/ShanghaiTech/part_A
3 python main_pacnn.py --input data/ShanghaiTech/part_A --epochs 3
Hints:
Before your first commit, do not forget to set up your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main