List of commits:
Subject Hash Author Date (UTC)
fix checkAndRecord, add gt count to test data loader 4290b7bb534ef0f785757b508c8e839ec6b3f9dc Thai Thien 2020-05-23 14:38:51
log some more be343dcc3b65f15f30e395eac3ca8271c09d1634 Thai Thien 2020-05-21 17:43:04
cometml project name 57ca8f68cbb0b650da74f7117ee60bd25bf0d8d4 Thai Thien 2020-05-21 17:39:54
train 1acb89895774528dcec61fad2f2872f2613bff75 Thai Thien 2020-05-21 17:34:11
train e67e3fcb8b77db53f990f8582e43c053f69d6c09 Thai Thien 2020-05-21 17:31:12
fix script e864e72a9b1fd0e6dfe200b3a9b569016f9894c0 Thai Thien 2020-05-21 17:24:38
rename 4f5e84a74bfd4f2a4f60554b9037eb097bafb0df Thai Thien 2020-05-21 17:23:08
implement the main flow f31d93a1f3f78575c9ee06bc47b541007789643a Thai Thien 2020-05-21 17:16:52
script 4d177e367c6609a0592525ee44b32bcbe43536a6 Thai Thien 2020-05-19 16:30:08
python code to split train data to train and validate 0dfb94063b0bdc7aa660b78ab61b9ee5e61a4199 Thai Thien 2020-05-18 16:19:21
fix dim mismatch 93ea7669d891301e9c00aadccdea27bb5e138656 Thai Thien 2020-05-12 17:19:33
train h1 bigtail ea6391257cd243098cbbb771e705f1f115b845df Thai Thien 2020-05-12 16:58:26
mse mean e96c22a36e305681d7fed415a5a949fa0c1791c9 Thai Thien 2020-05-10 18:32:02
no fix 7bd97e91de5d7c2d307407287c82e60e893c0c92 Thai Thien 2020-05-10 18:22:45
no fix fc20ae6922c2e53f7d37f4228fb921894cd78eab Thai Thien 2020-05-10 18:19:59
t9 d8ef865ea602670548e897d8b7ac4c925cc9b393 Thai Thien 2020-05-10 18:19:30
test with L1 loss 6492b65da4bdf6351b661f39b6bce6f08d37f17c Thai Thien 2020-05-10 18:10:49
H2 1d6d11b2eeecb67dd7d329e38de61b872870a9aa Thai Thien 2020-05-06 17:42:52
do something with l1 loss 5268c4fc163bb512f293fbac381a64a75c4fe462 Thai Thien 2020-05-06 17:32:45
typo b7b8e2303ce99b2196402ec93334598598e71e5a Thai Thien 2020-05-05 17:32:31
Commit 4290b7bb534ef0f785757b508c8e839ec6b3f9dc - fix checkAndRecord, add gt count to test data loader
Author: Thai Thien
Author date (UTC): 2020-05-23 14:38
Committer name: Thai Thien
Committer date (UTC): 2020-05-23 14:38
Parent(s): be343dcc3b65f15f30e395eac3ca8271c09d1634
Signing key:
Tree: 118ee79eb99d29f88c5f0bfa7fc15669a69914f6
File Lines added Lines deleted
crowd_counting_error_metrics.py 51 0
data_flow.py 27 0
experiment_main.py 2 2
File crowd_counting_error_metrics.py changed (mode: 100644) (index 1165f1a..42efe7b)
... ... class CrowdCountingMeanSquaredError(Metric):
52 52 raise NotComputableError('MeanSquaredError must have at least one example before it can be computed.') raise NotComputableError('MeanSquaredError must have at least one example before it can be computed.')
53 53 return math.sqrt(self._sum_of_squared_errors / self._num_examples) return math.sqrt(self._sum_of_squared_errors / self._num_examples)
54 54
55 ###########################################
56
57
class CrowdCountingMeanAbsoluteErrorWithCount(Metric):
    """
    Mean absolute error between the summed predicted density map and the
    ground-truth head count taken directly from the annotation.

    - `update` must receive output of the form `(y_pred, y, count)`,
      where `count` is the annotated number of heads (not derived from `y`).
    """
    def reset(self):
        # running totals for the current evaluation pass
        self._sum_of_absolute_errors = 0.0
        self._num_examples = 0

    def update(self, output):
        prediction, density_target, annotation_count = output
        # estimated crowd size = integral (sum) of the predicted density map
        estimated_count = torch.sum(prediction)
        error = torch.abs(estimated_count - annotation_count)
        self._sum_of_absolute_errors += error.sum().item()
        # NOTE(review): the whole batch is summed into one count above, yet
        # examples are tallied per item — assumes batch size 1; confirm.
        self._num_examples += density_target.shape[0]

    def compute(self):
        if self._num_examples > 0:
            return self._sum_of_absolute_errors / self._num_examples
        raise NotComputableError('MeanAbsoluteError must have at least one example before it can be computed.')
82
class CrowdCountingMeanSquaredErrorWithCount(Metric):
    """
    Root of the mean squared error between the summed predicted density
    map and the ground-truth head count taken directly from the annotation.

    Note: `compute` applies `math.sqrt`, so the reported value is RMSE.

    - `update` must receive output of the form `(y_pred, y, count)`,
      where `count` is the annotated number of heads (not derived from `y`).
    """
    def reset(self):
        # running totals for the current evaluation pass
        self._sum_of_squared_errors = 0.0
        self._num_examples = 0

    def update(self, output):
        prediction, density_target, annotation_count = output
        # estimated crowd size = integral (sum) of the predicted density map
        estimated_count = torch.sum(prediction)
        squared = torch.pow(estimated_count - annotation_count, 2)
        self._sum_of_squared_errors += squared.sum().item()
        # NOTE(review): the whole batch is summed into one count above, yet
        # examples are tallied per item — assumes batch size 1; confirm.
        self._num_examples += density_target.shape[0]

    def compute(self):
        if self._num_examples > 0:
            return math.sqrt(self._sum_of_squared_errors / self._num_examples)
        raise NotComputableError('MeanSquaredError must have at least one example before it can be computed.')
55 106
File data_flow.py changed (mode: 100644) (index 5c329ef..9f5d04e)
... ... from torch.utils.data import Dataset
17 17 from PIL import Image from PIL import Image
18 18 import torchvision.transforms.functional as F import torchvision.transforms.functional as F
19 19 from torchvision import datasets, transforms from torchvision import datasets, transforms
20 import scipy
21
20 22
21 23 """ """
22 24 create a list of file (full directory) create a list of file (full directory)
23 25 """ """
24 26
def count_gt_annotation_sha(mat_path):
    """
    Read a ShanghaiTech ground-truth annotation file and count heads.

    :param mat_path: path to a `GT_IMG_*.mat` annotation file whose
        `image_info` struct holds an (N, 2) array of head locations
    :return: number of annotated heads (N)
    """
    # `scipy.io` is a submodule: a bare `import scipy` at the top of the
    # file does not guarantee `scipy.io` is loaded, so import it here.
    from scipy.io import loadmat

    mat = loadmat(mat_path)
    # image_info is a 1x1 cell holding a 1x1 struct; field 0 is the
    # (N, 2) head-location array.
    gt = mat["image_info"][0, 0][0, 0][0]
    return len(gt)
25 37 def create_training_image_list(data_path): def create_training_image_list(data_path):
26 38 """ """
27 39 create a list of absolutely path of jpg file create a list of absolutely path of jpg file
 
... ... def load_data_shanghaitech_rnd(img_path, train=True):
134 146 interpolation=cv2.INTER_CUBIC) * 64 interpolation=cv2.INTER_CUBIC) * 64
135 147 # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output
136 148 target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output
149
150 if not train:
151 # get correct people head count from head annotation
152 mat_path = img_path.replace('.jpg', '.mat').replace('images', 'ground-truth').replace('IMG', 'GT_IMG')
153 gt_count = count_gt_annotation_sha(mat_path)
154 return img, target1, gt_count
155
137 156 return img, target1 return img, target1
138 157
139 158
 
... ... def load_data_shanghaitech_20p(img_path, train=True):
256 275 interpolation=cv2.INTER_CUBIC) * target_factor * target_factor interpolation=cv2.INTER_CUBIC) * target_factor * target_factor
257 276 # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output
258 277 target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output
278
279 if not train:
280 # get correct people head count from head annotation
281 mat_path = img_path.replace('.jpg', '.mat').replace('images', 'ground-truth').replace('IMG', 'GT_IMG')
282 gt_count = count_gt_annotation_sha(mat_path)
283 return img, target1, gt_count
284
259 285 return img, target1 return img, target1
260 286
261 287
 
... ... def my_collate(batch): # batch size 4 [{tensor image, tensor label},{},{},{}] co
750 776 # so how to sample another dataset entry? # so how to sample another dataset entry?
751 777 return torch.utils.data.dataloader.default_collate(batch) return torch.utils.data.dataloader.default_collate(batch)
752 778
779
753 780 def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech", visualize_mode=False, batch_size=1, train_loader_for_eval_check = False): def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech", visualize_mode=False, batch_size=1, train_loader_for_eval_check = False):
754 781 if visualize_mode: if visualize_mode:
755 782 transformer = transforms.Compose([ transformer = transforms.Compose([
File experiment_main.py changed (mode: 100644) (index 53441ce..974dcfc)
... ... if __name__ == "__main__":
219 219 print("evaluate_valid_timer ", evaluate_validate_timer.value()) print("evaluate_valid_timer ", evaluate_validate_timer.value())
220 220
221 221 # check if that validate is best # check if that validate is best
222 flag_mae = best_mae.checkAndRecord(metrics['mae'])
223 flag_mse = best_mse.checkAndRecord(metrics['mse'])
222 flag_mae = best_mae.checkAndRecord(metrics['mae'], metrics['mse'])
223 flag_mse = best_mse.checkAndRecord(metrics['mae'], metrics['mse'])
224 224
225 225 if flag_mae or flag_mse: if flag_mae or flag_mse:
226 226 experiment.log_metric("valid_best_mae", metrics['mae']) experiment.log_metric("valid_best_mae", metrics['mae'])
Hints:
Before first commit, do not forget to setup your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main