List of commits:
Subject Hash Author Date (UTC)
fix flatten collate bb25738b0510915712a17daeb590b668b446b0be Thai Thien 2020-07-12 15:17:14
ccnn_v7_t6_shb c4f59ee01cbdab3f506302588b9667cd1c9f6411 Thai Thien 2020-07-12 14:19:03
do not *4 root if we have flatten augmentation list c534aa36bf314ea32643e92231194bd020d7bf1f Thai Thien 2020-07-12 14:19:00
train val split shb 61581543d16aaa2640bdee0b3573e41d1843770d Thai Thien 2020-07-12 14:06:25
flatten collate c04708ae0defc81dbf441395e1d27de6a1d598fc Thai Thien 2020-07-12 13:54:01
travis remove nightly d4b0c714823046bbafcd3c816d56f7079c76d126 Thai Thien 2020-07-12 13:27:55
travis e6368ec3102e01f1bdc71a80a78f0db3617d7e08 Thai Thien 2020-07-12 12:33:21
flatten_collate 1e460396875c205c42de27449f56e73cd4ec10e0 Thai Thien 2020-07-12 12:23:40
train val split test ratio to 0.1 5091da3f0b45d875a38c2829e4fec5e61116e869 Thai Thien 2020-07-11 03:14:26
f 1defee5dc452c2da5fb540ff1050f6e01fe1878b Thai Thien 2020-07-10 17:04:58
fix log db4d655a313b8f3951baf225a4b197fce4bcdd4b Thai Thien 2020-07-10 16:50:03
typo, trunc 3.0 not 4.0 ee29c49efd0a5087f11662997b8992d25671a33a Thai Thien 2020-07-10 16:44:55
let try with trunc 3.0 f8eac179b1fa79d6451f19a9e6a35b82b94646a4 Thai Thien 2020-07-10 16:43:41
train script 7e6984a9eaa6609182ad6c786f6742dd29f1d017 Thai Thien 2020-06-14 10:23:05
fix H3 71d41ac82f273857f7643d8e63e502c701888a5b Thai Thien 2020-06-14 10:10:39
typo bc78059e62b17a5a617e3e0294f5c72dd4bd347e Thai Thien 2020-06-14 10:06:35
H3_t1_sha H3_t2_sha H3_t2_shb_fixed ccnn_adam_t7_shb ccnn_adam_t7_sha ca7e1ccab41314f8a30e48609421db3a2c41bbe0 Thai Thien 2020-06-14 09:59:17
train script da9ad066d6d835f38eb9e9c2b771ff412124552a Thai Thien 2020-06-13 18:06:22
train script ccnnv9 8ab76b70dad11b1458f8c1bfc2f1765c6ece984d Thai Thien 2020-06-13 17:57:02
train script 7ab6cf827c811af9a0c9168de2fde4dbd0fb3e61 Thai Thien 2020-06-13 11:54:21
Commit bb25738b0510915712a17daeb590b668b446b0be - fix flatten collate
Author: Thai Thien
Author date (UTC): 2020-07-12 15:17
Committer name: Thai Thien
Committer date (UTC): 2020-07-12 15:17
Parent(s): c4f59ee01cbdab3f506302588b9667cd1c9f6411
Signing key:
Tree: bbd723e731f8f55310c18ea83d4bd542073b084e
File Lines added Lines deleted
data_flow.py 7 5
data_util/dataset_utils.py 23 2
data_util/test_dataset_utils.py 2 2
debug/explore_shb_fatten_list.py 3 3
File data_flow.py changed (mode: 100644) (index aaf7b31..806ba3c)
... ... def load_data_shanghaitech_non_overlap(img_path, train=True):
473 473 target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output
474 474 crop_img.append(img) crop_img.append(img)
475 475 crop_label.append(target1) crop_label.append(target1)
476 return crop_img, crop_label
476 return crop_img, crop_label
477 477
478 478 if not train: if not train:
479 479 # get correct people head count from head annotation # get correct people head count from head annotation
 
... ... class ListDataset(Dataset):
937 937 if img is None or target is None: if img is None or target is None:
938 938 return None return None
939 939 if self.transform is not None: if self.transform is not None:
940 img = self.transform(img)
940 if isinstance(img, list):
941 # for case of generate multiple augmentation per sample
942 img_r = [self.transform(img_item) for img_item in img]
943 img = img_r
944 else:
945 img = self.transform(img)
941 946 return img, target return img, target
942 947
943
944
945
946 948 def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech", visualize_mode=False, batch_size=1, train_loader_for_eval_check = False): def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech", visualize_mode=False, batch_size=1, train_loader_for_eval_check = False):
947 949 if visualize_mode: if visualize_mode:
948 950 transformer = transforms.Compose([ transformer = transforms.Compose([
File data_util/dataset_utils.py changed (mode: 100644) (index 6e232b2..fdb1754)
... ... def my_collate(batch): # batch size 4 [{tensor image, tensor label},{},{},{}] c
14 14 # so how to sample another dataset entry? # so how to sample another dataset entry?
15 15 return torch.utils.data.dataloader.default_collate(batch) return torch.utils.data.dataloader.default_collate(batch)
16 16
17 def flatten_collate(batch):
17 def flatten_collate_broken(batch):
18 18 """ """
19 19
20 :param batch:
20 :param batch: tuple of (data, label)
21 21 :return: :return:
22 22 """ """
23 23 # remove null batch # remove null batch
 
... ... def flatten_collate(batch):
30 30 return out_batch return out_batch
31 31
32 32
33 def flatten_collate(batch):
34 """
35
36 :param batch: tuple of (data, label)
37 :return:
38 """
39 # remove null batch
40 batch = list(filter(lambda x: x is not None, batch))
41
42 # flattening array
43
44 # more clarify version
45 # out_batch = []
46 # for data_pair in batch:
47 # for img, label in zip(*data_pair):
48 # out_batch.append((img, label))
49
50 # python List Comprehensions
51 out_batch = [(img, label) for data_pair in batch for img, label in zip(*data_pair)]
52
53 return out_batch
File data_util/test_dataset_utils.py changed (mode: 100644) (index 3229ece..f62719e)
... ... def test_flatten_collate_should_remove_null():
10 10
11 11
12 12 def test_flatten_list(): def test_flatten_list():
13 in_batch = [["s11", "s12", "s13"], ["s21", "s22", "s23"], ["s31", "s32", "s33"]]
14 out_batch = ["s11", "s12", "s13", "s21", "s22", "s23", "s31", "s32", "s33"]
13 in_batch = [(["d11", "d12", "d13"],["l11", "l12", "l13"]),(["d21", "d22", "d23"],["l21", "l22", "l23"]), (["d31", "d32", "d33"],["l31", "l32", "l33"])]
14 out_batch = [("d11", "l11"), ("d12", "l12"), ("d13", "l13"), ("d21", "l21"), ("d22", "l22"), ("d23", "l23"), ("d31", "l31"), ("d32", "l32"), ("d33", "l33")]
15 15 actual_output = flatten_collate(in_batch) actual_output = flatten_collate(in_batch)
16 16 assert actual_output == out_batch assert actual_output == out_batch
File debug/explore_shb_fatten_list.py copied from file debug/explore_shb.py (similarity 94%) (mode: 100644) (index d6814c7..c9fd3d5)
... ... if __name__ == "__main__":
32 32 test_list = create_image_list(TEST_PATH) test_list = create_image_list(TEST_PATH)
33 33
34 34 train_loader, train_loader_eval, val_loader, test_loader = get_dataloader(train_list, val_list, test_list, train_loader, train_loader_eval, val_loader, test_loader = get_dataloader(train_list, val_list, test_list,
35 dataset_name="shanghaitech_more_random"
35 dataset_name="shanghaitech_non_overlap"
36 36 , batch_size=20, , batch_size=20,
37 37 train_loader_for_eval_check=True) train_loader_for_eval_check=True)
38 38 print(len(train_loader)) print(len(train_loader))
39 39 print(len(val_loader)) print(len(val_loader))
40 40
41 for data, label in val_loader:
42 print(label)
41 for obs in train_loader:
42 print(len(obs))
Hints:
Before your first commit, do not forget to set up your Git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using SSH (do not forget to upload a public key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to push to this repository anonymously.
This means that any commits you push will automatically be turned into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main