List of commits:
Subject Hash Author Date (UTC)
jhu adamw1_bigtail13i_t1_jhu 77bb789fa2ffadfc07ec6f99e1bcf44bde24bfc3 Thai Thien 2020-09-07 16:38:13
train_dev_test cbfc478aaf7db3accb41a763b0818910999d25a5 Thai Thien 2020-09-06 17:29:37
seem working c44af597b7adf98ecaf3a71e565de8688f7ff6ba Thai Thien 2020-09-06 15:55:28
done single sample cee46d309e9bb91ac4185b0e1b74deddefbc8553 Thai Thien 2020-09-06 15:24:39
a ec2103c6617ad84375e5a7a0f3cde3b32ab012a0 Thai Thien 2020-08-23 15:39:51
pred_count d9168903486d7a88c0763a8e93b18d1ed89d2d40 Thai Thien 2020-08-23 15:37:02
density_map_count = gt_density.detach().sum() 015c7ab3a1734c6002065f775f90870b6c5bdbd5 Thai Thien 2020-08-23 15:35:31
no more eval, only one in function 316b330c481224bb4264ddebb3dda2e317c32ac1 Thai Thien 2020-08-23 15:34:21
None type eval 15a12c6dd1a6f828f5e852faecaf2a812dfddce8 Thai Thien 2020-08-23 15:30:17
run experiment on shb truncate 4 3faae2b49ed074fdb96fc20e916fee9eeac4f92f Thai Thien 2020-08-23 15:20:23
evaluatuion shb b80eac051649c36ea1631cc4701e6d1d587d7887 Thai Thien 2020-08-23 09:26:45
fix evaluation shb 51cbe92724973f64cb046f7f49fb1976400827e4 Thai Thien 2020-08-23 09:18:35
typo 1cba17e02cc79ee73c4ad5c9f1faab4913f92b01 Thai Thien 2020-08-23 08:53:21
fix file, mae, mse 245396d814f5d83dff1fd1ecc9fcd403be1805cb Thai Thien 2020-08-23 08:52:16
file name strage stuff caeb9f9608e91cf6a1323d9ae9f4fb215c4dd6ea Thai Thien 2020-08-23 08:44:54
TypeError: can only concatenate str (not "list") to str e8aebcfb782966c11c3ca116b5a9cc254021a73d Thai Thien 2020-08-23 08:41:02
key error 5732100d6aca8e3fea6a4d25270edefbc8148a2a Thai Thien 2020-08-23 08:39:09
fix target fdbda2c6923dd164560448a445cd64ff413fc804 Thai Thien 2020-08-23 08:37:48
test path 06d268f873e6ceea93a8e8741d819a03b324cedb Thai Thien 2020-08-23 08:27:38
a 1d73d926894edbc600db678316b9b24a583c4cb8 Thai Thien 2020-08-23 08:25:58
Commit 77bb789fa2ffadfc07ec6f99e1bcf44bde24bfc3 - jhu adamw1_bigtail13i_t1_jhu
Author: Thai Thien
Author date (UTC): 2020-09-07 16:38
Committer name: Thai Thien
Committer date (UTC): 2020-09-07 16:38
Parent(s): cbfc478aaf7db3accb41a763b0818910999d25a5
Signer:
Signing key:
Signing status: N
Tree: 1e5cf6a52168c75ab4f239a712176dfe19520589
File Lines added Lines deleted
data_flow.py 60 3
train_script/learnstuff/l3/adamw1_bigtail13i_t1_jhu.sh 5 6
File data_flow.py changed (mode: 100644) (index 92c16da..4499e05)
... ... import torch
15 15 import numpy as np
16 16 from torch.utils.data import Dataset
17 17 from PIL import Image
18 import pandas as pd
18 19 import torchvision.transforms.functional as F
19 20 from torchvision import datasets, transforms
20 21 import scipy.io  # import scipy does not work https://stackoverflow.com/questions/11172623/import-problems-with-scipy-io
 
... ... def count_gt_annotation_sha(mat_path):
35 36 gt = mat["image_info"][0, 0][0, 0][0]
36 37 return len(gt)
37 38
39 def count_gt_annotation_jhu(txt_path):
40 """
41 read the annotation file and count the number of annotated heads
42 :param txt_path: path to the .txt annotation file
43 :return: count
44 """
45 df = pd.read_csv(txt_path, sep=" ", header=None)
46 p = df.to_numpy()
47 return len(p)
48
49
38 50
39 51 def create_training_image_list(data_path):
40 52 """
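
count_gt_annotation_jhu mirrors the existing count_gt_annotation_sha helper, but reads the JHU-CROWD++ .txt annotation instead of a .mat file: each space-separated row is one annotated head, so the count is simply the number of rows. A minimal sketch of the same idea (the img_path below is hypothetical; the path substitution matches the one used by load_data_jhucrowd_256 further down):

    import pandas as pd

    img_path = "jhu_crowd_plusplus/train/images/0001.jpg"  # hypothetical example path
    txt_path = img_path.replace('.jpg', '.txt').replace('images', 'ground-truth')

    # One row per annotated head, space-separated columns, no header line.
    df = pd.read_csv(txt_path, sep=" ", header=None)
    gt_count = len(df.to_numpy())
    print(gt_count)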
 
... ... def load_data_ucf_cc50_pacnn(img_path, train=True):
997 1009 return img, (target1, target2, target3)
998 1010
999 1011
1012 def load_data_jhucrowd_256(img_path, train=True, debug=False):
1013 """
1014 for JHU-CROWD++ (jhucrowd)
1015 crop a fixed 256x256 patch so images of non-uniform size can be batched
1016 :param img_path: path to the input image
1017 :param train: True for training (random crop and flip), False for evaluation
1018 :return: (img, target) in training; (img_origin, gt_count[, target]) in evaluation
1019 """
1020 gt_path = img_path.replace('.jpg', '.h5').replace('images', 'ground-truth-h5')
1021 img_origin = Image.open(img_path).convert('RGB')
1022 gt_file = h5py.File(gt_path, 'r')
1023 target = np.asarray(gt_file['density'])
1024 target_factor = 8
1025 crop_sq_size = 256
1026 if train:
1027 crop_size = (crop_sq_size, crop_sq_size)
1028 dx = int(random.random() * (img_origin.size[0] - crop_sq_size))
1029 dy = int(random.random() * (img_origin.size[1] - crop_sq_size))
1030 if img_origin.size[0] - crop_sq_size < 0 or img_origin.size[1] - crop_sq_size < 0: # we crop more than we can chew, so...
1031 return None, None
1032 img = img_origin.crop((dx, dy, crop_size[0] + dx, crop_size[1] + dy))
1033 target = target[dy:crop_size[1] + dy, dx:crop_size[0] + dx]
1034
1035 if random.random() > 0.8:
1036 target = np.fliplr(target)
1037 img = img.transpose(Image.FLIP_LEFT_RIGHT)
1038
1039 if not train:
1040 # get correct people head count from head annotation
1041 txt_path = img_path.replace('.jpg', '.txt').replace('images', 'ground-truth')
1042 gt_count = count_gt_annotation_jhu(txt_path)
1043 if debug:
1044 gt_file = h5py.File(gt_path, 'r')
1045 target = np.asarray(gt_file['density'])
1046 return img_origin, gt_count, target
1047 return img_origin, gt_count
1048
1049 target1 = cv2.resize(target, (int(target.shape[1] / target_factor), int(target.shape[0] / target_factor)),
1050 interpolation=cv2.INTER_CUBIC) * target_factor * target_factor
1051 # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output
1052 target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output
1053 return img, target1
1054
1055
1000 1056 def data_augmentation(img, target):
1001 1057 """
1002 1058 return 1 pair of img, target after apply augmentation
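
The new loader's behavior differs between its two modes: during training it returns a random 256x256 crop (occasionally flipped) with a density map downsampled by target_factor = 8 and rescaled so its sum is preserved, while during evaluation it returns the full image plus the head count taken from the .txt annotation. A rough usage sketch, assuming the data_flow module shown in this diff and a hypothetical img_path under the jhu_crowd_plusplus input directory:

    from data_flow import load_data_jhucrowd_256

    img_path = "jhu_crowd_plusplus/train/images/0001.jpg"  # hypothetical example path

    # Training mode: cropped PIL image and a density map of shape (1, 256/8, 256/8);
    # (None, None) is returned when the image is smaller than the 256x256 crop.
    img, target = load_data_jhucrowd_256(img_path, train=True)
    if img is not None:
        print(target.shape, target.sum())  # the sum approximates the head count in the crop

    # Evaluation mode: full-resolution image plus the exact count from ground-truth/*.txt.
    img_origin, gt_count = load_data_jhucrowd_256(img_path, train=False)

    # debug=True additionally returns the stored density map for inspection.
    img_origin, gt_count, density = load_data_jhucrowd_256(img_path, train=False, debug=True)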
 
... ... class ListDataset(Dataset):
1099 1155 self.load_data_fn = load_data_shanghaitech_non_overlap_downsample
1100 1156 elif dataset_name == "shanghaitech_flip_only":
1101 1157 self.load_data_fn = load_data_shanghaitech_flip_only
1102
1158 elif dataset_name == "jhucrowd_256":
1159 self.load_data_fn = load_data_jhucrowd_256
1103 1160 elif dataset_name == "ucf_cc_50":
1104 1161 self.load_data_fn = load_data_ucf_cc50
1105 1162 elif dataset_name == "ucf_cc_50_pacnn":
 
... ... def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech",
1214 1271 transform=transformer,
1215 1272 train=False,
1216 1273 debug=debug,
1217 dataset_name=dataset_name, cache=cache),
1274 dataset_name=dataset_name, cache=True), # evaluation set always cache
1218 1275 num_workers=0,
1219 1276 batch_size=test_size,
1220 1277 pin_memory=pin_memory)
 
... ... def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech",
1228 1285 transform=transformer,
1229 1286 train=False,
1230 1287 debug=debug,
1231 dataset_name=dataset_name),
1288 dataset_name=dataset_name, cache=True), # evaluation set always cache
1232 1289 num_workers=0,
1233 1290 batch_size=test_size,
1234 1291 pin_memory=pin_memory)
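
With "jhucrowd_256" registered in ListDataset and both evaluation loaders now built with cache=True regardless of the caller's flag, wiring the new dataset into an experiment only requires passing the dataset name through get_dataloader. A sketch under the assumption that get_dataloader returns the train, validation, and test loaders in that order (only arguments visible in the diff are used; the path lists are hypothetical):

    from data_flow import get_dataloader

    # Hypothetical path lists; in the framework these are built from the --input directory.
    train_list = ["jhu_crowd_plusplus/train/images/0001.jpg"]
    val_list = ["jhu_crowd_plusplus/val/images/0002.jpg"]
    test_list = ["jhu_crowd_plusplus/test/images/0003.jpg"]

    train_loader, val_loader, test_loader = get_dataloader(
        train_list, val_list, test_list,
        dataset_name="jhucrowd_256")  # routes ListDataset to load_data_jhucrowd_256
    # The val/test loaders ignore the caller's cache setting and always cache, per this commit.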
File train_script/learnstuff/l3/adamw1_bigtail13i_t1_jhu.sh copied from file train_script/learnstuff/l1/adamw1_bigtail13i_t1_shb.sh (similarity 55%) (mode: 100644) (index cd3f8e4..b6dc13c)
1 task="adamw1_bigtail13i_t1_shb"
1 task="adamw1_bigtail13i_t1_jhu"
2 2
3 3 CUDA_VISIBLE_DEVICES=3 OMP_NUM_THREADS=2 PYTHONWARNINGS="ignore" HTTPS_PROXY="http://10.60.28.99:86" nohup python experiment_main.py \
4 4 --task_id $task \
5 --note "adamW with extrem high lr and decay, msel1mean" \
5 --note "adamW with extrem high lr and decay, msel1mean on jhu" \
6 6 --model "BigTail13i" \
7 --input /data/rnd/thient/thient_data/shanghaitech_with_people_density_map/ShanghaiTech_3/part_B \
7 --input /data/rnd/thient/thient_data/jhu_crowd_plusplus \
8 8 --lr 1e-3 \
9 9 --decay 0.1 \
10 10 --loss_fn "MSEL1Mean" \
11 --batch_size 5 \
12 --datasetname shanghaitech_non_overlap \
11 --batch_size 40 \
12 --datasetname jhucrowd_256 \
13 13 --optim adamw \
14 --cache \
15 14 --epochs 1201 > logs/$task.log &
16 15
17 16 echo logs/$task.log # for convenience