List of commits:
Subject Hash Author Date (UTC)
adamw1_CompactCNNV7_t5_jhu 40720162c9e9b731fb852ed7e3e191228e421ced Thai Thien 2020-09-16 17:46:20
test_data cache 73633cbd6d70448268a3e1534440601ddcf75276 Thai Thien 2020-09-16 17:12:57
fix test data not exist 474f506204251e26825801499f6bc503f44f8410 Thai Thien 2020-09-16 16:41:00
fix test data not exist 6377e18d87e0fabbf9b4ed143e26ae5912c0b872 Thai Thien 2020-09-16 16:40:06
jhucrowd_downsample_512 30b8b9a63e93a5631f003858ef2ab63dc62c6563 Thai Thien 2020-09-16 16:17:02
fix 77d74f09874a7be0d8fcec141597be4ce331bf0c Thai Thien 2020-09-16 16:08:39
fix 897856c014a517da679a8a1c46c6d0b90c965876 Thai Thien 2020-09-16 16:05:29
jhucrowd_downsample_256 b992d3acc7f1e1aa0ed6265c48e9f065fa1d3085 Thai Thien 2020-09-16 16:02:19
adamw1_bigtail13i_t7_jhu 81e1a8ff445b76f0a9c9c9882a11da65b8b13088 Thai Thien 2020-09-10 18:19:34
adamw1_CompactCNNV7_t1_jhu ed5c3696081241e7ebd7370863ea08ec83467a0b Thai Thien 2020-09-10 18:12:05
no force cache 33946d1f09a1e77c7758acbcd61e385a1800e854 Thai Thien 2020-09-10 17:47:06
skip train eval 186cb449855cb567fc61801bd3a1a4c0767a7d35 Thai Thien 2020-09-10 17:17:10
70 cuda mem but why ? f63f482418b6c85d2a289da9e9e43ca2b50cdd5d Thai Thien 2020-09-10 16:42:00
n_thread int f6243317336e07b9b6177c89ec3072c7d18189c6 Thai Thien 2020-09-10 16:22:36
t3 fix num thread 8981cf96a953c643b3443f39ad73487d2e7e044d Thai Thien 2020-09-10 16:20:28
jhu float32 c0b004d6a733f83e469bb3d48f7e20a9c7113957 Thai Thien 2020-09-10 16:14:09
fix float 8867f5d5e160c38a1a7d67b2fac728cf60e6649e Thai Thien 2020-09-10 16:03:15
try catch empty file e7d392cd6ec807f3ae242ef5090f5725ced9f897 Thai Thien 2020-09-10 15:40:14
case no label at all 51b423b2ee9271e3639a332ecab092944636d8f2 Thai Thien 2020-09-09 13:49:29
remove experiment 06478164cb8e00ed5512a0d4db0dbc6edc1b5ad1 Thai Thien 2020-09-08 15:10:33
Commit 40720162c9e9b731fb852ed7e3e191228e421ced - adamw1_CompactCNNV7_t5_jhu
Author: Thai Thien
Author date (UTC): 2020-09-16 17:46
Committer name: Thai Thien
Committer date (UTC): 2020-09-16 17:46
Parent(s): 73633cbd6d70448268a3e1534440601ddcf75276
Signing key:
Tree: ec854fe4f2a3074fcd3ca5ef45ce24c60f0ad157
File Lines added Lines deleted
data_flow.py 53 0
train_script/learnstuff/l3/adamw1_ccnnv7_t5_jhu.sh 6 6
File data_flow.py changed (mode: 100644) (index 472f1ec..c4ebf96)
... ... def load_data_jhucrowd_downsample_512(img_path, train=True, debug=False):
     target1 = np.expand_dims(target1, axis=0)  # make dim (batch size, channel size, x, y) to make model output
     return img2, target1
 
+
+
+def load_data_jhucrowd_downsample_testonly_512(img_path, train=True, debug=False):
+    """
+    for jhucrowd
+    crop fixed 512, allow batching on a non-uniform dataset
+    :param img_path:
+    :param train:
+    :return:
+    """
+    gt_path = img_path.replace('.jpg', '.h5').replace('images', 'ground-truth-h5')
+    img_origin = Image.open(img_path).convert('RGB')
+
+    downsample_rate = 2
+    target_factor = 8
+    crop_sq_size = 512
+
+    if train:
+        gt_file = h5py.File(gt_path, 'r')
+        target = np.asarray(gt_file['density']).astype('float32')
+        crop_size = (crop_sq_size, crop_sq_size)
+        dx = int(random.random() * (img_origin.size[0] - crop_sq_size))
+        dy = int(random.random() * (img_origin.size[1] - crop_sq_size))
+        if img_origin.size[0] - crop_sq_size < 0 or img_origin.size[1] - crop_sq_size < 0:  # image is smaller than the crop, so skip this sample
+            return None, None
+        img = img_origin.crop((dx, dy, crop_size[0] + dx, crop_size[1] + dy))
+
+        target = target[dy:crop_size[1] + dy, dx:crop_size[0] + dx]
+
+        if random.random() > 0.8:
+            target = np.fliplr(target)
+            img = img.transpose(Image.FLIP_LEFT_RIGHT)
+
+    if not train:
+        # get correct people head count from head annotation
+        txt_path = img_path.replace('.jpg', '.txt').replace('images', 'ground-truth')
+        gt_count = count_gt_annotation_jhu(txt_path)
+        img_eval = img_origin.resize((int(img_origin.size[0] / downsample_rate), int(img_origin.size[1] / downsample_rate)),
+                                     resample=Image.ANTIALIAS)
+        if debug:
+            gt_file = h5py.File(gt_path, 'r')
+            target = np.asarray(gt_file['density'])
+            return img_eval, gt_count, target
+        return img_eval, gt_count
+
+    target1 = cv2.resize(target, (int(target.shape[1] / target_factor), int(target.shape[0] / target_factor)),
+                         interpolation=cv2.INTER_CUBIC) * target_factor * target_factor
+    # target1 = target1.unsqueeze(0)  # make dim (batch size, channel size, x, y) to make model output
+    target1 = np.expand_dims(target1, axis=0)  # make dim (batch size, channel size, x, y) to make model output
+    return img, target1
+
 def data_augmentation(img, target):
     """
     return 1 pair of img, target after apply augmentation
 
... ... class ListDataset(Dataset):
             self.load_data_fn = load_data_shanghaitech_256
         elif dataset_name == "jhucrowd_downsample_512":
             self.load_data_fn = load_data_jhucrowd_downsample_512
+        elif dataset_name == "jhucrowd_downsample_testonly_512":
+            self.load_data_fn = load_data_jhucrowd_downsample_testonly_512
         elif dataset_name == "shanghaitech_non_overlap":
             self.load_data_fn = load_data_shanghaitech_non_overlap
         elif dataset_name == "shanghaitech_non_overlap_downsample":
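For orientation: the new elif branch above is all that wires the loader into the framework, so passing dataset name "jhucrowd_downsample_testonly_512" selects load_data_jhucrowd_downsample_testonly_512 as self.load_data_fn. Below is a minimal sketch of the loader's return contract; the ListDataset constructor arguments besides dataset_name are assumptions, since the diff only shows the dispatch branch:

# Hypothetical usage sketch; ListDataset's full constructor signature is not
# shown in this diff, so arguments other than dataset_name are assumed.
from data_flow import ListDataset

# Example path, for illustration only; the directory layout under the JHU
# dataset root is assumed.
img_paths = ["/data/rnd/thient/thient_data/jhu_crowd_plusplus/train/images/0001.jpg"]

dataset = ListDataset(img_paths, train=True,
                      dataset_name="jhucrowd_downsample_testonly_512")

# Train mode: returns (img, target1); target1 has shape (1, H/8, W/8) because
# target_factor = 8. Returns (None, None) when the image is smaller than the
# 512x512 crop, so callers must be ready to skip such samples.
img, target = dataset.load_data_fn(img_paths[0], train=True)

# Eval mode: returns (img_eval, gt_count), where img_eval is the image
# downsampled by downsample_rate = 2 and gt_count is read from the .txt
# head annotations via count_gt_annotation_jhu.
img_eval, gt_count = dataset.load_data_fn(img_paths[0], train=False)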
File train_script/learnstuff/l3/adamw1_ccnnv7_t5_jhu.sh copied from file train_script/learnstuff/l3/adamw1_ccnnv7_t1_jhu.sh (similarity 52%) (mode: 100644) (index 99fb23e..56454be)
-task="adamw1_CompactCNNV7_t1_jhu"
+task="adamw1_CompactCNNV7_t5_jhu"
 
-CUDA_VISIBLE_DEVICES=4 OMP_NUM_THREADS=6 PYTHONWARNINGS="ignore" HTTPS_PROXY="http://10.60.28.99:86" nohup python experiment_main.py \
+CUDA_VISIBLE_DEVICES=5 OMP_NUM_THREADS=6 PYTHONWARNINGS="ignore" HTTPS_PROXY="http://10.60.28.99:86" nohup python experiment_main.py \
 --task_id $task \
---note "adamW with extrem high lr and decay, msel1mean on jhu, no more force eval cache" \
+--note "downsample testonly" \
 --model "CompactCNNV7" \
 --input /data/rnd/thient/thient_data/jhu_crowd_plusplus \
 --lr 1e-3 \
 --decay 0.1 \
 --loss_fn "MSEL1Mean" \
---batch_size 50 \
---datasetname jhucrowd_256 \
+--batch_size 30 \
+--datasetname jhucrowd_downsample_testonly_512 \
 --optim adamw \
 --skip_train_eval \
---epochs 401 > logs/$task.log &
+--epochs 201 > logs/$task.log &
 
 echo logs/$task.log # for convenience
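One practical note on this configuration: with --batch_size 30 on JHU-CROWD++, whose images vary in size, the train-mode loader can emit (None, None) for images smaller than the 512x512 crop. The repository's batching code is not part of this diff; the sketch below shows one way such samples could be filtered, assuming a standard PyTorch DataLoader whose samples have already been converted to tensors upstream:

import torch
from torch.utils.data.dataloader import default_collate

def collate_skip_none(batch):
    # Drop (None, None) pairs emitted when an image is smaller than the
    # 512x512 crop; this helper is illustrative, not part of the repository.
    batch = [pair for pair in batch if pair[0] is not None]
    if len(batch) == 0:
        return None  # the training loop must skip a fully empty batch
    return default_collate(batch)

# Hypothetical wiring, assuming a standard DataLoader is used:
# loader = torch.utils.data.DataLoader(dataset, batch_size=30,
#                                      collate_fn=collate_skip_none)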