List of commits:
Subject Hash Author Date (UTC)
jhucrowd_downsample_256 b992d3acc7f1e1aa0ed6265c48e9f065fa1d3085 Thai Thien 2020-09-16 16:02:19
adamw1_bigtail13i_t7_jhu 81e1a8ff445b76f0a9c9c9882a11da65b8b13088 Thai Thien 2020-09-10 18:19:34
adamw1_CompactCNNV7_t1_jhu ed5c3696081241e7ebd7370863ea08ec83467a0b Thai Thien 2020-09-10 18:12:05
no force cache 33946d1f09a1e77c7758acbcd61e385a1800e854 Thai Thien 2020-09-10 17:47:06
skip train eval 186cb449855cb567fc61801bd3a1a4c0767a7d35 Thai Thien 2020-09-10 17:17:10
70 cuda mem but why ? f63f482418b6c85d2a289da9e9e43ca2b50cdd5d Thai Thien 2020-09-10 16:42:00
n_thread int f6243317336e07b9b6177c89ec3072c7d18189c6 Thai Thien 2020-09-10 16:22:36
t3 fix num thread 8981cf96a953c643b3443f39ad73487d2e7e044d Thai Thien 2020-09-10 16:20:28
jhu float32 c0b004d6a733f83e469bb3d48f7e20a9c7113957 Thai Thien 2020-09-10 16:14:09
fix float 8867f5d5e160c38a1a7d67b2fac728cf60e6649e Thai Thien 2020-09-10 16:03:15
try catch empty file e7d392cd6ec807f3ae242ef5090f5725ced9f897 Thai Thien 2020-09-10 15:40:14
case no label at all 51b423b2ee9271e3639a332ecab092944636d8f2 Thai Thien 2020-09-09 13:49:29
remove experiment 06478164cb8e00ed5512a0d4db0dbc6edc1b5ad1 Thai Thien 2020-09-08 15:10:33
val train test 5a43d57ba16667cb96f9514dcf4b81167ff8bd5a Thai Thien 2020-09-08 15:01:51
parallel, try catch excemption 9ac6a36880a113c1820b1e6a702ed5c08ebcb03f Thai Thien 2020-09-08 14:55:08
now show the number of point in log 54a1a16adf32c866ed65808d16da5f997d27b54f Thai Thien 2020-09-07 17:19:18
gt to ground-truth 3996d1c19d68f3669b5f54e5b621635f0b87a9fc Thai Thien 2020-09-07 17:06:15
change data path 5e6605625ba130310fbeaade259e4a5b98987dad Thai Thien 2020-09-07 17:04:43
test train val e817f1d6f4743286735811fff60f64080a8e76ed Thai Thien 2020-09-07 17:02:57
add profiler fac9aa609d6de1e8cd9cb8a8a97f00063aaae310 Thai Thien 2020-09-07 16:59:36
Commit b992d3acc7f1e1aa0ed6265c48e9f065fa1d3085 - jhucrowd_downsample_256
Author: Thai Thien
Author date (UTC): 2020-09-16 16:02
Committer name: Thai Thien
Committer date (UTC): 2020-09-16 16:02
Parent(s): 81e1a8ff445b76f0a9c9c9882a11da65b8b13088
Signing key:
Tree: 8dc103ccb234f46ae997d656cee03d5dfbe1c909
File Lines added Lines deleted
data_flow.py 49 0
train_script/learnstuff/l3/adamw1_ccnnv7_t2_jhu.sh 2 2
File data_flow.py changed (mode: 100644) (index 0418e46..3347278)
@@ def load_data_jhucrowd_256(img_path, train=True, debug=False): @@
         return img, target1
 
 
+def load_data_jhucrowd_downsample_256(img_path, train=True, debug=False):
+    """
+    for jhucrowd
+    crop fixed 256, allow batch in non-uniform dataset
+    :param img_path:
+    :param train:
+    :return:
+    """
+    gt_path = img_path.replace('.jpg', '.h5').replace('images', 'ground-truth-h5')
+    img_origin = Image.open(img_path).convert('RGB')
+    # downsample by half
+    gt_file = h5py.File(gt_path, 'r')
+    target = np.asarray(gt_file['density']).astype('float32')
+    downsample_factor = 2
+    target_factor = 8 / downsample_factor
+    crop_sq_size = 256 * downsample_factor
+    if train:
+        crop_size = (crop_sq_size, crop_sq_size)
+        dx = int(random.random() * (img_origin.size[0] - crop_sq_size))
+        dy = int(random.random() * (img_origin.size[1] - crop_sq_size))
+        if img_origin.size[0] - crop_sq_size < 0 or img_origin.size[1] - crop_sq_size < 0:  # we crop more than we can chew, so...
+            return None, None
+        img = img_origin.crop((dx, dy, crop_size[0] + dx, crop_size[1] + dy))
+        img2 = img.resize((int(img_origin.size[0] / 2), int(img_origin.size[1] / 2)), resample=Image.ANTIALIAS)
+        target = target[dy:crop_size[1] + dy, dx:crop_size[0] + dx]
+
+        if random.random() > 0.8:
+            target = np.fliplr(target)
+            img2 = img2.transpose(Image.FLIP_LEFT_RIGHT)
+
+    if not train:
+        # get correct people head count from head annotation
+        txt_path = img_path.replace('.jpg', '.txt').replace('images', 'ground-truth')
+        gt_count = count_gt_annotation_jhu(txt_path)
+        img_out = img_origin.resize((int(img_origin.size[0] / 2), int(img_origin.size[1] / 2)), resample=Image.ANTIALIAS)
+        if debug:
+            gt_file = h5py.File(gt_path, 'r')
+            target = np.asarray(gt_file['density'])
+            return img_origin, gt_count, target
+        return img_out, gt_count
+
+    target1 = cv2.resize(target, (int(target.shape[1] / target_factor), int(target.shape[0] / target_factor)),
+                         interpolation=cv2.INTER_CUBIC) * target_factor * target_factor
+    # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output
+    target1 = np.expand_dims(target1, axis=0)  # make dim (batch size, channel size, x, y) to make model output
+    return img, target1
+
 def data_augmentation(img, target):
     """
     return 1 pair of img, target after apply augmentation
 
@@ class ListDataset(Dataset): @@
             self.load_data_fn = load_data_shanghaitech_180
         elif dataset_name == "shanghaitech_256":
             self.load_data_fn = load_data_shanghaitech_256
+        elif dataset_name == "jhucrowd_downsample_256":
+            self.load_data_fn = load_data_jhucrowd_downsample_256
         elif dataset_name == "shanghaitech_non_overlap":
             self.load_data_fn = load_data_shanghaitech_non_overlap
         elif dataset_name == "shanghaitech_non_overlap_downsample":
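For context, a minimal, self-contained sketch (synthetic data, not code from this repo) of the arithmetic the new loader performs: crop a 512x512 window (256 * downsample_factor), then shrink the density map by target_factor = 8 / downsample_factor while preserving its integral, which is the head count. Each output pixel covers target_factor**2 input pixels after the resize, so the values are multiplied by target_factor**2 to keep the sum unchanged.

import numpy as np
import cv2

downsample_factor = 2
crop_sq_size = 256 * downsample_factor   # 512, as in the diff above
target_factor = 8 / downsample_factor    # 4.0: network stride 8, image halved

# Stand-in density map; in the repo this comes from the ground-truth-h5 file.
density = np.random.rand(1024, 768).astype('float32')
dx, dy = 100, 50
crop = density[dy:dy + crop_sq_size, dx:dx + crop_sq_size]
count_before = crop.sum()

# Shrink by target_factor per side; scale by target_factor**2 to keep the count.
small = cv2.resize(
    crop,
    (int(crop.shape[1] / target_factor), int(crop.shape[0] / target_factor)),
    interpolation=cv2.INTER_CUBIC,
) * target_factor * target_factor

print(crop.shape, small.shape)    # (512, 512) (128, 128)
print(count_before, small.sum())  # equal up to interpolation error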
File train_script/learnstuff/l3/adamw1_ccnnv7_t2_jhu.sh copied from file train_script/learnstuff/l3/adamw1_ccnnv7_t1_jhu.sh (similarity 87%) (mode: 100644) (index 99fb23e..63fd722)
-task="adamw1_CompactCNNV7_t1_jhu"
+task="adamw1_CompactCNNV7_t2_jhu"
 
 CUDA_VISIBLE_DEVICES=4 OMP_NUM_THREADS=6 PYTHONWARNINGS="ignore" HTTPS_PROXY="http://10.60.28.99:86" nohup python experiment_main.py \
 --task_id $task \
@@ CUDA_VISIBLE_DEVICES=4 OMP_NUM_THREADS=6 PYTHONWARNINGS="ignore" HTTPS_PROXY="ht @@
 --decay 0.1 \
 --loss_fn "MSEL1Mean" \
 --batch_size 50 \
---datasetname jhucrowd_256 \
+--datasetname jhucrowd_downsample_256 \
 --optim adamw \
 --skip_train_eval \
 --epochs 401 > logs/$task.log &
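The script's --datasetname switch is what routes through the ListDataset elif chain shown above to select load_data_jhucrowd_downsample_256. As a hypothetical sketch only (experiment_main.py's real parser is not part of this diff; the flag names are taken from the command line above, the defaults here are illustrative), the flags could be parsed like this:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--task_id", type=str)
parser.add_argument("--decay", type=float, default=0.1)
parser.add_argument("--loss_fn", type=str, default="MSEL1Mean")
parser.add_argument("--batch_size", type=int, default=50)
parser.add_argument("--datasetname", type=str, default="jhucrowd_downsample_256")
parser.add_argument("--optim", type=str, default="adamw")
parser.add_argument("--skip_train_eval", action="store_true")
parser.add_argument("--epochs", type=int, default=401)

# Simulate the invocation from the shell script above.
args = parser.parse_args(["--task_id", "adamw1_CompactCNNV7_t2_jhu"])
print(args.datasetname)  # jhucrowd_downsample_256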