List of commits:
Subject Hash Author Date (UTC)
fix padding with p 86c2fa07822d956a34b3b37e14da485a4249f01b Thai Thien 2019-10-06 02:52:58
pacnn perspective loss fb673e38a5f24ae9004fe2b7b93c88991e0c2304 Thai Thien 2019-10-06 01:38:28
data_flow shanghaitech_pacnn_with_perspective seem working 91d350a06f358e03223966297d124daee94123d0 Thai Thien 2019-10-06 01:31:11
multiscale loss and final loss only mode c65dd0e74ad28503821e5c8651a3b47b4a0c7c64 Thai Thien 2019-10-05 15:58:19
wip : perspective map eac63f2671dc5b064753acc4f40bf0f9f216ad2a Thai Thien 2019-10-04 16:26:56
shell script f2106e700b6f6174d4dd276f25ec6f3d9ff239bb thient 2019-10-04 07:42:51
WIP 42c7c8e1d772fbbda61a4bdf9e329f74e1efb600 tthien 2019-10-03 17:52:47
add readme 580cf43d1edddd67b1f6a2c57fdd5cee3dba925c Thai Thien 2019-10-02 17:44:49
update script, debug ddb68b95389be1c1d398118677dd227a8bb2b70b Thai Thien 2019-10-02 15:52:31
add d (output density map) to loss function) a0c71bf4bf2ab7393d60b06a84db8dfbbfb1a6c2 tthien 2019-09-30 16:32:39
fix the args, add save interval for model, so we don't save them all 9fdf9daa2ac4bd12b7b62521d81e520db0debd01 tthien 2019-09-30 16:30:00
meow 1ad19a22a310992e27a26471feeb37375124d075 tthien 2019-09-29 18:25:43
fix pacnn perspective map 453ece3ccb818889ba895bfc4285f7905d33cba5 Thai Thien 2019-09-25 17:20:33
apex not work so well da8c0dd57297f972201f31d57e66897177922f48 Thai Thien 2019-09-24 17:25:59
fix data loader pacnn so it will scale up with correct number of people 11d55b50d764511f2491291f0208fee0905dec49 Thai Thien 2019-09-24 15:40:56
add comet ml a9d4b89ce594f5e241168ccafdcdf0f150ea0ebb Thai Thien 2019-09-23 17:07:58
fix pacnn avg schema c2140a96886195782e5689c24aeeb4fe7a2db7ad Thai Thien 2019-09-22 17:35:01
debug number not divisible by 8 a568fd7f294a8bd31b3db78437b4b6b51b5b41b9 Thai Thien 2019-09-22 04:36:06
pacnn 967074890d14ab0eefc277801860270a468e8f9f Thai Thien 2019-09-22 03:54:48
wip: pacnn 2192d7c7b449fecf3868877d9cfbc09bb6f7ae98 Thai Thien 2019-09-22 03:44:56
Commit 86c2fa07822d956a34b3b37e14da485a4249f01b - fix padding with p
Author: Thai Thien
Author date (UTC): 2019-10-06 02:52
Committer name: Thai Thien
Committer date (UTC): 2019-10-06 02:52
Parent(s): fb673e38a5f24ae9004fe2b7b93c88991e0c2304
Signer:
Signing key:
Signing status: N
Tree: f25ae2d50bff301267fbffb8375598d616cad8d6
File Lines added Lines deleted
args_util.py 4 4
data_flow.py 1 1
main_pacnn.py 18 6
train_script/train_pacnn_shanghaitechA.sh 23 3
File args_util.py changed (mode: 100644) (index fa52179..3fa2c42)
... ... def real_args_parse():
74 74 parser.add_argument('--epochs', action="store", default=1, type=int) parser.add_argument('--epochs', action="store", default=1, type=int)
75 75
76 76 # pacnn setting only # pacnn setting only
77 parser.add_argument('--PACNN_PERSPECTIVE_AWARE_MODEL', action="store_true", default=False)
78 parser.add_argument('--PACNN_MUTILPLE_SCALE_LOSS', action="store", default=True,
79 help="True: compare each of density map/perspective map scale with gt for loss."
80 " False: only compare final density map and final density perspective map")
77 parser.add_argument('--PACNN_PERSPECTIVE_AWARE_MODEL', action="store", default=0, type=int)
78 parser.add_argument('--PACNN_MUTILPLE_SCALE_LOSS', action="store", default=1, type=int,
79 help="1: compare each of density map/perspective map scale with gt for loss."
80 "0: only compare final density map and final density perspective map")
81 81
82 82 # args.original_lr = 1e-7 # args.original_lr = 1e-7
83 83 # args.lr = 1e-7 # args.lr = 1e-7
File data_flow.py changed (mode: 100644) (index a7b2270..6faebf8)
... ... def load_data_shanghaitech_pacnn_with_perspective(img_path, train=True):
118 118 img = Image.open(img_path).convert('RGB') img = Image.open(img_path).convert('RGB')
119 119 gt_file = h5py.File(gt_path, 'r') gt_file = h5py.File(gt_path, 'r')
120 120 target = np.asarray(gt_file['density']) target = np.asarray(gt_file['density'])
121 perspective = np.array(h5py.File(p_path, "r")['pmap'])
121 perspective = np.array(h5py.File(p_path, "r")['pmap']).astype(np.float32)
122 122 perspective = np.rot90(perspective, k=3) perspective = np.rot90(perspective, k=3)
123 123 if train: if train:
124 124 crop_size = (int(img.size[0] / 2), int(img.size[1] / 2)) crop_size = (int(img.size[0] / 2), int(img.size[1] / 2))
File main_pacnn.py changed (mode: 100644) (index 4eee4ac..823774f)
... ... if __name__ == "__main__":
24 24
25 25 # Add the following code anywhere in your machine learning file # Add the following code anywhere in your machine learning file
26 26 experiment = Experiment(api_key="S3mM1eMq6NumMxk2QJAXASkUM", experiment = Experiment(api_key="S3mM1eMq6NumMxk2QJAXASkUM",
27 project_name="pacnn-dev2", workspace="ttpro1995")
27 project_name="pacnn-dev2", workspace="ttpro1995", disabled=True)
28 28
29 29 args = real_args_parse() args = real_args_parse()
30 device = "cpu"
30 31 print(device) print(device)
31 32 print(args) print(args)
32 33
33 34
35
34 36 MODEL_SAVE_NAME = args.task_id MODEL_SAVE_NAME = args.task_id
35 37 MODEL_SAVE_INTERVAL = 5 MODEL_SAVE_INTERVAL = 5
36 38 DATA_PATH = args.input DATA_PATH = args.input
 
... ... if __name__ == "__main__":
50 52 experiment.log_parameter("lr", args.lr) experiment.log_parameter("lr", args.lr)
51 53
52 54 # create list # create list
53 if DATASET_NAME is "shanghaitech":
55 if "shanghaitech" in DATASET_NAME:
54 56 TRAIN_PATH = os.path.join(DATA_PATH, "train_data") TRAIN_PATH = os.path.join(DATA_PATH, "train_data")
55 57 TEST_PATH = os.path.join(DATA_PATH, "test_data") TEST_PATH = os.path.join(DATA_PATH, "test_data")
56 58 train_list, val_list = get_train_val_list(TRAIN_PATH) train_list, val_list = get_train_val_list(TRAIN_PATH)
57 59 test_list = create_training_image_list(TEST_PATH) test_list = create_training_image_list(TEST_PATH)
58 elif DATASET_NAME is "ucf_cc_50":
60 elif "ucf_cc_50" in DATASET_NAME:
59 61 train_list, val_list = get_train_val_list(DATA_PATH, test_size=0.2) train_list, val_list = get_train_val_list(DATA_PATH, test_size=0.2)
60 62 test_list = None test_list = None
61 63
 
... ... if __name__ == "__main__":
70 72 ]), ]),
71 73 train=True, train=True,
72 74 batch_size=1, batch_size=1,
73 num_workers=4, dataset_name="shanghaitech_pacnn"),
75 num_workers=4, dataset_name=DATASET_NAME),
74 76 batch_size=1, num_workers=4) batch_size=1, num_workers=4)
75 77
76 78 val_loader_pacnn = torch.utils.data.DataLoader( val_loader_pacnn = torch.utils.data.DataLoader(
 
... ... if __name__ == "__main__":
149 151
150 152 if PACNN_PERSPECTIVE_AWARE_MODEL: if PACNN_PERSPECTIVE_AWARE_MODEL:
151 153 # TODO: loss for perspective map here # TODO: loss for perspective map here
152 loss_p = criterion_mse(p, perspective_p) + criterion_ssim(p, perspective_p)
154 pad_p_0 = perspective_p.size()[2] - p.size()[2]
155 pad_p_1 = perspective_p.size()[3] - p.size()[3]
156 p_pad = F.pad(p, (0, pad_p_1, 0, pad_p_0), mode='replicate')
157
158 loss_p = criterion_mse(p_pad, perspective_p) + criterion_ssim(p_pad, perspective_p)
159
153 160 loss += loss_p loss += loss_p
154 161 if PACNN_MUTILPLE_SCALE_LOSS: if PACNN_MUTILPLE_SCALE_LOSS:
155 loss_p_s = criterion_mse(p_s, perspective_s) + criterion_ssim(p_s, perspective_s)
162 pad_s_0 = perspective_s.size()[2] - p_s.size()[2]
163 pad_s_1 = perspective_s.size()[3] - p_s.size()[3]
164 p_s_pad = F.pad(perspective_s, (0, pad_s_1, 0, pad_s_0),
165 mode='replicate')
166
167 loss_p_s = criterion_mse(p_s_pad, perspective_s) + criterion_ssim(p_s_pad, perspective_s)
156 168 loss += loss_p_s loss += loss_p_s
157 169
158 170 # what is this, loss_d count 2 ? # what is this, loss_d count 2 ?
File train_script/train_pacnn_shanghaitechA.sh changed (mode: 100644) (index f3944dd..a405a96)
48 48 #--task_id train_state1_attemp7 #--task_id train_state1_attemp7
49 49
50 50 #### no loss for d1, d2, d3 but only count d_final #### no loss for d1, d2, d3 but only count d_final
51 #python main_pacnn.py \
52 #--input data/ShanghaiTech/part_A \
53 #--load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar \
54 #--epochs 300 \
55 #--lr 1e-9 \
56 #--PACNN_MUTILPLE_SCALE_LOSS False \
57 #--task_id train_state1_attemp8_finalloss
58
59 ####################
60
51 61 python main_pacnn.py \ python main_pacnn.py \
52 62 --input data/ShanghaiTech/part_A \ --input data/ShanghaiTech/part_A \
53 63 --load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar \ --load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar \
54 --epochs 300 \
64 --epochs 500 \
55 65 --lr 1e-9 \ --lr 1e-9 \
56 --PACNN_MUTILPLE_SCALE_LOSS False \
57 --task_id train_state1_attemp8_finalloss
66 --PACNN_PERSPECTIVE_AWARE_MODEL 1 \
67 --PACNN_MUTILPLE_SCALE_LOSS 1 \
68 --task_id train_state2_attemp1
69
70
71 #--input data/ShanghaiTech/part_A \
72 #--load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar
73 #--epochs 500
74 #--lr 1e-9
75 #--PACNN_PERSPECTIVE_AWARE_MODEL 1
76 #--PACNN_MUTILPLE_SCALE_LOSS 1
77 #--task_id dev
Hints:
Before your first commit, do not forget to set up your Git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using SSH (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main