File args_util.py changed (mode: 100644) (index fa52179..3fa2c42) |
... |
... |
def real_args_parse(): |
74 |
74 |
parser.add_argument('--epochs', action="store", default=1, type=int) |
parser.add_argument('--epochs', action="store", default=1, type=int) |
75 |
75 |
|
|
76 |
76 |
# pacnn setting only |
# pacnn setting only |
77 |
|
parser.add_argument('--PACNN_PERSPECTIVE_AWARE_MODEL', action="store_true", default=False) |
|
78 |
|
parser.add_argument('--PACNN_MUTILPLE_SCALE_LOSS', action="store", default=True, |
|
79 |
|
help="True: compare each of density map/perspective map scale with gt for loss." |
|
80 |
|
" False: only compare final density map and final density perspective map") |
|
|
77 |
|
parser.add_argument('--PACNN_PERSPECTIVE_AWARE_MODEL', action="store", default=0, type=int) |
|
78 |
|
parser.add_argument('--PACNN_MUTILPLE_SCALE_LOSS', action="store", default=1, type=int, |
|
79 |
|
help="1: compare each of density map/perspective map scale with gt for loss." |
|
80 |
|
" 0: only compare final density map and final density perspective map")
81 |
81 |
|
|
82 |
82 |
# args.original_lr = 1e-7 |
# args.original_lr = 1e-7 |
83 |
83 |
# args.lr = 1e-7 |
# args.lr = 1e-7 |
File data_flow.py changed (mode: 100644) (index a7b2270..6faebf8) |
... |
... |
def load_data_shanghaitech_pacnn_with_perspective(img_path, train=True): |
118 |
118 |
img = Image.open(img_path).convert('RGB') |
img = Image.open(img_path).convert('RGB') |
119 |
119 |
gt_file = h5py.File(gt_path, 'r') |
gt_file = h5py.File(gt_path, 'r') |
120 |
120 |
target = np.asarray(gt_file['density']) |
target = np.asarray(gt_file['density']) |
121 |
|
perspective = np.array(h5py.File(p_path, "r")['pmap']) |
|
|
121 |
|
perspective = np.array(h5py.File(p_path, "r")['pmap']).astype(np.float32) |
122 |
122 |
perspective = np.rot90(perspective, k=3) |
perspective = np.rot90(perspective, k=3) |
123 |
123 |
if train: |
if train: |
124 |
124 |
crop_size = (int(img.size[0] / 2), int(img.size[1] / 2)) |
crop_size = (int(img.size[0] / 2), int(img.size[1] / 2)) |
File main_pacnn.py changed (mode: 100644) (index 4eee4ac..823774f) |
... |
... |
if __name__ == "__main__": |
24 |
24 |
|
|
25 |
25 |
# Add the following code anywhere in your machine learning file |
# Add the following code anywhere in your machine learning file |
26 |
26 |
experiment = Experiment(api_key="S3mM1eMq6NumMxk2QJAXASkUM", |
experiment = Experiment(api_key="S3mM1eMq6NumMxk2QJAXASkUM", |
27 |
|
project_name="pacnn-dev2", workspace="ttpro1995") |
|
|
27 |
|
project_name="pacnn-dev2", workspace="ttpro1995", disabled=True) |
28 |
28 |
|
|
29 |
29 |
args = real_args_parse() |
args = real_args_parse() |
|
30 |
|
device = "cpu" |
30 |
31 |
print(device) |
print(device) |
31 |
32 |
print(args) |
print(args) |
32 |
33 |
|
|
33 |
34 |
|
|
|
35 |
|
|
34 |
36 |
MODEL_SAVE_NAME = args.task_id |
MODEL_SAVE_NAME = args.task_id |
35 |
37 |
MODEL_SAVE_INTERVAL = 5 |
MODEL_SAVE_INTERVAL = 5 |
36 |
38 |
DATA_PATH = args.input |
DATA_PATH = args.input |
|
... |
... |
if __name__ == "__main__": |
50 |
52 |
experiment.log_parameter("lr", args.lr) |
experiment.log_parameter("lr", args.lr) |
51 |
53 |
|
|
52 |
54 |
# create list |
# create list |
53 |
|
if DATASET_NAME is "shanghaitech": |
|
|
55 |
|
if "shanghaitech" in DATASET_NAME: |
54 |
56 |
TRAIN_PATH = os.path.join(DATA_PATH, "train_data") |
TRAIN_PATH = os.path.join(DATA_PATH, "train_data") |
55 |
57 |
TEST_PATH = os.path.join(DATA_PATH, "test_data") |
TEST_PATH = os.path.join(DATA_PATH, "test_data") |
56 |
58 |
train_list, val_list = get_train_val_list(TRAIN_PATH) |
train_list, val_list = get_train_val_list(TRAIN_PATH) |
57 |
59 |
test_list = create_training_image_list(TEST_PATH) |
test_list = create_training_image_list(TEST_PATH) |
58 |
|
elif DATASET_NAME is "ucf_cc_50": |
|
|
60 |
|
elif "ucf_cc_50" in DATASET_NAME: |
59 |
61 |
train_list, val_list = get_train_val_list(DATA_PATH, test_size=0.2) |
train_list, val_list = get_train_val_list(DATA_PATH, test_size=0.2) |
60 |
62 |
test_list = None |
test_list = None |
61 |
63 |
|
|
|
... |
... |
if __name__ == "__main__": |
70 |
72 |
]), |
]), |
71 |
73 |
train=True, |
train=True, |
72 |
74 |
batch_size=1, |
batch_size=1, |
73 |
|
num_workers=4, dataset_name="shanghaitech_pacnn"), |
|
|
75 |
|
num_workers=4, dataset_name=DATASET_NAME), |
74 |
76 |
batch_size=1, num_workers=4) |
batch_size=1, num_workers=4) |
75 |
77 |
|
|
76 |
78 |
val_loader_pacnn = torch.utils.data.DataLoader( |
val_loader_pacnn = torch.utils.data.DataLoader( |
|
... |
... |
if __name__ == "__main__": |
149 |
151 |
|
|
150 |
152 |
if PACNN_PERSPECTIVE_AWARE_MODEL: |
if PACNN_PERSPECTIVE_AWARE_MODEL: |
151 |
153 |
# TODO: loss for perspective map here |
# TODO: loss for perspective map here |
152 |
|
loss_p = criterion_mse(p, perspective_p) + criterion_ssim(p, perspective_p) |
|
|
154 |
|
pad_p_0 = perspective_p.size()[2] - p.size()[2] |
|
155 |
|
pad_p_1 = perspective_p.size()[3] - p.size()[3] |
|
156 |
|
p_pad = F.pad(p, (0, pad_p_1, 0, pad_p_0), mode='replicate') |
|
157 |
|
|
|
158 |
|
loss_p = criterion_mse(p_pad, perspective_p) + criterion_ssim(p_pad, perspective_p) |
|
159 |
|
|
153 |
160 |
loss += loss_p |
loss += loss_p |
154 |
161 |
if PACNN_MUTILPLE_SCALE_LOSS: |
if PACNN_MUTILPLE_SCALE_LOSS: |
155 |
|
loss_p_s = criterion_mse(p_s, perspective_s) + criterion_ssim(p_s, perspective_s) |
|
|
162 |
|
pad_s_0 = perspective_s.size()[2] - p_s.size()[2] |
|
163 |
|
pad_s_1 = perspective_s.size()[3] - p_s.size()[3] |
|
164 |
|
p_s_pad = F.pad(perspective_s, (0, pad_s_1, 0, pad_s_0), |
|
165 |
|
mode='replicate') |
|
166 |
|
|
|
167 |
|
loss_p_s = criterion_mse(p_s_pad, perspective_s) + criterion_ssim(p_s_pad, perspective_s) |
156 |
168 |
loss += loss_p_s |
loss += loss_p_s |
157 |
169 |
|
|
158 |
170 |
# what is this, loss_d count 2 ? |
# what is this, loss_d count 2 ? |
File train_script/train_pacnn_shanghaitechA.sh changed (mode: 100644) (index f3944dd..a405a96) |
48 |
48 |
#--task_id train_state1_attemp7 |
#--task_id train_state1_attemp7 |
49 |
49 |
|
|
50 |
50 |
#### no loss for d1, d2, d3 but only count d_final |
#### no loss for d1, d2, d3 but only count d_final |
|
51 |
|
#python main_pacnn.py \ |
|
52 |
|
#--input data/ShanghaiTech/part_A \ |
|
53 |
|
#--load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar \ |
|
54 |
|
#--epochs 300 \ |
|
55 |
|
#--lr 1e-9 \ |
|
56 |
|
#--PACNN_MUTILPLE_SCALE_LOSS False \ |
|
57 |
|
#--task_id train_state1_attemp8_finalloss |
|
58 |
|
|
|
59 |
|
#################### |
|
60 |
|
|
51 |
61 |
python main_pacnn.py \ |
python main_pacnn.py \ |
52 |
62 |
--input data/ShanghaiTech/part_A \ |
--input data/ShanghaiTech/part_A \ |
53 |
63 |
--load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar \ |
--load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar \ |
54 |
|
--epochs 300 \ |
|
|
64 |
|
--epochs 500 \ |
55 |
65 |
--lr 1e-9 \ |
--lr 1e-9 \ |
56 |
|
--PACNN_MUTILPLE_SCALE_LOSS False \ |
|
57 |
|
--task_id train_state1_attemp8_finalloss |
|
|
66 |
|
--PACNN_PERSPECTIVE_AWARE_MODEL 1 \ |
|
67 |
|
--PACNN_MUTILPLE_SCALE_LOSS 1 \ |
|
68 |
|
--task_id train_state2_attemp1 |
|
69 |
|
|
|
70 |
|
|
|
71 |
|
#--input data/ShanghaiTech/part_A \ |
|
72 |
|
#--load_model saved_model/train_state1_attemp7_180_checkpoint.pth.tar |
|
73 |
|
#--epochs 500 |
|
74 |
|
#--lr 1e-9 |
|
75 |
|
#--PACNN_PERSPECTIVE_AWARE_MODEL 1 |
|
76 |
|
#--PACNN_MUTILPLE_SCALE_LOSS 1 |
|
77 |
|
#--task_id dev |