List of commits:
Subject Hash Author Date (UTC)
shuffle non-overlap, msel1 loss 8ef0edfc8b459326b4c7a7044bc6dc6d30fd7fd6 Thai Thien 2020-07-13 10:48:54
MSEL1Loss e2f4dc7bb1bee096ae17101331e0f221d05ba1af Thai Thien 2020-07-13 09:34:11
t10 2f443c077e186901cdbe2c9faf27d2f37905a775 Thai Thien 2020-07-12 18:57:39
t10 87d6dd6e7debf45e16953e39471c13071039fa59 Thai Thien 2020-07-12 18:57:17
fix 45617766d305df841b7b0af3c2f6204a2e6d4f46 Thai Thien 2020-07-12 18:45:01
ccnn_v7_t8_shb 9f921d9415b89d9c9d4f4e8ef9b65b32663e06ec Thai Thien 2020-07-12 16:00:42
fix shanghaitech_non_overlap 9332377476d2d7524822ba98e2321c75be90f709 Thai Thien 2020-07-12 15:57:08
fix flatten collate bb25738b0510915712a17daeb590b668b446b0be Thai Thien 2020-07-12 15:17:14
ccnn_v7_t6_shb c4f59ee01cbdab3f506302588b9667cd1c9f6411 Thai Thien 2020-07-12 14:19:03
do not *4 root if we have flatten augmentation list c534aa36bf314ea32643e92231194bd020d7bf1f Thai Thien 2020-07-12 14:19:00
train val split shb 61581543d16aaa2640bdee0b3573e41d1843770d Thai Thien 2020-07-12 14:06:25
flatten collate c04708ae0defc81dbf441395e1d27de6a1d598fc Thai Thien 2020-07-12 13:54:01
travis remove nightly d4b0c714823046bbafcd3c816d56f7079c76d126 Thai Thien 2020-07-12 13:27:55
travis e6368ec3102e01f1bdc71a80a78f0db3617d7e08 Thai Thien 2020-07-12 12:33:21
flatten_collate 1e460396875c205c42de27449f56e73cd4ec10e0 Thai Thien 2020-07-12 12:23:40
train val split test ratio to 0.1 5091da3f0b45d875a38c2829e4fec5e61116e869 Thai Thien 2020-07-11 03:14:26
f 1defee5dc452c2da5fb540ff1050f6e01fe1878b Thai Thien 2020-07-10 17:04:58
fix log db4d655a313b8f3951baf225a4b197fce4bcdd4b Thai Thien 2020-07-10 16:50:03
typo, trunc 3.0 not 4.0 ee29c49efd0a5087f11662997b8992d25671a33a Thai Thien 2020-07-10 16:44:55
let try with trunc 3.0 f8eac179b1fa79d6451f19a9e6a35b82b94646a4 Thai Thien 2020-07-10 16:43:41
Commit 8ef0edfc8b459326b4c7a7044bc6dc6d30fd7fd6 - shuffle non-overlap, msel1 loss
Author: Thai Thien
Author date (UTC): 2020-07-13 10:48
Committer name: Thai Thien
Committer date (UTC): 2020-07-13 10:48
Parent(s): e2f4dc7bb1bee096ae17101331e0f221d05ba1af
Signing key:
Tree: 9ea8b0cdb567d5f251b37ccbbdb92b899aca02e6
File Lines added Lines deleted
data_flow.py 46 2
experiment_main.py 4 1
mse_l1_loss.py 0 0
File data_flow.py changed (mode: 100644) (index 4d6f927..2dffa53)
... ... def load_data_shanghaitech_non_overlap(img_path, train=True):
464 464 if x==1: if x==1:
465 465 target = np.fliplr(target) target = np.fliplr(target)
466 466 img = img.transpose(Image.FLIP_LEFT_RIGHT) img = img.transpose(Image.FLIP_LEFT_RIGHT)
467
468
469 467 target1 = cv2.resize(target, (int(target.shape[1] / target_factor), int(target.shape[0] / target_factor)), target1 = cv2.resize(target, (int(target.shape[1] / target_factor), int(target.shape[0] / target_factor)),
470 468 interpolation=cv2.INTER_CUBIC) * target_factor * target_factor interpolation=cv2.INTER_CUBIC) * target_factor * target_factor
471 469 # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output # target1 = target1.unsqueeze(0) # make dim (batch size, channel size, x, y) to make model output
472 470 target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output
473 471 crop_img.append(img) crop_img.append(img)
474 472 crop_label.append(target1) crop_label.append(target1)
473 # shuffle in pair
474 tmp_pair = list(zip(crop_img, crop_label))
475 random.shuffle(tmp_pair)
476 crop_img, crop_label = zip(*tmp_pair)
475 477 return crop_img, crop_label return crop_img, crop_label
476 478
477 479 if not train: if not train:
 
... ... def load_data_shanghaitech_non_overlap(img_path, train=True):
481 483 return img_origin, gt_count return img_origin, gt_count
482 484
483 485
486 def load_data_shanghaitech_non_overlap_noflip(img_path, train=True):
487 """
488 per sample, crop 4, non-overlap
489 :param img_path:
490 :param train:
491 :return:
492 """
493 gt_path = img_path.replace('.jpg', '.h5').replace('images', 'ground-truth-h5')
494 img_origin = Image.open(img_path).convert('RGB')
495 crop_size = (int(img_origin.size[0] / 2), int(img_origin.size[1] / 2))
496 gt_file = h5py.File(gt_path, 'r')
497 target_origin = np.asarray(gt_file['density'])
498 target_factor = 8
499
500 if train:
501 # for each image
502 # create 8 patches, 4 non-overlap 4 corner
503 # for each of 4 patch, create another 4 flip
504 crop_img = []
505 crop_label = []
506 for i in range(2):
507 for j in range(2):
508 # crop non-overlap
509 dx = int(i * img_origin.size[0] * 1. / 2)
510 dy = int(j * img_origin.size[1] * 1. / 2)
511 img = img_origin.crop((dx, dy, crop_size[0] + dx, crop_size[1] + dy))
512 target = target_origin[dy:crop_size[1] + dy, dx:crop_size[0] + dx]
513
514
515 target1 = cv2.resize(target, (int(target.shape[1] / target_factor), int(target.shape[0] / target_factor)),
516 interpolation=cv2.INTER_CUBIC) * target_factor * target_factor
517 target1 = np.expand_dims(target1, axis=0) # make dim (batch size, channel size, x, y) to make model output
518 crop_img.append(img)
519 crop_label.append(target1)
520 return crop_img, crop_label
521
522 if not train:
523 # get correct people head count from head annotation
524 mat_path = img_path.replace('.jpg', '.mat').replace('images', 'ground-truth').replace('IMG', 'GT_IMG')
525 gt_count = count_gt_annotation_sha(mat_path)
526 return img_origin, gt_count
527
484 528 def load_data_shanghaitech_crop_random(img_path, train=True): def load_data_shanghaitech_crop_random(img_path, train=True):
485 529 """ """
486 530 40 percent crop 40 percent crop
File experiment_main.py changed (mode: 100644) (index b6b690d..1ea7cdf)
... ... from ignite.metrics import Loss
7 7 from ignite.handlers import Checkpoint, DiskSaver, Timer from ignite.handlers import Checkpoint, DiskSaver, Timer
8 8 from crowd_counting_error_metrics import CrowdCountingMeanAbsoluteError, CrowdCountingMeanSquaredError, CrowdCountingMeanAbsoluteErrorWithCount, CrowdCountingMeanSquaredErrorWithCount from crowd_counting_error_metrics import CrowdCountingMeanAbsoluteError, CrowdCountingMeanSquaredError, CrowdCountingMeanAbsoluteErrorWithCount, CrowdCountingMeanSquaredErrorWithCount
9 9 from visualize_util import get_readable_time from visualize_util import get_readable_time
10
10 from mse_l1_loss import MSEL1Loss
11 11 import torch import torch
12 12 from torch import nn from torch import nn
13 13 from models.meow_experiment.kitten_meow_1 import M1, M2, M3, M4 from models.meow_experiment.kitten_meow_1 import M1, M2, M3, M4
 
... ... if __name__ == "__main__":
130 130 elif args.loss_fn == "MSEMean": elif args.loss_fn == "MSEMean":
131 131 loss_fn = nn.MSELoss(reduction='mean').to(device) loss_fn = nn.MSELoss(reduction='mean').to(device)
132 132 print("use MSEMean") print("use MSEMean")
133 elif args.loss_fn == "MSEL1Mean":
134 loss_fn = MSEL1Loss(reduction='mean').to(device)
135 print("use MSEL1Mean")
133 136 elif args.loss_fn == "MSENone": elif args.loss_fn == "MSENone":
134 137 """ """
135 138 Doesnt work Doesnt work
File mse_l1_loss.py renamed from MSEL1Loss.py (similarity 100%)
Hints:
Before your first commit, do not forget to set up your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main