List of commits:
Subject Hash Author Date (UTC)
demo 3d5fa0d764843d4fd780e44547fd0b13a367c6bc Thai Thien 2020-11-27 19:28:37
notable image b0f2b0e9732c2f2031831eb80eeaf7e6dab1f5bd Thai Thien 2020-11-22 07:11:16
notebook 5b8b840bc1fd0d806a13646477988534f4982fac Thai Thien 2020-11-22 07:10:42
l2_adamw2_bigtail13i_t13_sha 7fd22510845dec808100058190a202b805bf6a02 Thai Thien 2020-09-23 14:17:00
l2_adamw2_bigtail13i_t12_sha bf67476db55f55f348068bc7683f68834659cc96 Thai Thien 2020-09-23 14:14:35
l2_adamw2_bigtail13i_t11_sha 1365b305bf541a3d941d173b169fa14aa811beb1 Thai Thien 2020-09-22 19:31:17
l2_adamw2_bigtail13i_t10_sha 4f6f2b8d3ac4654cc2ed94ca6900643f3ca07be9 Thai Thien 2020-09-22 19:27:24
l2_adamw2_bigtail13i_t9_sha 576b424688dff640a0a7d448cbd615cfb4f33d54 Thai Thien 2020-09-22 19:21:17
t8 9e00ee3a50c93bbd726ebd8554a68f535d836691 Thai Thien 2020-09-22 19:11:37
split a a7b2115bee8ab76497528b84aee09c79631575b1 Thai Thien 2020-09-22 17:48:37
load_data_shanghaitech_256_v2 and l2_adamw2_bigtail13i_t7_sha 07bca1a460c30ed7ec7269d72dad99eef6fc96b3 Thai Thien 2020-09-22 17:04:45
adamw1_ccnnv7_t7_jhu c5f106103aa3696827627f5e815105d9432e2acb Thai Thien 2020-09-16 18:21:22
adamw1_ccnnv7_t6_jhu.sh 5c8644da7445b3dd4f6d2611d564592e8e8ed45c Thai Thien 2020-09-16 18:19:07
adamw1_CompactCNNV7_t6_jhu 1fa1d544a254cd042bcdd36e8de2984fe3d5244f Thai Thien 2020-09-16 17:56:15
adamw1_CompactCNNV7_t5_jhu 40720162c9e9b731fb852ed7e3e191228e421ced Thai Thien 2020-09-16 17:46:20
test_data cache 73633cbd6d70448268a3e1534440601ddcf75276 Thai Thien 2020-09-16 17:12:57
fix test data not exist 474f506204251e26825801499f6bc503f44f8410 Thai Thien 2020-09-16 16:41:00
fix test data not exist 6377e18d87e0fabbf9b4ed143e26ae5912c0b872 Thai Thien 2020-09-16 16:40:06
jhucrowd_downsample_512 30b8b9a63e93a5631f003858ef2ab63dc62c6563 Thai Thien 2020-09-16 16:17:02
fix 77d74f09874a7be0d8fcec141597be4ce331bf0c Thai Thien 2020-09-16 16:08:39
Commit 3d5fa0d764843d4fd780e44547fd0b13a367c6bc - demo
Author: Thai Thien
Author date (UTC): 2020-11-27 19:28
Committer name: Thai Thien
Committer date (UTC): 2020-11-27 19:28
Parent(s): b0f2b0e9732c2f2031831eb80eeaf7e6dab1f5bd
Signing key:
Tree: d45ac55223b73476c60745e63c1036b80fe15c26
File Lines added Lines deleted
args_util.py 1 1
data_flow.py 83 0
debug/perfomance_test_on_shb.py 3 0
demo_app/__init__.py 0 0
demo_app/predict_only.py 15 0
File args_util.py changed (mode: 100644) (index 77806a9..d68650a)
... ... def meow_parse():
136 136 # help="if true, use mse and negative ssim as loss function") # help="if true, use mse and negative ssim as loss function")
137 137 parser.add_argument('--loss_fn', action="store", default="MSE", type=str) parser.add_argument('--loss_fn', action="store", default="MSE", type=str)
138 138 parser.add_argument('--optim', action="store", default="adam", type=str) parser.add_argument('--optim', action="store", default="adam", type=str)
139 parser.add_argument('--eval_only', action="store_true", default=False)
139 parser.add_argument('--eval_only', action="store_true", default=False, help="only evaluate no train")
140 140 arg = parser.parse_args() arg = parser.parse_args()
141 141 return arg return arg
142 142
File data_flow.py changed (mode: 100644) (index 44b669e..029f79b)
... ... def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech",
1451 1451 return train_loader, train_loader_for_eval, val_loader, test_loader return train_loader, train_loader_for_eval, val_loader, test_loader
1452 1452 else: else:
1453 1453 return train_loader, val_loader, test_loader return train_loader, val_loader, test_loader
1454
1455
1456 def simple_predict_data_load_fn(img_path):
1457 img_name = img_path.split("/")[-1]
1458 # when debug, give information on p_count and img_name
1459 debug_info = {"img_path":img_path,
1460 "name": img_name}
1461 img_origin = Image.open(img_path).convert('RGB')
1462 return img_origin, debug_info
1463
1464
1465 class PredictListDataset(Dataset):
1466 def __init__(self, root, shape=None, shuffle=True, transform=None, batch_size=1,
1467 debug=False,
1468 num_workers=0):
1469 """
1470 if you have different image size, then batch_size must be 1
1471 :param root:
1472 :param shape:
1473 :param shuffle:
1474 :param transform:
1475 :param train:
1476 :param debug: will print path of image
1477 :param seen:
1478 :param batch_size:
1479 :param num_workers:
1480 """
1481
1482 if shuffle:
1483 random.shuffle(root)
1484
1485 self.nSamples = len(root)
1486 self.lines = root
1487 self.transform = transform
1488
1489 self.cache_train = {}
1490 self.cache_eval = {}
1491 self.debug = debug
1492 self.shape = shape
1493
1494 self.batch_size = batch_size
1495 self.num_workers = num_workers
1496
1497 def __len__(self):
1498 return self.nSamples
1499
1500 def __getitem__(self, index):
1501 assert index <= len(self), 'index range error'
1502 img_path = self.lines[index]
1503 # if self.debug:
1504 # print(img_path)
1505 # try to check cache item if exist
1506 img, info = simple_predict_data_load_fn(img_path)
1507 if self.transform is not None:
1508 if isinstance(img, list):
1509 # for case of generate multiple augmentation per sample
1510 img_r = [self.transform(img_item) for img_item in img]
1511 img = img_r
1512 else:
1513 img = self.transform(img)
1514 return img, info
1515
1516
1517 def get_predict_dataloader(data_list, visualize_mode=False, batch_size=1,
1518 debug=False, test_size=1):
1519
1520 if visualize_mode:
1521 transformer = transforms.Compose([
1522 transforms.ToTensor()
1523 ])
1524 else:
1525 transformer = transforms.Compose([
1526 transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
1527 std=[0.229, 0.224, 0.225]),
1528 ])
1529
1530 loader = torch.utils.data.DataLoader(PredictListDataset(
1531 data_list,
1532 shuffle=False,
1533 transform=transformer
1534 ))
1535
1536 return loader
File debug/perfomance_test_on_shb.py changed (mode: 100644) (index 1de37c9..3233d29)
... ... import time
24 24 import sys import sys
25 25
26 26 if __name__ == "__main__": if __name__ == "__main__":
27 """
28 go for speed
29 """
27 30 torch.set_num_threads(2) # 4 thread torch.set_num_threads(2) # 4 thread
28 31
29 32 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
File demo_app/__init__.py copied from file debug/__init__.py (similarity 100%)
File demo_app/predict_only.py added (mode: 100644) (index 0000000..6d18a33)
1 import os
2 from data_flow import get_predict_dataloader
3 if __name__ == "__main__":
4 """
5 predict all in folder
6 output into another folder
7 output density map and count in csv
8 """
9 INPUT_FOLDER = "/data/ShanghaiTech/part_B/test_data/images/"
10 OUTPUT_FOLDER = "/data/apps/tmp"
11 input_list = [os.path.join(INPUT_FOLDER, dir) for dir in os.listdir(INPUT_FOLDER)]
12 loader = get_predict_dataloader(input_list)
13 for img, info in loader:
14 print(img.shape)
15 print(info)
Hints:
Before your first commit, do not forget to set up your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main