List of commits:
Subject Hash Author Date (UTC)
some demo and playground code b1d48dd89f1bf3a30b82c51b25cdd68fb77f1023 Thai Thien 2020-12-04 15:53:19
some predict demo 7f2214ad2882874d5407a2d4d5bd115b7089a020 Thai Thien 2020-12-02 14:55:22
demo 3d5fa0d764843d4fd780e44547fd0b13a367c6bc Thai Thien 2020-11-27 19:28:37
notable image b0f2b0e9732c2f2031831eb80eeaf7e6dab1f5bd Thai Thien 2020-11-22 07:11:16
notebook 5b8b840bc1fd0d806a13646477988534f4982fac Thai Thien 2020-11-22 07:10:42
l2_adamw2_bigtail13i_t13_sha 7fd22510845dec808100058190a202b805bf6a02 Thai Thien 2020-09-23 14:17:00
l2_adamw2_bigtail13i_t12_sha bf67476db55f55f348068bc7683f68834659cc96 Thai Thien 2020-09-23 14:14:35
l2_adamw2_bigtail13i_t11_sha 1365b305bf541a3d941d173b169fa14aa811beb1 Thai Thien 2020-09-22 19:31:17
l2_adamw2_bigtail13i_t10_sha 4f6f2b8d3ac4654cc2ed94ca6900643f3ca07be9 Thai Thien 2020-09-22 19:27:24
l2_adamw2_bigtail13i_t9_sha 576b424688dff640a0a7d448cbd615cfb4f33d54 Thai Thien 2020-09-22 19:21:17
t8 9e00ee3a50c93bbd726ebd8554a68f535d836691 Thai Thien 2020-09-22 19:11:37
split a a7b2115bee8ab76497528b84aee09c79631575b1 Thai Thien 2020-09-22 17:48:37
load_data_shanghaitech_256_v2 and l2_adamw2_bigtail13i_t7_sha 07bca1a460c30ed7ec7269d72dad99eef6fc96b3 Thai Thien 2020-09-22 17:04:45
adamw1_ccnnv7_t7_jhu c5f106103aa3696827627f5e815105d9432e2acb Thai Thien 2020-09-16 18:21:22
adamw1_ccnnv7_t6_jhu.sh 5c8644da7445b3dd4f6d2611d564592e8e8ed45c Thai Thien 2020-09-16 18:19:07
adamw1_CompactCNNV7_t6_jhu 1fa1d544a254cd042bcdd36e8de2984fe3d5244f Thai Thien 2020-09-16 17:56:15
adamw1_CompactCNNV7_t5_jhu 40720162c9e9b731fb852ed7e3e191228e421ced Thai Thien 2020-09-16 17:46:20
test_data cache 73633cbd6d70448268a3e1534440601ddcf75276 Thai Thien 2020-09-16 17:12:57
fix test data not exist 474f506204251e26825801499f6bc503f44f8410 Thai Thien 2020-09-16 16:41:00
fix test data not exist 6377e18d87e0fabbf9b4ed143e26ae5912c0b872 Thai Thien 2020-09-16 16:40:06
Commit b1d48dd89f1bf3a30b82c51b25cdd68fb77f1023 - some demo and playground code
Author: Thai Thien
Author date (UTC): 2020-12-04 15:53
Committer name: Thai Thien
Committer date (UTC): 2020-12-04 15:53
Parent(s): 7f2214ad2882874d5407a2d4d5bd115b7089a020
Signing key:
Tree: 2653e96f8fe6a72c024d00583c02c1e7e4b1faeb
File Lines added Lines deleted
demo_app/predict_only.py 3 2
demo_app/predict_video.py 0 0
playground/cv_heatmap.py 40 0
playground/try_to_do_overlay.py 35 0
playground/video_with_torchvision.py 20 0
File demo_app/predict_only.py changed (mode: 100644) (index d677298..1702211)
@@ -10,7 +10,7 @@ if __name__ == "__main__":
     output into another folder
     output density map and count in csv
     """
-    NAME="bao2"
+    NAME="for_question_forum"
     # INPUT_FOLDER = "/data/ShanghaiTech/part_B/test_data/images/"
     INPUT_FOLDER = "/home/tt/Downloads/bao2"
     OUTPUT_FOLDER = "/data/apps/tmp"
@@ -21,7 +21,7 @@ if __name__ == "__main__":
     model = DCCNN()
     model.load_state_dict(loaded_file['model'])
     model.eval()
-    os.mkdir(os.path.join(OUTPUT_FOLDER, NAME))
+    os.makedirs(os.path.join(OUTPUT_FOLDER, NAME), exist_ok=True)
     log_file = open(os.path.join(OUTPUT_FOLDER, NAME, NAME +".log"), 'w')
     limit_count = 100
     count = 0
@@ -37,6 +37,7 @@ if __name__ == "__main__":
     log_line = info["name"][0] + "," + str(pred_count.item()) +"\n"
     log_file.write(log_line)
     save_density_map(pred, predict_path)
+    torch.save(pred, predict_path+".torch")
     print("save to ", predict_path)
     count += 1
     log_file.close()
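The functional changes are small: os.mkdir is replaced by os.makedirs(..., exist_ok=True) so re-running the demo into an existing output folder no longer fails, and an extra torch.save call now dumps the raw predicted density map next to each rendered image. A minimal sketch of reading one of those dumps back (path taken from the playground scripts below; depending on what the model returned, the saved object may be a torch tensor or a numpy array, and both support .sum()):

import torch

# Path copied from playground/cv_heatmap.py below; adjust to your own output.
pred = torch.load("/data/apps/tmp/for_question_forum/PRED_download.jpeg.torch")
print(pred.shape)         # density map at the network's output resolution (1/8 of the input in these scripts)
print(float(pred.sum()))  # the predicted head count is the sum over the density map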
File demo_app/predict_video.py copied from file debug/__init__.py (similarity 100%)
File playground/cv_heatmap.py added (mode: 100644) (index 0000000..dd2191b)
import cv2
import torch
from torch import nn
path = "/data/apps/tmp/for_question_forum"
img_path = "/home/tt/Downloads/bao2/download.jpeg"
density_map_path = "/data/apps/tmp/for_question_forum/PRED_download.jpeg.torch"

img_tensor = cv2.imread(img_path)
print(type(img_tensor))
print(img_tensor.shape)

density_map_tensor = torch.load(density_map_path)
print(img_tensor.shape)
print(density_map_tensor.shape)
print(density_map_tensor.sum())
density_map_tensor = torch.from_numpy(density_map_tensor).unsqueeze(dim=0).unsqueeze(dim=0)
# module = nn.UpsamplingBilinear2d(scale_factor=8)
# upsampling_density_map_tensor = module(density_map_tensor)
upsampling_density_map_tensor = nn.functional.interpolate(density_map_tensor, scale_factor=8)/64
print(upsampling_density_map_tensor.sum())
print(upsampling_density_map_tensor.shape)

pad_density_map_tensor = torch.zeros((1, 3, img_tensor.shape[0], img_tensor.shape[1]))
pad_density_map_tensor[:, 0,:upsampling_density_map_tensor.shape[2], :upsampling_density_map_tensor.shape[3]] = upsampling_density_map_tensor
print(pad_density_map_tensor.shape)
# pad_density_map_tensor = (pad_density_map_tensor.squeeze(dim=0)/pad_density_map_tensor.max()*255)
pad_density_map_tensor = pad_density_map_tensor.squeeze(dim=0)/pad_density_map_tensor.max()
print(pad_density_map_tensor.shape)
pad_density_map_tensor_match = pad_density_map_tensor.permute(1,2,0)
print(pad_density_map_tensor_match.shape)
pad_density_map_tensor_match_np = pad_density_map_tensor_match.numpy()
print(pad_density_map_tensor_match_np.shape)
print(pad_density_map_tensor_match_np.dtype)
pad_density_map_tensor_match_np = pad_density_map_tensor_match_np.astype("uint8")
print(img_tensor.dtype)
print(pad_density_map_tensor_match_np.dtype)
print(pad_density_map_tensor_match_np[:,:,0:1].shape)
overlay_color = cv2.applyColorMap(pad_density_map_tensor_match_np[:,:,0], colormap=cv2.COLORMAP_JET)

cv2.imwrite("../visualize/pic/cv2_overlay_color.png", overlay_color)
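cv_heatmap.py stops after writing the JET-colored map on its own; nothing is blended back onto the photo, and because the map is normalized to [0, 1] before the uint8 cast, the commented-out *255 variant appears to be the one that would give a visible heatmap. A self-contained sketch (same paths and top-left padding as above; the 0.6/0.4 blending weights and the output filename are made up) of rescaling and blending with cv2.addWeighted:

import cv2
import numpy as np
import torch
from torch import nn

img = cv2.imread("/home/tt/Downloads/bao2/download.jpeg")                              # HWC, uint8, BGR
dm = torch.load("/data/apps/tmp/for_question_forum/PRED_download.jpeg.torch")
dm = torch.as_tensor(dm, dtype=torch.float32).detach().reshape(1, 1, *dm.shape[-2:])   # to NCHW
dm = nn.functional.interpolate(dm, scale_factor=8) / 64                                # back toward input resolution, sum preserved
heat = np.zeros(img.shape[:2], dtype=np.float32)
heat[:dm.shape[2], :dm.shape[3]] = dm[0, 0].numpy()                                    # same top-left padding as the script above
heat_u8 = (heat / heat.max() * 255).astype(np.uint8)                                   # rescale to 0..255 before colorizing
heat_color = cv2.applyColorMap(heat_u8, colormap=cv2.COLORMAP_JET)
blended = cv2.addWeighted(img, 0.6, heat_color, 0.4, 0)                                # 60% photo, 40% heatmap
cv2.imwrite("../visualize/pic/cv2_overlay_blended.png", blended)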
File playground/try_to_do_overlay.py added (mode: 100644) (index 0000000..187fcc6)
from torchvision.io import write_png, read_image
import torch
from torch import nn

path = "/data/apps/tmp/for_question_forum"
img_path = "/home/tt/Downloads/bao2/download.jpeg"
density_map_path = "/data/apps/tmp/for_question_forum/PRED_download.jpeg.torch"

img_tensor = read_image(img_path)
density_map_tensor = torch.load(density_map_path)

print(img_tensor.shape)
print(density_map_tensor.shape)
print(density_map_tensor.sum())
density_map_tensor = torch.from_numpy(density_map_tensor).unsqueeze(dim=0).unsqueeze(dim=0)
# module = nn.UpsamplingBilinear2d(scale_factor=8)
# upsampling_density_map_tensor = module(density_map_tensor)
upsampling_density_map_tensor = nn.functional.interpolate(density_map_tensor, scale_factor=8)/64
print(upsampling_density_map_tensor.sum())
print(upsampling_density_map_tensor.shape)

pad_density_map_tensor = torch.zeros((1, 3, img_tensor.shape[1], img_tensor.shape[2]))
pad_density_map_tensor[:, 0,:upsampling_density_map_tensor.shape[2], :upsampling_density_map_tensor.shape[3]] = upsampling_density_map_tensor
print(pad_density_map_tensor.shape)
pad_density_map_tensor = (pad_density_map_tensor.squeeze(dim=0)/pad_density_map_tensor.max()*255)
# pad_density_map_tensor = pad_density_map_tensor.squeeze(dim=0)

print(img_tensor.dtype)
print(pad_density_map_tensor.dtype)

overlay_density_map = img_tensor.detach().clone()
overlay_density_map[0] = torch.clamp_max(img_tensor[0] + pad_density_map_tensor[0] * 2, max=255)

write_png(overlay_density_map.type(torch.uint8), "../visualize/pic/overlay.png")
write_png(pad_density_map_tensor.type(torch.uint8), "../visualize/pic/pad.png")
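Both playground scripts upsample with nn.functional.interpolate(density_map_tensor, scale_factor=8)/64. The division is what keeps the count consistent: the default 'nearest' mode copies each value into an 8x8 block, which multiplies the sum by 64, so dividing by 64 leaves the total (the predicted head count) unchanged. A tiny check:

import torch
from torch import nn

x = torch.rand(1, 1, 4, 4)
up = nn.functional.interpolate(x, scale_factor=8) / 64  # default mode='nearest'
print(float(x.sum()), float(up.sum()))                  # the two sums agree (up to float rounding)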
File playground/video_with_torchvision.py added (mode: 100644) (index 0000000..26db1f5)
from torchvision.io.video import read_video, write_video
from torchvision.io import write_png
import os

video_path = "/home/tt/Videos/VID_20201202_133703_090.mp4"
out_path = "../visualize/vid"


def example_load_frame():
    v, a, info = read_video("/home/tt/Videos/VID_20201202_133703_090.mp4", pts_unit='sec')
    print(v.shape)  # torch.Size([467, 1080, 1920, 3])
    # write a frame
    single_frame = v[100]
    print(single_frame.shape)  # torch.Size([1080, 1920, 3])
    single_frame = single_frame.permute(2,0,1)  # to CHW
    print(single_frame.shape)
    file_out = os.path.join(out_path, "single_frame.png")
    write_png(single_frame, file_out)
    print("done write to ", file_out)

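example_load_frame() only extracts and saves a single frame. read_video also returns the frame rate, so the same torchvision I/O can write a clip back out; the sketch below (the output filename is made up, and the per-frame processing is left as a plain pass-through of the first 100 frames) shows the write_video side under those assumptions:

from torchvision.io.video import read_video, write_video


def example_write_video(video_path="/home/tt/Videos/VID_20201202_133703_090.mp4",
                        file_out="../visualize/vid/first_100_frames.mp4"):
    # v: [T, H, W, C] uint8 frames; info carries the original frame rate
    v, a, info = read_video(video_path, pts_unit='sec')
    fps = info.get("video_fps", 30)
    frames = v[:100]  # placeholder "processing": keep the first 100 frames unchanged
    write_video(file_out, frames, fps=int(fps))
    print("done write to ", file_out)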
Hints:
Before your first commit, do not forget to set up your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main