List of commits:
Subject Hash Author Date (UTC)
visualize eval context aware network seem ok f3fe45c23dfeab3730624737efabb0b14d23c25b Thai Thien 2020-02-02 04:50:34
visualize_shanghaitech_pacnn_with_perspective run without error 12366a2de2bd60ff4bd36e6132d44e37dedf7462 Thai Thien 2020-02-02 04:21:16
eval context aware network on ShanghaiTechB can run e8c454d2b6d287c830c1286c9a37884b3cfc615f Thai Thien 2020-02-02 04:09:14
import ShanghaiTechDataPath in data_util e81eb56315d44375ff5c0e747d61456601492f8f Thai Thien 2020-02-02 04:04:36
add model_context_aware_network.py 2a36025c001d85afc064c090f4d22987b328977b Thai Thien 2020-02-02 03:46:38
PACNN (TODO: test this) 44d5ae7ec57c760fb4f105dd3e3492148a0cc075 Thai Thien 2020-02-02 03:40:26
add data path 80134de767d0137a663f343e4606bafc57a1bc1f Thai Thien 2020-02-02 03:38:21
test if ShanghaiTech datapath is correct 97ee84944a4393ec3732879b24f614826f8e7798 Thai Thien 2020-02-01 03:57:31
refactor and test ShanghaiTech datapath 9542ebc00f257edc38690180b7a4353794be4019 Thai Thien 2020-02-01 03:53:49
fix the unzip flow b53c5989935335377eb6a88c942713d3eccc5df7 Thai Thien 2020-02-01 03:53:13
data_script run seem ok 67420c08fc1c10a66404d3698994865726a106cd Thai Thien 2020-02-01 03:33:18
add perspective 642d6fff8c9f31e510fda85a7fb631fb855d8a6d Thai Thien 2019-10-06 16:54:44
fix padding with p 86c2fa07822d956a34b3b37e14da485a4249f01b Thai Thien 2019-10-06 02:52:58
pacnn perspective loss fb673e38a5f24ae9004fe2b7b93c88991e0c2304 Thai Thien 2019-10-06 01:38:28
data_flow shanghaitech_pacnn_with_perspective seem working 91d350a06f358e03223966297d124daee94123d0 Thai Thien 2019-10-06 01:31:11
multiscale loss and final loss only mode c65dd0e74ad28503821e5c8651a3b47b4a0c7c64 Thai Thien 2019-10-05 15:58:19
wip : perspective map eac63f2671dc5b064753acc4f40bf0f9f216ad2a Thai Thien 2019-10-04 16:26:56
shell script f2106e700b6f6174d4dd276f25ec6f3d9ff239bb thient 2019-10-04 07:42:51
WIP 42c7c8e1d772fbbda61a4bdf9e329f74e1efb600 tthien 2019-10-03 17:52:47
add readme 580cf43d1edddd67b1f6a2c57fdd5cee3dba925c Thai Thien 2019-10-02 17:44:49
Commit f3fe45c23dfeab3730624737efabb0b14d23c25b - visualize eval context aware network seem ok
Author: Thai Thien
Author date (UTC): 2020-02-02 04:50
Committer name: Thai Thien
Committer date (UTC): 2020-02-02 04:50
Parent(s): b96910e05dd6e363f8e9669fd4e5df75c43b88d9
Signing key:
Tree: fa7d80f0532443593d3b3d56f770d5491f6bf3cf
File Lines added Lines deleted
eval_context_aware_network.py 34 1
File eval_context_aware_network.py changed (mode: 100644) (index 760e6bd..6c8d057)
... ... from torchvision import transforms
10 10 from models.context_aware_network import CANNet from models.context_aware_network import CANNet
11 11 from data_util import ShanghaiTechDataPath from data_util import ShanghaiTechDataPath
12 12 from hard_code_variable import HardCodeVariable from hard_code_variable import HardCodeVariable
13 from visualize_util import save_img, save_density_map
13 14
14 15 _description=""" _description="""
15 16 This file run predict This file run predict
 
... ... Data path = /home/tt/project/ShanghaiTechCAN/part_B/test_data/images
17 18 model path = /home/tt/project/MODEL/Context-aware/part_B_pre.pth.tar model path = /home/tt/project/MODEL/Context-aware/part_B_pre.pth.tar
18 19 """ """
19 20
21 # if true, render every density map and its image
22 IS_VISUAL = True
23 saved_folder = "visualize/eval_context_aware_network_part_b"
24
20 25 transform=transforms.Compose([ transform=transforms.Compose([
21 26 transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406], transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
22 27 std=[0.229, 0.224, 0.225]), std=[0.229, 0.224, 0.225]),
23 28 ]) ])
24 29
30 transform_no_normalize=transforms.Compose([
31 transforms.ToTensor()
32 ])
33
25 34 # the folder contains all the test images # the folder contains all the test images
26 35 hard_code = HardCodeVariable() hard_code = HardCodeVariable()
27 36 shanghaitech_data = ShanghaiTechDataPath(root=hard_code.SHANGHAITECH_PATH) shanghaitech_data = ShanghaiTechDataPath(root=hard_code.SHANGHAITECH_PATH)
 
... ... img_paths=[]
33 42
34 43 for img_path in glob.glob(os.path.join(img_folder, '*.jpg')): for img_path in glob.glob(os.path.join(img_folder, '*.jpg')):
35 44 img_paths.append(img_path) img_paths.append(img_path)
36 # img_paths = img_paths[:10]
45 img_paths = img_paths[:10]
37 46
38 47
39 48 model = CANNet() model = CANNet()
 
... ... model.eval()
49 58 pred= [] pred= []
50 59 gt = [] gt = []
51 60
61 if IS_VISUAL:
62 os.makedirs(saved_folder, exist_ok=True)
63
52 64 for i in range(len(img_paths)): for i in range(len(img_paths)):
65 img_original = transform_no_normalize(Image.open(img_paths[i]).convert('RGB')).unsqueeze(0)
53 66 img = transform(Image.open(img_paths[i]).convert('RGB')).cuda() img = transform(Image.open(img_paths[i]).convert('RGB')).cuda()
54 67 img = img.unsqueeze(0) img = img.unsqueeze(0)
55 68 h,w = img.shape[2:4] h,w = img.shape[2:4]
56 69 h_d = int(h/2) h_d = int(h/2)
57 70 w_d = int(w/2) w_d = int(w/2)
58 71 img_1 = Variable(img[:,:,:h_d,:w_d].cuda()) img_1 = Variable(img[:,:,:h_d,:w_d].cuda())
72 img_original_1 = img_original[:,:,:h_d,:w_d]
73
59 74 img_2 = Variable(img[:,:,:h_d,w_d:].cuda()) img_2 = Variable(img[:,:,:h_d,w_d:].cuda())
75 img_original_2 = img_original[:,:,:h_d,w_d:]
76
60 77 img_3 = Variable(img[:,:,h_d:,:w_d].cuda()) img_3 = Variable(img[:,:,h_d:,:w_d].cuda())
78 img_original_3 = img_original[:,:,h_d:,:w_d]
79
61 80 img_4 = Variable(img[:,:,h_d:,w_d:].cuda()) img_4 = Variable(img[:,:,h_d:,w_d:].cuda())
81 img_original_4 = img_original[:,:,h_d:,w_d:]
82
62 83 density_1 = model(img_1).data.cpu().numpy() density_1 = model(img_1).data.cpu().numpy()
63 84 density_2 = model(img_2).data.cpu().numpy() density_2 = model(img_2).data.cpu().numpy()
64 85 density_3 = model(img_3).data.cpu().numpy() density_3 = model(img_3).data.cpu().numpy()
 
... ... for i in range(len(img_paths)):
71 92 pred.append(pred_sum) pred.append(pred_sum)
72 93 gt.append(np.sum(groundtruth)) gt.append(np.sum(groundtruth))
73 94 print("done ", i, "pred ",pred_sum, " gt ", np.sum(groundtruth)) print("done ", i, "pred ",pred_sum, " gt ", np.sum(groundtruth))
95 ## print out visual
96 name_prefix = os.path.join(saved_folder, "sample_"+str(i))
97 save_img(img_original_1, name_prefix+"_img1.png")
98 save_img(img_original_2, name_prefix + "_img2.png")
99 save_img(img_original_3, name_prefix + "_img3.png")
100 save_img(img_original_4, name_prefix + "_img4.png")
101
102 save_density_map(density_1.squeeze(), name_prefix + "_pred1.png")
103 save_density_map(density_2.squeeze(), name_prefix + "_pred2.png")
104 save_density_map(density_3.squeeze(), name_prefix + "_pred3.png")
105 save_density_map(density_4.squeeze(), name_prefix + "_pred4.png")
106 ##
74 107
75 108 print(len(pred)) print(len(pred))
76 109 print(len(gt)) print(len(gt))
Hints:
Before your first commit, do not forget to set up your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/hahattpro/crowd_counting_framework

Clone this repository using git:
git clone git://git.rocketgit.com/user/hahattpro/crowd_counting_framework

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main