File demo_app/predict_only.py changed (mode: 100644) (index d677298..1702211) |
... |
... |
if __name__ == "__main__": |
10 |
10 |
output into another folder |
output into another folder |
11 |
11 |
output density map and count in csv |
output density map and count in csv |
12 |
12 |
""" |
""" |
13 |
|
NAME="bao2" |
|
|
13 |
|
NAME="for_question_forum" |
14 |
14 |
# INPUT_FOLDER = "/data/ShanghaiTech/part_B/test_data/images/" |
# INPUT_FOLDER = "/data/ShanghaiTech/part_B/test_data/images/" |
15 |
15 |
INPUT_FOLDER = "/home/tt/Downloads/bao2" |
INPUT_FOLDER = "/home/tt/Downloads/bao2" |
16 |
16 |
OUTPUT_FOLDER = "/data/apps/tmp" |
OUTPUT_FOLDER = "/data/apps/tmp" |
|
... |
... |
if __name__ == "__main__": |
21 |
21 |
model = DCCNN() |
model = DCCNN() |
22 |
22 |
model.load_state_dict(loaded_file['model']) |
model.load_state_dict(loaded_file['model']) |
23 |
23 |
model.eval() |
model.eval() |
24 |
|
os.mkdir(os.path.join(OUTPUT_FOLDER, NAME)) |
|
|
24 |
|
os.makedirs(os.path.join(OUTPUT_FOLDER, NAME), exist_ok=True) |
25 |
25 |
log_file = open(os.path.join(OUTPUT_FOLDER, NAME, NAME +".log"), 'w') |
log_file = open(os.path.join(OUTPUT_FOLDER, NAME, NAME +".log"), 'w') |
26 |
26 |
limit_count = 100 |
limit_count = 100 |
27 |
27 |
count = 0 |
count = 0 |
|
... |
... |
if __name__ == "__main__": |
37 |
37 |
log_line = info["name"][0] + "," + str(pred_count.item()) +"\n" |
log_line = info["name"][0] + "," + str(pred_count.item()) +"\n" |
38 |
38 |
log_file.write(log_line) |
log_file.write(log_line) |
39 |
39 |
save_density_map(pred, predict_path) |
save_density_map(pred, predict_path) |
|
40 |
|
torch.save(pred, predict_path+".torch") |
40 |
41 |
print("save to ", predict_path) |
print("save to ", predict_path) |
41 |
42 |
count += 1 |
count += 1 |
42 |
43 |
log_file.close() |
log_file.close() |
File playground/cv_heatmap.py added (mode: 100644) (index 0000000..dd2191b) |
|
"""Overlay a predicted crowd-density map on its source image with OpenCV.

Loads the image and the saved density tensor, upsamples the density map
back toward image resolution, normalizes it to the displayable [0, 255]
range, applies a JET colormap, and writes the colored heatmap to disk.
"""
import cv2
import torch
from torch import nn

path = "/data/apps/tmp/for_question_forum"
img_path = "/home/tt/Downloads/bao2/download.jpeg"
density_map_path = "/data/apps/tmp/for_question_forum/PRED_download.jpeg.torch"

img_tensor = cv2.imread(img_path)  # numpy uint8, HWC (BGR channel order)
print(type(img_tensor))
print(img_tensor.shape)

density_map_tensor = torch.load(density_map_path)
print(img_tensor.shape)
print(density_map_tensor.shape)
print(density_map_tensor.sum())
# torch.load here yields a numpy 2-D map; lift it to NCHW for interpolate.
density_map_tensor = torch.from_numpy(density_map_tensor).unsqueeze(dim=0).unsqueeze(dim=0)
# module = nn.UpsamplingBilinear2d(scale_factor=8)
# upsampling_density_map_tensor = module(density_map_tensor)
# Model output appears to be 1/8 resolution; upsample x8 and divide by
# 8*8=64 so the total count (sum) is preserved — TODO confirm stride.
upsampling_density_map_tensor = nn.functional.interpolate(density_map_tensor, scale_factor=8)/64
print(upsampling_density_map_tensor.sum())
print(upsampling_density_map_tensor.shape)

# Pad the upsampled map into a full image-sized 3-channel canvas
# (density goes into channel 0; the upsampled map can be smaller than
# the image when its size is not a multiple of 8).
pad_density_map_tensor = torch.zeros((1, 3, img_tensor.shape[0], img_tensor.shape[1]))
pad_density_map_tensor[:, 0, :upsampling_density_map_tensor.shape[2], :upsampling_density_map_tensor.shape[3]] = upsampling_density_map_tensor
print(pad_density_map_tensor.shape)
# BUG FIX: normalizing only to [0, 1] and then casting to uint8 below
# truncates every pixel to 0/1, yielding a blank heatmap.  Scale to
# [0, 255] first (as the previously commented-out variant did).
pad_density_map_tensor = pad_density_map_tensor.squeeze(dim=0) / pad_density_map_tensor.max() * 255
print(pad_density_map_tensor.shape)
pad_density_map_tensor_match = pad_density_map_tensor.permute(1, 2, 0)  # CHW -> HWC for OpenCV
print(pad_density_map_tensor_match.shape)
pad_density_map_tensor_match_np = pad_density_map_tensor_match.numpy()
print(pad_density_map_tensor_match_np.shape)
print(pad_density_map_tensor_match_np.dtype)
# applyColorMap requires an 8-bit single-channel input.
pad_density_map_tensor_match_np = pad_density_map_tensor_match_np.astype("uint8")
print(img_tensor.dtype)
print(pad_density_map_tensor_match_np.dtype)
print(pad_density_map_tensor_match_np[:, :, 0:1].shape)
overlay_color = cv2.applyColorMap(pad_density_map_tensor_match_np[:, :, 0], colormap=cv2.COLORMAP_JET)

cv2.imwrite("../visualize/pic/cv2_overlay_color.png", overlay_color)
File playground/try_to_do_overlay.py added (mode: 100644) (index 0000000..187fcc6) |
|
"""Overlay a predicted density map on its image using torchvision only.

The saved density map is upsampled x8 (count-preserving), padded into an
image-sized canvas, scaled to [0, 255], and added into the image's red
channel; both the overlay and the padded map are written out as PNGs.
"""
from torchvision.io import write_png, read_image
import torch
from torch import nn

path = "/data/apps/tmp/for_question_forum"
img_path = "/home/tt/Downloads/bao2/download.jpeg"
density_map_path = "/data/apps/tmp/for_question_forum/PRED_download.jpeg.torch"

img_tensor = read_image(img_path)
density_map_tensor = torch.load(density_map_path)

print(img_tensor.shape)
print(density_map_tensor.shape)
print(density_map_tensor.sum())
# The stored map loads as a numpy array; promote it to an NCHW tensor so
# interpolate can consume it.
density_map_tensor = torch.from_numpy(density_map_tensor).unsqueeze(dim=0).unsqueeze(dim=0)
# x8 upsampling; dividing by 8*8 = 64 keeps the summed count unchanged.
upsampling_density_map_tensor = nn.functional.interpolate(density_map_tensor, scale_factor=8) / 64
print(upsampling_density_map_tensor.sum())
print(upsampling_density_map_tensor.shape)

# Image-sized 3-channel canvas; the (possibly smaller) upsampled map is
# written into channel 0 at the top-left corner.
pad_density_map_tensor = torch.zeros((1, 3, img_tensor.shape[1], img_tensor.shape[2]))
map_h = upsampling_density_map_tensor.shape[2]
map_w = upsampling_density_map_tensor.shape[3]
pad_density_map_tensor[:, 0, :map_h, :map_w] = upsampling_density_map_tensor
print(pad_density_map_tensor.shape)
# Normalize to [0, 255] for display.
pad_density_map_tensor = pad_density_map_tensor.squeeze(dim=0) / pad_density_map_tensor.max() * 255

print(img_tensor.dtype)
print(pad_density_map_tensor.dtype)

# Blend: boost the red channel by twice the (scaled) density, capped at 255.
overlay_density_map = img_tensor.detach().clone()
overlay_density_map[0] = torch.clamp_max(img_tensor[0] + pad_density_map_tensor[0] * 2, max=255)

write_png(overlay_density_map.type(torch.uint8), "../visualize/pic/overlay.png")
write_png(pad_density_map_tensor.type(torch.uint8), "../visualize/pic/pad.png")
File playground/video_with_torchvision.py added (mode: 100644) (index 0000000..26db1f5) |
|
"""Sample torchvision video I/O: read a video and dump one frame as a PNG."""
from torchvision.io.video import read_video, write_video
from torchvision.io import write_png
import os

video_path = "/home/tt/Videos/VID_20201202_133703_090.mp4"
out_path = "../visualize/vid"


def example_load_frame():
    """Read `video_path`, take frame 100, and write it to `out_path` as PNG.

    Side effects: prints tensor shapes and writes single_frame.png.
    """
    # FIX: use the module-level constant instead of duplicating the
    # hard-coded path string (same value, so behavior is unchanged).
    # pts_unit='sec' selects second-based timestamps for read_video.
    v, a, info = read_video(video_path, pts_unit='sec')
    print(v.shape)  # e.g. torch.Size([467, 1080, 1920, 3]) -- THWC layout
    # write a frame
    single_frame = v[100]
    print(single_frame.shape)  # torch.Size([1080, 1920, 3])
    single_frame = single_frame.permute(2, 0, 1)  # HWC -> CHW, as write_png expects
    print(single_frame.shape)
    file_out = os.path.join(out_path, "single_frame.png")
    write_png(single_frame, file_out)
    print("done write to ", file_out)