File data_flow.py changed (mode: 100644) (index 029f79b..812e169) |
... |
... |
def get_dataloader(train_list, val_list, test_list, dataset_name="shanghaitech", |
1456 |
1456 |
def simple_predict_data_load_fn(img_path):
    """Load one image for prediction, plus a debug-info dict.

    Parameters
    ----------
    img_path : str
        Path to an image file.

    Returns
    -------
    tuple
        ``(img_origin, debug_info)`` where ``img_origin`` is the PIL image
        converted to RGB and ``debug_info`` carries ``"img_path"``,
        ``"name"`` (basename) and ``"id"`` (trailing ``_<id>`` token of the
        stem, e.g. ``"IMG_123.jpg" -> "123"``).
    """
    import os  # local import keeps this helper self-contained

    # os.path.basename is portable; split("/") would break on Windows paths
    img_name = os.path.basename(img_path)
    # when debugging, expose the image name and its numeric id
    img_id = img_name.split(".")[0].split("_")[-1]
    debug_info = {"img_path": img_path,
                  "name": img_name,
                  "id": img_id}
    img_origin = Image.open(img_path).convert('RGB')
    return img_origin, debug_info
|
|
File demo_app/predict_only.py changed (mode: 100644) (index 6d18a33..d677298) |
1 |
1 |
import os |
import os |
|
2 |
|
import torch |
2 |
3 |
from data_flow import get_predict_dataloader |
from data_flow import get_predict_dataloader |
|
4 |
|
from models.dccnn import DCCNN |
|
5 |
|
from visualize_util import save_density_map_normalize, save_density_map |
|
6 |
|
|
3 |
7 |
if __name__ == "__main__":
    """
    predict all in folder
    output into another folder
    output density map and count in csv
    """
    NAME = "bao2"
    # INPUT_FOLDER = "/data/ShanghaiTech/part_B/test_data/images/"
    INPUT_FOLDER = "/home/tt/Downloads/bao2"
    OUTPUT_FOLDER = "/data/apps/tmp"
    MODEL = "/home/tt/project/C-3-folder/trained_model/adamw1_bigtail13i_t1_shb_checkpoint_valid_mae=-7.574910521507263.pth"

    # `entry` instead of `dir`: do not shadow the builtin
    input_list = [os.path.join(INPUT_FOLDER, entry) for entry in os.listdir(INPUT_FOLDER)]
    loader = get_predict_dataloader(input_list)

    # map_location lets a GPU-trained checkpoint load on a CPU-only machine
    loaded_file = torch.load(MODEL, map_location=torch.device("cpu"))
    model = DCCNN()
    model.load_state_dict(loaded_file['model'])
    model.eval()

    # exist_ok avoids FileExistsError on a rerun (os.mkdir crashed here)
    os.makedirs(os.path.join(OUTPUT_FOLDER, NAME), exist_ok=True)

    limit_count = 100  # safety cap on the number of images predicted
    count = 0
    # `with` guarantees the log file is flushed and closed even on error
    with open(os.path.join(OUTPUT_FOLDER, NAME, NAME + ".log"), 'w') as log_file:
        # inference only: skip autograd bookkeeping
        with torch.no_grad():
            for img, info in loader:
                if count > limit_count:
                    break
                predict_name = "PRED_" + info["name"][0]
                predict_path = os.path.join(OUTPUT_FOLDER, NAME, predict_name)
                pred = model(img)
                pred = pred.detach().numpy()[0][0]
                pred_count = pred.sum()
                # csv-style line: <image name>,<predicted count>
                log_line = info["name"][0] + "," + str(pred_count.item()) + "\n"
                log_file.write(log_line)
                save_density_map(pred, predict_path)
                print("save to ", predict_path)
                count += 1
File models/dccnn.py added (mode: 100644) (index 0000000..87b5e97) |
|
1 |
|
import torch.nn as nn |
|
2 |
|
import torch |
|
3 |
|
import collections |
|
4 |
|
import torch.nn.functional as F |
|
5 |
|
|
|
6 |
|
class DCCNN(nn.Module):
    """
    A REAL-TIME DEEP NETWORK FOR CROWD COUNTING
    https://arxiv.org/pdf/2002.06515.pdf

    Three parallel front-end convolutions (red/green/blue branches) are
    concatenated, then refined by a dilated back-end; two stride-2 poolings
    plus the initial max-pool downsample the input by 8x before a 1x1
    convolution emits a single-channel density map.
    """

    def __init__(self, load_weights=False):
        # NOTE(review): load_weights is kept for interface compatibility
        # but is not used anywhere in this class.
        super(DCCNN, self).__init__()
        self.model_note = "BigTail12i, batchnorm default setting, add bn red, green, blue, i mean discard inplace"

        # front-end: three parallel branches with different receptive fields
        self.red_cnn = nn.Conv2d(3, 10, 9, padding=4)
        self.green_cnn = nn.Conv2d(3, 14, 7, padding=3)
        self.blue_cnn = nn.Conv2d(3, 16, 5, padding=2)

        # back-end: dilated 3x3 convs (padding=2 preserves spatial size)
        self.c0 = nn.Conv2d(40, 40, 3, padding=2, dilation=2)

        self.max_pooling = nn.MaxPool2d(kernel_size=2, stride=2)
        self.avg_pooling = nn.AvgPool2d(kernel_size=2, stride=2)

        self.c1 = nn.Conv2d(40, 60, 3, padding=2, dilation=2)
        self.c2 = nn.Conv2d(60, 40, 3, padding=2, dilation=2)
        self.c3 = nn.Conv2d(40, 20, 3, padding=2, dilation=2)
        self.c4 = nn.Conv2d(20, 10, 3, padding=2, dilation=2)
        self.output = nn.Conv2d(10, 1, 1)

        # one batch norm per color branch
        self.bn_red = nn.BatchNorm2d(10)
        self.bn_green = nn.BatchNorm2d(14)
        self.bn_blue = nn.BatchNorm2d(16)

        # batch norms after the concat and after each back-end conv
        self.bn00 = nn.BatchNorm2d(40)
        self.bn0 = nn.BatchNorm2d(40)
        self.bn1 = nn.BatchNorm2d(60)
        self.bn2 = nn.BatchNorm2d(40)
        self.bn3 = nn.BatchNorm2d(20)
        self.bn4 = nn.BatchNorm2d(10)

    def forward(self, x):
        # run the three color branches in parallel, fuse by channel concat
        branch_pairs = ((self.red_cnn, self.bn_red),
                        (self.green_cnn, self.bn_green),
                        (self.blue_cnn, self.bn_blue))
        fused = torch.cat([bn(F.relu(conv(x))) for conv, bn in branch_pairs], 1)

        out = self.max_pooling(self.bn00(fused))   # /2

        out = self.bn0(F.relu(self.c0(out)))
        out = self.bn1(F.relu(self.c1(out)))
        out = self.avg_pooling(out)                # /4

        out = self.bn2(F.relu(self.c2(out)))
        out = self.bn3(F.relu(self.c3(out)))
        out = self.avg_pooling(out)                # /8

        out = self.bn4(F.relu(self.c4(out)))
        return self.output(out)
File visualize_util.py changed (mode: 100644) (index 1b0f845..449bb48) |
... |
... |
def save_density_map(density_map, name): |
17 |
17 |
plt.savefig(name, dpi=600, bbox_inches='tight', pad_inches=0) |
plt.savefig(name, dpi=600, bbox_inches='tight', pad_inches=0) |
18 |
18 |
plt.close() |
plt.close() |
19 |
19 |
|
|
|
20 |
|
|
|
21 |
|
def save_density_map_normalize(density_map, name):
    """Save *density_map* to *name* as a jet-colormap image, peak-normalized.

    Bug fix: the original computed ``density_map.max(density_map + 1e-20)``,
    which passes an array as ndarray.max's ``axis`` argument and raises a
    TypeError. The intended reduction is ``np.max(density_map + 1e-20)``
    (the epsilon guards against division by zero on an all-zero map),
    matching save_density_map_with_colorrange_max below.
    """
    den = density_map / np.max(density_map + 1e-20)
    plt.figure(dpi=600)
    plt.axis('off')
    plt.margins(0, 0)
    plt.imshow(den, cmap=CM.jet)
    plt.savefig(name, dpi=600, bbox_inches='tight', pad_inches=0)
    plt.close()
|
29 |
|
|
|
30 |
|
|
|
31 |
|
|
20 |
32 |
def save_density_map_with_colorrange(density_map, name, vmin, vmax): |
def save_density_map_with_colorrange(density_map, name, vmin, vmax): |
21 |
33 |
plt.figure(dpi=600) |
plt.figure(dpi=600) |
22 |
34 |
plt.axis('off') |
plt.axis('off') |
|
... |
... |
def save_density_map_with_colorrange(density_map, name, vmin, vmax): |
26 |
38 |
plt.savefig(name, dpi=600, bbox_inches='tight', pad_inches=0) |
plt.savefig(name, dpi=600, bbox_inches='tight', pad_inches=0) |
27 |
39 |
plt.close() |
plt.close() |
28 |
40 |
|
|
|
41 |
|
def save_density_map_with_colorrange_max(density_map, name, vmin, vmax):
    """Save a peak-normalized density map with a fixed color range.

    The map is divided by its (epsilon-shifted) maximum so values land in
    [0, 1], then rendered with the jet colormap and clamped to
    [vmin, vmax] before being written to *name*.
    """
    normalized = density_map / np.max(density_map + 1e-20)
    plt.figure(dpi=600)
    plt.axis('off')
    plt.margins(0, 0)
    plt.imshow(normalized, cmap=CM.jet)
    plt.clim(vmin, vmax)
    plt.savefig(name, dpi=600, bbox_inches='tight', pad_inches=0)
    plt.close()
|
50 |
|
|
|
51 |
|
|
29 |
52 |
def save_img(imgnp, name):
    """Write the first image of a batched (N, C, H, W) tensor to *name*."""
    # move channels last: imsave expects an (H, W, C) array
    hwc = imgnp[0].permute(1, 2, 0).numpy()
    plt.imsave(name, hwc)