File models/pacnn.py changed (mode: 100644) (index ed60f80..a16141c) |
1 |
1 |
import torch.nn as nn |
import torch.nn as nn |
2 |
2 |
import torch |
import torch |
|
3 |
|
|
3 |
4 |
from torchvision import models |
from torchvision import models |
4 |
5 |
import numpy as np |
import numpy as np |
|
6 |
|
import copy |
5 |
7 |
|
|
6 |
8 |
# SSIM loss function
7 |
9 |
|
|
|
... |
... |
class PACNN(nn.Module): |
27 |
29 |
de3 = self.de3_11((self.de3net(x))) |
de3 = self.de3_11((self.de3net(x))) |
28 |
30 |
return de1.squeeze(0), de2.squeeze(0), de3.squeeze(0) |
return de1.squeeze(0), de2.squeeze(0), de3.squeeze(0) |
29 |
31 |
|
|
|
32 |
|
|
|
33 |
|
class PACNNWithPerspectiveMap(nn.Module):
    """Perspective-Aware CNN (PACNN) crowd-density estimator.

    Three density maps are predicted from increasingly deep slices of a
    pretrained VGG-16 feature extractor:

      * ``de1`` -- backbone[0:23] (up through the conv4 block)
      * ``de2`` -- backbone[0:30] (up through the conv5 block)
      * ``de3`` -- the full feature stack plus an extra 3x3 conv (conv6_1_1)

    A fourth branch predicts a perspective map from a deep-copied conv5
    block. ``forward`` fuses the three density maps either with the
    perspective map (``perspective_aware_mode=True``, still unfinished)
    or by plain averaging after transposed-convolution upsampling.
    """

    def __init__(self):
        super(PACNNWithPerspectiveMap, self).__init__()
        self.backbone = models.vgg16(pretrained=True).features
        self.de1net = self.backbone[0:23]
        self.de2net = self.backbone[0:30]

        # de3 branch: the whole VGG feature stack followed by conv6_1_1.
        list_vgg16 = list(self.backbone)
        self.conv6_1_1 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        list_vgg16.append(self.conv6_1_1)
        self.de3net = nn.Sequential(*list_vgg16)

        # Perspective branch: shared trunk plus an independent copy of the
        # conv5 block so its weights can diverge from de2net's.
        self.conv5_2_3_stack = copy.deepcopy(self.backbone[23:30])
        self.perspective_net = nn.Sequential(self.backbone[0:23], self.conv5_2_3_stack)

        # 1x1 convolutions: 512 feature channels -> 1-channel map.
        self.de1_11 = nn.Conv2d(512, 1, kernel_size=1)
        self.de2_11 = nn.Conv2d(512, 1, kernel_size=1)
        self.de3_11 = nn.Conv2d(512, 1, kernel_size=1)
        self.perspective_11 = nn.Conv2d(512, 1, kernel_size=1)

        # Transposed-convolution upsampling of the density maps.
        # BUG FIX: these are applied to the 1-channel outputs of the 1x1
        # convolutions (see forward), so in_channels must be 1, not 512 --
        # the original ConvTranspose2d(512, 1, 2, 2) crashed with a
        # channel-mismatch error on every forward pass.
        self.up12 = nn.ConvTranspose2d(1, 1, 2, 2)
        self.up23 = nn.ConvTranspose2d(1, 1, 2, 2)

        # If True, use perspective-aware fusion; if False, use averaging.
        self.perspective_aware_mode = False

    def forward(self, x):
        """Return the fused single-channel density map for input batch ``x``."""
        de1 = self.de1_11(self.de1net(x))
        de2 = self.de2_11(self.de2net(x))
        de3 = self.de3_11(self.de3net(x))
        if self.perspective_aware_mode:
            # BUG FIX: the perspective net must be applied to x; the
            # original passed the module object itself as the argument,
            # which raised a TypeError.
            perspective = self.perspective_11(self.perspective_net(x))
            # TODO: perspective-weighted fusion is not implemented yet;
            # fall through to averaging so forward() always returns a map
            # instead of returning None.
        de23 = (de2 + self.up23(de3)) / 2
        de = (de1 + self.up12(de23)) / 2
        return de
|
77 |
|
|
30 |
78 |
def count_param(net):
    """Return the total number of scalar parameters held by ``net``."""
    total = 0
    for param in net.parameters():
        total += param.numel()
    return total
File models/playground.py changed (mode: 100644) (index ba0106f..4aa73cd) |
... |
... |
class M0(nn.Module): |
24 |
24 |
d = self.backbone(x) |
d = self.backbone(x) |
25 |
25 |
return d |
return d |
26 |
26 |
|
|
|
27 |
|
def deconvolutuion():
    """Demo: show how ConvTranspose2d(3, 1, 2, 2) doubles the spatial size."""
    upsample = nn.ConvTranspose2d(3, 1, 2, 2)
    sample = torch.rand((1, 3, 224, 224))
    result = upsample(sample)
    print(result.size())
27 |
32 |
|
|
28 |
|
if __name__ == "__main__":
    # Quick manual check of the transposed-convolution demo.
    deconvolutuion()