def forward(self, x):
    """Run the generator network on a batch of images.

    Pipeline: large-kernel downsizing conv -> stack of residual blocks ->
    upsampling stack -> final conv with a tanh rescaled to [0, 1].

    Args:
        x: input image tensor (presumably NCHW float — TODO confirm with caller).

    Returns:
        Tensor of the same batch layout with values in [0, 1]
        (tanh output mapped from [-1, 1] via (h + 1) / 2).
    """
    # Hard-coded switch between the two decoder variants; only the
    # upsampling path is ever taken as written (the transposed-conv
    # branch below is kept for reference / easy toggling).
    upblock = True

    # Downsizing layer - large kernel ensures a large receptive field
    # on the residual blocks.
    h = F.relu(self.b2(self.c1(x)))

    # Residual layers: pass through every residual block in order.
    for r in self.rs:
        h = r(h)

    if upblock:
        # Upsampling layers - improvement suggested by [2] to remove
        # the "checkerboard pattern" of transposed convolutions.
        for u in self.up:
            h = u(h)
    else:
        # Transposed-convolution decoder, as recommended by [1].
        h = F.relu(self.bc2(self.dc2(h)))
        h = F.relu(self.bc3(self.dc3(h)))

    # Last layer and scaled tanh activation - scaled to [0, 1]
    # instead of [-1, 1].  torch.tanh replaces the deprecated F.tanh.
    h = torch.tanh(self.c3(h))
    return (h + 1.0) * 0.5
# (scraped page artifact, not code: "评论列表" = comment list)
# (scraped page artifact, not code: "文章目录" = article table of contents)