def forward(self, x):
    # If this block has been frozen, force eval mode so that batch norm
    # uses its running statistics rather than the current batch's.
    if not self.active:
        self.eval()
    if not self.equalInOut:
        x = self.relu1(self.bn1(x))
    else:
        out = self.relu1(self.bn1(x))
    out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
    if self.droprate > 0:
        out = F.dropout(out, p=self.droprate, training=self.training)
    out = self.conv2(out)
    # Residual connection; project x through the shortcut conv when the
    # input and output shapes differ.
    out = torch.add(x if self.equalInOut else self.convShortcut(x), out)
    if self.active:
        return out
    else:
        # Frozen block: detach so no gradients flow back through it.
        return out.detach()
# Note: we call it DenseNet for simple compatibility with the training code.
# Similarly, we call it growthRate instead of widen_factor.
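For context, here is a minimal sketch of how the `active` flag could be driven from the training loop, in the spirit of FreezeOut-style freezing where blocks near the input stop training first. The `model.blocks` attribute and the `freeze_epochs` schedule are illustrative assumptions, not part of the original code:

def apply_freeze_schedule(model, epoch, freeze_epochs):
    # Assumed helper: `model.blocks` is an iterable of the residual blocks
    # defined above, and freeze_epochs[i] is the epoch at which block i
    # should be frozen for the rest of training.
    for block, freeze_at in zip(model.blocks, freeze_epochs):
        block.active = epoch < freeze_at

Because a frozen block both switches to eval mode and detaches its output, backpropagation stops at that block entirely. Note that detaching also cuts the gradient path to every layer before it, which is why a schedule like this freezes blocks from the input side first.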