def forward(self, input):
    # Slice the running statistics and affine parameters down to the number of
    # channels in the incoming tensor before normalizing.
    return F.batch_norm(input,
                        running_mean=self.running_mean[:input.size(1)],
                        running_var=self.running_var[:input.size(1)],
                        weight=self.weight[:input.size(1)],
                        bias=self.bias[:input.size(1)],
                        training=self.training)
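# For context, the enclosing module presumably allocates its affine parameters
# and running statistics at some maximum channel count, so a single layer can
# normalize inputs of any width up to that maximum. A minimal sketch, assuming
# hypothetical names (SliceableBatchNorm, max_channels) and standard BatchNorm
# initialization rather than the actual SMASH source:
import torch
import torch.nn as nn
import torch.nn.functional as F

class SliceableBatchNorm(nn.Module):
    def __init__(self, max_channels):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(max_channels))
        self.bias = nn.Parameter(torch.zeros(max_channels))
        self.register_buffer('running_mean', torch.zeros(max_channels))
        self.register_buffer('running_var', torch.ones(max_channels))

    def forward(self, input):
        c = input.size(1)  # slice everything to the incoming channel count
        return F.batch_norm(input, self.running_mean[:c], self.running_var[:c],
                            self.weight[:c], self.bias[:c], training=self.training)

# Usage: SliceableBatchNorm(64) can normalize, e.g., an (8, 32, 16, 16) input,
# touching only the first 32 entries of each parameter and running statistic.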
# A convenience wrapper to prevent the forward() method of SMASH from
# being annoyingly verbose. This version of a fully-connected layer simply
# slices its weights according to the size of the incoming tensor.
# Note that the bias does not need slicing, as it's defined wrt the output dim.
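# The fully-connected class the comment above refers to is not reproduced here;
# a minimal sketch of such a layer, with hypothetical names (SliceableLinear,
# max_in_features) and placeholder initialization rather than the actual SMASH
# source, reusing the torch imports above:
class SliceableLinear(nn.Module):
    def __init__(self, max_in_features, out_features):
        super().__init__()
        # Weight is allocated at the maximum input width; the bias is sized to
        # the output dimension and therefore never needs slicing.
        self.weight = nn.Parameter(0.01 * torch.randn(out_features, max_in_features))
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, input):
        # Slice the input dimension of the weight to match the incoming tensor.
        return F.linear(input, self.weight[:, :input.size(1)], self.bias)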