def set_output(self):
    """Compute this layer's output expression.

    Optionally zero-pads the input along the two spatial axes (using
    ``self._padding[2]`` and ``self._padding[3]``), then applies a
    'valid'-mode 2D convolution with the layer weights and adds the
    per-filter bias. The result is stored in ``self._output``.
    """
    pad_h = self._padding[2]
    pad_w = self._padding[3]
    if sum(self._padding) > 0:
        # Shape after padding: batch and channel axes unchanged, each
        # spatial axis grown by twice its padding amount.
        padded_shape = [self._input_shape[0],
                        self._input_shape[1],
                        self._input_shape[2] + 2 * pad_h,
                        self._input_shape[3] + 2 * pad_w]
        # Allocate an all-zero tensor of the padded size and paste the
        # previous layer's output into its centre.
        zeros = tensor.alloc(0.0, *padded_shape)
        padded = tensor.set_subtensor(
            zeros[:, :,
                  pad_h:pad_h + self._input_shape[2],
                  pad_w:pad_w + self._input_shape[3]],
            self._prev_layer.output)
    else:
        padded = self._prev_layer.output
        padded_shape = self._input_shape
    # NOTE(review): image_shape is cast to int16, which silently wraps
    # for dimensions > 32767 — presumably fine for the expected input
    # sizes here, but worth confirming.
    conv_out = conv.conv2d(
        input=padded,
        filters=self.W.val,
        filter_shape=self._filter_shape,
        image_shape=np.asarray(padded_shape, dtype=np.int16),
        border_mode='valid')
    # The bias is a 1D vector (one entry per filter); reshape it to
    # (1, n_filters, 1, 1) so it broadcasts across the mini-batch and
    # the feature-map width & height.
    self._output = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
# (removed scraped web-page navigation text: "comment list" / "article table of contents")