# Excerpt: methods of an inverter class (`self.epsilon` and `self._put_rectifiers`
# are defined elsewhere on that class).
# Assumed module-level imports, not shown in the original snippet:
#   import numpy as np
#   import theano
#   import theano.tensor as T
#   from lasagne import layers as L
def _get_normalised_relevance_layer(self, layer, feeder):
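    """
    Wrap `feeder` so the incoming relevance is divided by the epsilon-stabilised
    pre-activations of `layer`: the layer is re-instantiated without its
    nonlinearity, its output Z is mapped to 1 / (Z + epsilon * sign(Z)), and the
    result is multiplied element-wise with `feeder`.
    """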
    def add_epsilon(Zs):
        # Epsilon stabiliser that keeps the sign of Zs (sign(0) is treated as +1).
        tmp = (T.cast(Zs >= 0, theano.config.floatX) * 2.0 - 1.0)
        return Zs + self.epsilon * tmp
    if isinstance(layer, L.DenseLayer):
        forward_layer = L.DenseLayer(layer.input_layer,
                                     layer.num_units,
                                     W=layer.W,
                                     b=layer.b,
                                     nonlinearity=None)
    elif isinstance(layer, L.Conv2DLayer):
        forward_layer = L.Conv2DLayer(layer.input_layer,
                                      num_filters=layer.num_filters,
                                      W=layer.W,
                                      b=layer.b,
                                      stride=layer.stride,
                                      filter_size=layer.filter_size,
                                      flip_filters=layer.flip_filters,
                                      untie_biases=layer.untie_biases,
                                      pad=layer.pad,
                                      nonlinearity=None)
    else:
        raise NotImplementedError()
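    # 1 / (Z + epsilon * sign(Z)): invert the stabilised pre-activations and
    # weight the incoming relevance with them element-wise.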
    forward_layer = L.ExpressionLayer(forward_layer,
                                      lambda x: 1.0 / add_epsilon(x))
    feeder = L.ElemwiseMergeLayer([forward_layer, feeder],
                                  merge_function=T.mul)
    return feeder
def _invert_DenseLayer(self, layer, feeder):
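    """
    Propagate relevance back through a DenseLayer: the normalised relevance is
    projected back to the (flattened) input shape with a new DenseLayer and
    multiplied element-wise with the layer's input.  The projection weights are
    exposed as `output_layer.W`, presumably so the caller can tie them to the
    forward weights.
    """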
    # Warning: `feeder` and `layer` are swapped here.
    feeder = self._put_rectifiers(feeder, layer)
    feeder = self._get_normalised_relevance_layer(layer, feeder)
    # Project the relevance back onto the (flattened) input of `layer`.
    output_units = np.prod(L.get_output_shape(layer.input_layer)[1:])
    output_layer = L.DenseLayer(feeder, num_units=output_units)
    W = output_layer.W
    # Multiply element-wise with the layer's input activations.
    tmp_shape = np.asarray((-1,) + L.get_output_shape(output_layer)[1:])
    x_layer = L.ReshapeLayer(layer.input_layer, tmp_shape.tolist())
    output_layer = L.ElemwiseMergeLayer(incomings=[x_layer, output_layer],
                                        merge_function=T.mul)
    # Expose the projection weights on the merged layer.
    output_layer.W = W
    return output_layer
def _invert_Conv2DLayer(self, layer, feeder):
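    """
    Propagate relevance back through a Conv2DLayer with a flipped-filter
    convolution (a deconvolution).  For stride (4, 4) the feeder is first
    dilated by the stride and the surplus border is cropped afterwards.  As in
    `_invert_DenseLayer`, the convolution weights are exposed as
    `output_layer.W` for the caller.
    """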
    # Warning: `feeder` and `layer` are swapped here.
    feeder = self._put_rectifiers(feeder, layer)
    feeder = self._get_normalised_relevance_layer(layer, feeder)
    f_s = layer.filter_size
    # Map the forward padding to the padding of the inverse convolution.
    if layer.pad == 'same':
        pad = 'same'
    elif layer.pad == 'valid' or layer.pad == (0, 0):
        pad = 'full'
    else:
        raise RuntimeError("Unsupported padding: use 'same' or 'valid'.")
    # By definition, flip_filters must be on for this to be a proper
    # deconvolution.
    num_filters = L.get_output_shape(layer.input_layer)[1]
    if layer.stride == (4, 4):
        # Todo: similar code exists in the gradient-based explainers; merge.
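        # Emulate the stride-(4, 4) transposed convolution: dilate the feeder
        # by the stride (inserting zeros), convolve with stride 1, and crop the
        # surplus border afterwards.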
        feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
        output_layer = L.Conv2DLayer(feeder,
                                     num_filters=num_filters,
                                     filter_size=f_s,
                                     stride=1,
                                     pad=pad,
                                     nonlinearity=None,
                                     b=None,
                                     flip_filters=True)
        conv_layer = output_layer
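        # Crop the trailing rows/columns (presumably the stride - 1 = 3 zero
        # rows/columns appended by the dilation above).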
        tmp = L.SliceLayer(output_layer, slice(0, -3), axis=3)
        output_layer = L.SliceLayer(tmp, slice(0, -3), axis=2)
        output_layer.W = conv_layer.W
    else:
        output_layer = L.Conv2DLayer(feeder,
                                     num_filters=num_filters,
                                     filter_size=f_s,
                                     stride=1,
                                     pad=pad,
                                     nonlinearity=None,
                                     b=None,
                                     flip_filters=True)
    W = output_layer.W
    # Multiply element-wise with the layer's input activations.
    x_layer = L.ReshapeLayer(layer.input_layer,
                             (-1,) + L.get_output_shape(output_layer)[1:])
    output_layer = L.ElemwiseMergeLayer(incomings=[x_layer, output_layer],
                                        merge_function=T.mul)
    # Expose the convolution weights on the merged layer.
    output_layer.W = W
    return output_layer