def _invert_PadLayer(self, layer, feeder):
    """Invert a PadLayer by slicing the padded margins back off `feeder`.

    Parameters
    ----------
    layer : L.PadLayer
        The forward padding layer to invert; must pad with batch_ndim == 2
        (i.e. only the two spatial axes 2 and 3 of a 4D tensor).
    feeder : lasagne layer
        The backward-pass layer whose output has the padded spatial extent.

    Returns
    -------
    L.SliceLayer
        A layer producing the unpadded spatial extent.
    """
    assert isinstance(layer, L.PadLayer)
    assert layer.batch_ndim == 2
    assert len(L.get_output_shape(layer)) == 4
    # NOTE(review): slice(a, -b) is empty when b == 0, so this assumes every
    # pad width is strictly positive -- confirm against how PadLayer is built.
    tmp = L.SliceLayer(feeder,
                       slice(layer.width[0][0], -layer.width[0][1]),
                       axis=2)
    return L.SliceLayer(tmp,
                        slice(layer.width[1][0], -layer.width[1][1]),
                        axis=3)
# Example source code using the Python class PadLayer()
def _invert_layer(self, layer, feeder):
    """Route `layer` to its type-specific inversion method.

    If the feeder's output shape disagrees with the layer's, the feeder is
    first reshaped (keeping the batch axis free) to match. Unrecognized
    layer types fall through to `_invert_UnknownLayer`.
    """
    if L.get_output_shape(feeder) != L.get_output_shape(layer):
        target_shape = (-1,) + L.get_output_shape(layer)[1:]
        feeder = L.ReshapeLayer(feeder, target_shape)

    layer_type = type(layer)

    # Both pooling variants share one inversion routine.
    if layer_type in (L.MaxPool2DLayer, L.MaxPool1DLayer):
        return self._invert_MaxPoolingLayer(layer, feeder)

    # Exact-type dispatch table (dict lookup matches the original `is` tests).
    dispatch = {
        L.InputLayer: self._invert_InputLayer,
        L.FlattenLayer: self._invert_FlattenLayer,
        L.DenseLayer: self._invert_DenseLayer,
        L.Conv2DLayer: self._invert_Conv2DLayer,
        L.DropoutLayer: self._invert_DropoutLayer,
        L.PadLayer: self._invert_PadLayer,
        L.SliceLayer: self._invert_SliceLayer,
        L.LocalResponseNormalization2DLayer:
            self._invert_LocalResponseNormalisation2DLayer,
        L.GlobalPoolLayer: self._invert_GlobalPoolLayer,
    }
    handler = dispatch.get(layer_type, self._invert_UnknownLayer)
    return handler(layer, feeder)
def get_conv_xy(layer, deterministic=True):
    """Sample one random receptive-field patch from a conv layer's input and
    compute the corresponding dense output.

    Parameters
    ----------
    layer : lasagne Conv2DLayer-like
        Must expose `W`, `b`, `pad`, `flip_filters`, and `input_layer`.
    deterministic : bool
        Forwarded to `L.get_output` (e.g. disables dropout upstream).

    Returns
    -------
    (x, y) : theano expressions
        x : flattened input patch, shape (N, D)
        y : x dotted with the (flattened, possibly flipped) filters plus
            bias, shape (N, O)
    """
    w_np = layer.W.get_value()
    input_layer = layer.input_layer
    if layer.pad == 'same':
        # Integer floor division: PadLayer needs integral widths (true
        # division would produce floats under Python 3 / future division;
        # get_conv_xy_all already uses // for the same computation).
        input_layer = L.PadLayer(layer.input_layer,
                                 width=np.array(w_np.shape[2:]) // 2,
                                 batch_ndim=2)
    input_shape = L.get_output_shape(input_layer)
    # Largest valid top-left corner for a full filter-sized patch
    # (random_integers draws inclusively from [low, high]).
    max_x = input_shape[2] - w_np.shape[2]
    max_y = input_shape[3] - w_np.shape[3]
    srng = RandomStreams()
    patch_x = srng.random_integers(low=0, high=max_x)
    patch_y = srng.random_integers(low=0, high=max_y)
    x = L.get_output(input_layer, deterministic=deterministic)
    x = x[:, :,
          patch_x:patch_x + w_np.shape[2], patch_y:patch_y + w_np.shape[3]]
    x = T.flatten(x, 2)  # N,D
    w = layer.W
    if layer.flip_filters:
        # Match convolution (vs. correlation) filter orientation.
        w = w[:, :, ::-1, ::-1]
    w = T.flatten(w, outdim=2).T  # D,O
    y = T.dot(x, w)  # N,O
    if layer.b is not None:
        y += T.shape_padaxis(layer.b, axis=0)
    return x, y
def get_conv_xy_all(layer, deterministic=True):
    """Extract ALL receptive-field patches of a conv layer's input and the
    corresponding dense outputs.

    Parameters
    ----------
    layer : lasagne Conv2DLayer-like
        Must expose `W`, `b`, `pad`, `stride`, `filter_size`,
        `flip_filters`, and `input_layer`.
    deterministic : bool
        Forwarded to `L.get_output` (e.g. disables dropout upstream).

    Returns
    -------
    (x, y) : theano expressions
        x : all flattened patches, shape (N * positions, D)
        y : x dotted with the (flattened, possibly flipped) filters plus
            bias, shape (N * positions, O)
    """
    w_np = layer.W.get_value()
    w = layer.W
    if layer.flip_filters:
        # Match convolution (vs. correlation) filter orientation.
        w = w[:, :, ::-1, ::-1]
    input_layer = layer.input_layer
    if layer.pad == 'same':
        input_layer = L.PadLayer(layer.input_layer,
                                 width=np.array(w_np.shape[2:]) // 2,
                                 batch_ndim=2)
    input_shape = L.get_output_shape(input_layer)
    output_shape = L.get_output_shape(layer)
    # BUGFIX: forward the `deterministic` argument instead of hardcoding
    # True, consistent with get_conv_xy.
    x_orig = L.get_output(input_layer, deterministic=deterministic)
    # images2neibs enumerates every valid filter-sized window per channel.
    x = theano.tensor.nnet.neighbours.images2neibs(x_orig,
                                                   neib_shape=layer.filter_size,
                                                   neib_step=layer.stride,
                                                   mode='valid')
    x = T.reshape(x, (x_orig.shape[0], -1,
                      np.prod(output_shape[2:]), np.prod(w_np.shape[2:])))
    # Bring the spatial-position axis before channels, then flatten each
    # patch into one row.
    x = T.transpose(x, (0, 2, 1, 3))
    x = T.reshape(x, (-1, T.prod(x.shape[2:])))
    w = T.flatten(w, outdim=2).T  # D,O
    y = T.dot(x, w)  # N,O
    if layer.b is not None:
        y += T.shape_padaxis(layer.b, axis=0)
    return x, y