def _invert_layer(self, layer, feeder):
    """Dispatch to the _invert_* handler that matches the type of `layer`."""
    # `L` is assumed to be `lasagne.layers` (i.e. `import lasagne.layers as L`).
    layer_type = type(layer)
    # Reshape the feeder if its output shape differs from that of the layer to invert.
    if L.get_output_shape(feeder) != L.get_output_shape(layer):
        feeder = L.ReshapeLayer(feeder, (-1,) + L.get_output_shape(layer)[1:])
    if layer_type is L.InputLayer:
        return self._invert_InputLayer(layer, feeder)
    elif layer_type is L.FlattenLayer:
        return self._invert_FlattenLayer(layer, feeder)
    elif layer_type is L.DenseLayer:
        return self._invert_DenseLayer(layer, feeder)
    elif layer_type is L.Conv2DLayer:
        return self._invert_Conv2DLayer(layer, feeder)
    elif layer_type is L.DropoutLayer:
        return self._invert_DropoutLayer(layer, feeder)
    elif layer_type in [L.MaxPool2DLayer, L.MaxPool1DLayer]:
        return self._invert_MaxPoolingLayer(layer, feeder)
    elif layer_type is L.PadLayer:
        return self._invert_PadLayer(layer, feeder)
    elif layer_type is L.SliceLayer:
        return self._invert_SliceLayer(layer, feeder)
    elif layer_type is L.LocalResponseNormalization2DLayer:
        return self._invert_LocalResponseNormalisation2DLayer(layer, feeder)
    elif layer_type is L.GlobalPoolLayer:
        return self._invert_GlobalPoolLayer(layer, feeder)
    else:
        # Fallback for layer types without a dedicated inverse.
        return self._invert_UnknownLayer(layer, feeder)
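
The _invert_* handlers themselves are not part of this excerpt. As a minimal, hypothetical sketch of what one could look like, the following _invert_DenseLayer body (an assumption, not the original implementation) delegates to Lasagne's InverseLayer, which applies the backward pass of `layer` to the output of `feeder`:

def _invert_DenseLayer(self, layer, feeder):
    # Hypothetical handler: InverseLayer(incoming, layer) propagates the
    # incoming activations through the gradient of `layer`, which inverts
    # the dense layer's forward mapping in the deconvolution sense.
    return L.InverseLayer(feeder, layer)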
Example source code for the Python class LocalResponseNormalization2DLayer()
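For reference, Lasagne's LocalResponseNormalization2DLayer normalizes across channels: each activation is divided by (k + alpha * sum of squared activations over the n adjacent channels) ** beta, with defaults n=5, k=2, alpha=0.0001, beta=0.75. The examples below override k and alpha.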
def build_cnn(self, input_var=None):
    # Build the network. Assumed module-level imports (not shown in this excerpt):
    # `import lasagne`, `from lasagne.layers import InputLayer, DenseLayer,
    # MaxPool2DLayer, LocalResponseNormalization2DLayer, Conv2DLayer as ConvLayer`,
    # and `from lasagne.nonlinearities import rectify, softmax`.
    layer_in = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    # Conv1
    # [NOTE]: normal vs. truncated normal?
    # [NOTE]: convolution in Lasagne is not the same as in TensorFlow: Lasagne
    # flips filters by default, so flip_filters=False gives TF-style cross-correlation.
    layer = ConvLayer(layer_in, num_filters=64, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify,
                      pad='same', W=lasagne.init.HeNormal(), flip_filters=False)
    # Pool1
    layer = MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2))
    # Norm1
    layer = LocalResponseNormalization2DLayer(layer, alpha=0.001 / 9.0, k=1.0, beta=0.75)
    # Conv2
    layer = ConvLayer(layer, num_filters=64, filter_size=(5, 5), stride=(1, 1), nonlinearity=rectify,
                      pad='same', W=lasagne.init.HeNormal(), flip_filters=False)
    # Norm2
    # [NOTE]: n must be odd, but n in Chang's code is 4? (TensorFlow's
    # depth_radius=4 corresponds to a window of n=9, hence alpha=0.001/9.)
    layer = LocalResponseNormalization2DLayer(layer, alpha=0.001 / 9.0, k=1.0, beta=0.75)
    # Pool2
    layer = MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2))
    # Reshape: flatten all dimensions after the batch axis.
    layer = lasagne.layers.ReshapeLayer(layer, shape=([0], -1))
    # Dense3
    layer = DenseLayer(layer, num_units=384, W=lasagne.init.HeNormal(), b=lasagne.init.Constant(0.1))
    # Dense4
    layer = DenseLayer(layer, num_units=192, W=lasagne.init.Normal(std=0.04), b=lasagne.init.Constant(0.1))
    # Softmax output
    layer = DenseLayer(layer, num_units=self.output_size,
                       W=lasagne.init.Normal(std=1. / 192.0), nonlinearity=softmax)
    return layer
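
A hypothetical training setup for this network with Theano (`model`, `X_batch`, and `y_batch` are illustrative names, not part of the source):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = model.build_cnn(input_var)  # `model`: an assumed instance of the class above
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.momentum(loss, params, learning_rate=0.1, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# loss_value = train_fn(X_batch, y_batch)  # X_batch: (N, 3, 32, 32) floatX, y_batch: (N,) int32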
def build_model(self, input_var, forward, dropout):
    # Assumed aliases (not shown here), presumably as in the Lasagne model-zoo
    # GoogLeNet: ConvLayer = Conv2DDNNLayer, PoolLayer = MaxPool2DLayer,
    # PoolLayerDNN = MaxPool2DDNNLayer, LRNLayer = LocalResponseNormalization2DLayer.
    net = dict()
    net['input'] = InputLayer((None, 3, None, None), input_var=input_var)
    net['conv1/7x7_s2'] = ConvLayer(
        net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(
        net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(
        net['pool1/norm1'], 64, 1, flip_filters=False)
    net['conv2/3x3'] = ConvLayer(
        net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayerDNN(net['conv2/norm2'], pool_size=3, stride=2)
    # The six filter counts per inception module are presumably ordered
    # [pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5], as in the Lasagne
    # model-zoo GoogLeNet (build_inception_module is not shown in this excerpt).
    net.update(self.build_inception_module('inception_3a',
                                           net['pool2/3x3_s2'],
                                           [32, 64, 96, 128, 16, 32]))
    net.update(self.build_inception_module('inception_3b',
                                           net['inception_3a/output'],
                                           [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayerDNN(net['inception_3b/output'],
                                       pool_size=3, stride=2)
    net.update(self.build_inception_module('inception_4a',
                                           net['pool3/3x3_s2'],
                                           [64, 192, 96, 208, 16, 48]))
    net.update(self.build_inception_module('inception_4b',
                                           net['inception_4a/output'],
                                           [64, 160, 112, 224, 24, 64]))
    net.update(self.build_inception_module('inception_4c',
                                           net['inception_4b/output'],
                                           [64, 128, 128, 256, 24, 64]))
    net.update(self.build_inception_module('inception_4d',
                                           net['inception_4c/output'],
                                           [64, 112, 144, 288, 32, 64]))
    net.update(self.build_inception_module('inception_4e',
                                           net['inception_4d/output'],
                                           [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayerDNN(net['inception_4e/output'],
                                       pool_size=3, stride=2)
    net.update(self.build_inception_module('inception_5a',
                                           net['pool4/3x3_s2'],
                                           [128, 256, 160, 320, 32, 128]))
    net.update(self.build_inception_module('inception_5b',
                                           net['inception_5a/output'],
                                           [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    if forward:
        #net['fc6'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000)
        net['prob'] = DenseLayer(net['pool5/7x7_s1'], num_units=4, nonlinearity=softmax)
    else:
        # Apply dropout before the classifier when training.
        net['dropout1'] = DropoutLayer(net['pool5/7x7_s1'], p=dropout)
        #net['fc6'] = DenseLayer(net['dropout1'], num_units=1000)
        #net['dropout2'] = DropoutLayer(net['fc6'], p=dropout)
        net['prob'] = DenseLayer(net['dropout1'], num_units=4, nonlinearity=softmax)
    return net
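
A hypothetical inference call against the returned layer dictionary (`builder` and the 224x224 input size are illustrative assumptions; the (None, 3, None, None) input layer accepts other spatial sizes too):

import numpy as np
import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
net = builder.build_model(input_var, forward=True, dropout=0.4)  # `builder`: assumed instance
prob = lasagne.layers.get_output(net['prob'], deterministic=True)
predict_fn = theano.function([input_var], prob)
images = np.zeros((1, 3, 224, 224), dtype=theano.config.floatX)
print(predict_fn(images).shape)  # (1, 4): softmax over the four output classes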