def build_cnn(self, input_var=None):
    """Construct the CIFAR-10-style CNN and return its output layer.

    Architecture: conv(64, 3x3) -> maxpool -> LRN -> conv(64, 5x5) -> LRN
    -> maxpool -> flatten -> dense(384) -> dense(192) -> softmax.

    :param input_var: optional Theano symbolic variable bound to the input
        layer; ``None`` lets Lasagne create one.
    :return: the final (softmax) Lasagne layer of the network.
    """
    # Input expects NCHW batches of 32x32 RGB images.
    network = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)

    # --- Conv1 -> Pool1 -> Norm1 ---
    # NOTE(review): HeNormal here vs. TF's truncated normal init — differs
    # from the TensorFlow reference; Lasagne's conv semantics also differ
    # from TensorFlow's (hence flip_filters=False).
    network = ConvLayer(network, num_filters=64, filter_size=(3, 3),
                        stride=(1, 1), pad='same', nonlinearity=rectify,
                        W=lasagne.init.HeNormal(), flip_filters=False)
    network = MaxPool2DLayer(network, pool_size=(3, 3), stride=(2, 2))
    network = LocalResponseNormalization2DLayer(network, alpha=0.001 / 9.0,
                                                k=1.0, beta=0.75)

    # --- Conv2 -> Norm2 -> Pool2 (normalization precedes pooling here) ---
    network = ConvLayer(network, num_filters=64, filter_size=(5, 5),
                        stride=(1, 1), pad='same', nonlinearity=rectify,
                        W=lasagne.init.HeNormal(), flip_filters=False)
    # NOTE(review): the LRN window size `n` must be odd in Lasagne, but the
    # reference code used n=4 — left at the default here; TODO confirm.
    network = LocalResponseNormalization2DLayer(network, alpha=0.001 / 9.0,
                                                k=1.0, beta=0.75)
    network = MaxPool2DLayer(network, pool_size=(3, 3), stride=(2, 2))

    # Flatten the feature maps before the fully-connected layers.
    network = lasagne.layers.ReshapeLayer(network, shape=([0], -1))

    # --- Fully-connected layers ---
    network = DenseLayer(network, num_units=384,
                         W=lasagne.init.HeNormal(),
                         b=lasagne.init.Constant(0.1))
    network = DenseLayer(network, num_units=192,
                         W=lasagne.init.Normal(std=0.04),
                         b=lasagne.init.Constant(0.1))

    # Softmax output sized to the task's class count.
    network = DenseLayer(network, num_units=self.output_size,
                         W=lasagne.init.Normal(std=1. / 192.0),
                         nonlinearity=softmax)
    return network