def createCNN(self):
    # Relies on module-level imports: import lasagne;
    # from lasagne.layers import Conv2DLayer, batch_norm; from lasagne.init import GlorotUniform
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))

    # STAGE 1: VGG-style feature extractor -- four blocks of two batch-normalized
    # 3x3 convolutions (Glorot-initialized with ReLU gain) followed by 2x2 pooling,
    # doubling the number of filters in each block.
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

    # Fully connected head: dropout, a 256-unit dense layer, and a linear output
    # of 136 values (68 landmarks x 2 coordinates).
    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)

    # Add the initial landmark positions to the predicted offsets, so stage 1
    # regresses a displacement from the initial shape.
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

    # Append the remaining DAN stages, each refining the previous stage's landmarks.
    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)

    net['output'] = net['s' + str(self.nStages) + '_landmarks']
    return net
Source file: FaceAlignmentTraining.py (Python)
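Below is a minimal sketch of how the layer dictionary returned by createCNN might be compiled into a landmark predictor with standard Theano/Lasagne calls. It assumes `model` is an instance of the training class and that `model.data` is the Theano tensor passed as `input_var` above; apart from `lasagne.layers.get_output` and `theano.function`, the names are illustrative and not part of the original file.

import theano
import lasagne

net = model.createCNN()

# Deterministic forward pass: dropout is disabled and batch norm uses its
# accumulated statistics rather than per-batch estimates.
landmark_output = lasagne.layers.get_output(net['output'], deterministic=True)

# Compile a function that maps a batch of images (N, channels, H, W) to the
# predicted landmark coordinates for each image.
predict_fn = theano.function([model.data], landmark_output)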