def build_latent_alignment_layer(self, pose_vae,
                                 origin_layer=None,
                                 quad_layer=None):
    # Latent dimension of the pose VAE, optionally extended by a 3-D
    # origin and a 4-D quaternion.
    self.pose_z_dim = lasagne.layers.get_output_shape(pose_vae.z_layer)[1]
    self.z_dim = self.pose_z_dim
    if origin_layer is not None:
        self.z_dim += 3
    if quad_layer is not None:
        self.z_dim += 4

    # Alignment parameters: a square weight matrix plus the batch-norm
    # shift (beta) and scale (gamma).
    align_w = CreateParam(InitW, (self.z_dim, self.z_dim), 'align_w')
    align_b = CreateParam(InitBeta, (self.z_dim,), 'align_b')
    align_g = CreateParam(InitGamma, (self.z_dim,), 'align_g')

    # Concatenate the VAE latent code with the optional origin and
    # quaternion inputs along the feature axis. Note that the stored
    # self.origin_input_layer is what gets concatenated; the origin_layer
    # argument only signals that an origin input is present.
    latent_layer = pose_vae.z_layer
    if origin_layer is not None:
        latent_layer = lasagne.layers.ConcatLayer(
            [latent_layer, self.origin_input_layer], axis=1)
    if quad_layer is not None:
        latent_layer = lasagne.layers.ConcatLayer(
            [latent_layer, quad_layer], axis=1)
    print('latent_layer output shape = {}'.format(
        lasagne.layers.get_output_shape(latent_layer)))

    self.latent_layer = latent_layer
    self.latent_var = lasagne.layers.get_output(self.latent_layer,
                                                deterministic=False)
    self.latent_tvar = lasagne.layers.get_output(self.latent_layer,
                                                 deterministic=True)

    # Use a fresh InputLayer with a None batch size so the alignment layer
    # can adapt z coming either from the pose VAE or from real test data.
    latent_layer = lasagne.layers.InputLayer(shape=(None, self.z_dim))
    alignment_layer = batch_norm(
        lasagne.layers.DenseLayer(latent_layer,
                                  num_units=self.z_dim,
                                  nonlinearity=None,
                                  W=align_w),
        beta=align_b, gamma=align_g)

    self.alignment_params = [align_w, align_b, align_g]
    # The extra 2 accounts for the batch-norm running mean and inv_std.
    nPara = len(self.alignment_params) + 2
    self.alignment_all_params = \
        lasagne.layers.get_all_params(alignment_layer)[-nPara:]
    return alignment_layer
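
The excerpt relies on a few helpers that are not shown here (CreateParam, InitW, InitBeta, InitGamma). Below is a minimal, self-contained sketch of how the alignment layer could be rebuilt and exercised, assuming CreateParam is a thin wrapper around lasagne.utils.create_param and the initializers are a Glorot weight init and constant batch-norm inits; the z_dim value and the align_fn name are made up for illustration and are not part of the original code.

import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import batch_norm

# Plausible stand-ins for the helpers the excerpt leaves undefined
# (assumptions for illustration, not the author's original definitions):
InitW = lasagne.init.GlorotUniform()    # assumed weight initializer
InitBeta = lasagne.init.Constant(0.0)   # assumed batch-norm shift (beta) init
InitGamma = lasagne.init.Constant(1.0)  # assumed batch-norm scale (gamma) init

def CreateParam(spec, shape, name):
    # create_param turns an initializer (or array) into a Theano shared variable
    return lasagne.utils.create_param(spec, shape, name=name)

# Standalone rebuild of the alignment layer with a made-up latent size,
# mirroring the structure built inside build_latent_alignment_layer above.
z_dim = 32 + 3 + 4   # example: pose latent (32) + origin (3) + quaternion (4)
align_w = CreateParam(InitW, (z_dim, z_dim), 'align_w')
align_b = CreateParam(InitBeta, (z_dim,), 'align_b')
align_g = CreateParam(InitGamma, (z_dim,), 'align_g')

latent_in = lasagne.layers.InputLayer(shape=(None, z_dim))
alignment_layer = batch_norm(
    lasagne.layers.DenseLayer(latent_in, num_units=z_dim,
                              nonlinearity=None, W=align_w),
    beta=align_b, gamma=align_g)

# Compile a function that maps any (batch, z_dim) latent code through the
# alignment layer; get_output accepts a Theano variable to stand in for the
# InputLayer's input.
z_var = T.matrix('z_var')
aligned = lasagne.layers.get_output(alignment_layer, z_var, deterministic=True)
align_fn = theano.function([z_var], aligned)

z_batch = np.random.randn(8, z_dim).astype(theano.config.floatX)
print(align_fn(z_batch).shape)   # -> (8, z_dim)

Building the alignment layer on a plain InputLayer with a None batch dimension is what lets the same batch-normalized linear map be applied both to latent codes sampled from the pose VAE during training and to codes estimated from real test data.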