# imports used below (module level in the full file)
import lasagne
import theano
import theano.tensor as T

def genLossAndGradient(self):
    # KL-divergence term of the VAE objective, read from the dedicated
    # KL loss layer (deterministic=False, so the sampling path is used)
    kl_div = lasagne.layers.get_output(self.kl_loss_layer,
                                       deterministic=False)
    kl_loss = lasagne.objectives.aggregate(kl_div, mode='sum')
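    # For a diagonal-Gaussian posterior N(mu, sigma^2) and a standard-normal
    # prior, this term has the closed form
    #     0.5 * sum(mu**2 + sigma**2 - log(sigma**2) - 1)
    # per sample; kl_loss_layer (defined elsewhere in the class) is assumed
    # to compute it.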
    # reconstruction term: assume the input pose is distributed as a
    # standard Gaussian around the reconstruction
    recons_loss = lasagne.objectives.squared_error(self.recons_tvar,
                                                   self.pose_input_var)
    recons_loss = recons_loss * 0.5
    recons_loss = lasagne.objectives.aggregate(recons_loss, mode='sum')
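    # Under x ~ N(x_hat, I) the negative log-likelihood is
    #     0.5 * ||x - x_hat||**2 + const,
    # so the 0.5 factor above makes the reconstruction term an exact
    # Gaussian NLL up to an additive constant that does not affect gradients.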
    # total loss and Adam updates over all encoder + decoder parameters
    loss = kl_loss + recons_loss
    # loss = recons_loss  # debug option: reconstruction term only
    lr_var = T.fscalar('lr')  # learning rate, supplied at call time
    update_params = self.encoder_params + self.decoder_params
    update_vars = lasagne.updates.adam(loss, update_params,
                                       learning_rate=lr_var,
                                       beta1=self.b1)
    # compile the training function: one Adam step, returns the loss
    self.train_fn = theano.function(
        [self.pose_input_var, self.noise_input_var, lr_var],
        loss,
        updates=update_vars)
    # reconstruction: pose (and sampling noise) in, reconstructed pose out
    self.recons_fn = theano.function(
        [self.pose_input_var, self.noise_input_var],
        self.recons_tvar)
    # encoder: pose (and sampling noise) in, latent code z out
    self.encode_fn = theano.function(
        [self.pose_input_var, self.noise_input_var],
        self.z_tvar)
    print('[PoseVAE] functions compiled')
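For context, a minimal usage sketch of the three compiled functions is below. The batch size, the pose dimensionality pose_dim, the noise dimensionality noise_dim, and the PoseVAE constructor are all hypothetical placeholders; only the function names and their argument order come from the method above.

import numpy as np

batch_size, pose_dim, noise_dim = 64, 63, 20   # hypothetical shapes

model = PoseVAE()           # assumed constructor, defined elsewhere
model.genLossAndGradient()  # compiles train_fn / recons_fn / encode_fn

poses = np.random.randn(batch_size, pose_dim).astype(np.float32)
noise = np.random.randn(batch_size, noise_dim).astype(np.float32)

# one gradient step; the learning rate must be float32 to match T.fscalar
loss = model.train_fn(poses, noise, np.float32(1e-3))

# reconstruction and latent codes for the same batch
recons = model.recons_fn(poses, noise)
z = model.encode_fn(poses, noise)
print('loss: %.4f, z shape: %s' % (float(loss), z.shape))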