def _buildEvaluationFunctions(self, X, n_steps, plr):
    """Compile the theano functions used at evaluation time.

    Parameters
    ----------
    X : theano variable
        Symbolic input data batch.
    n_steps : theano scalar
        Number of ADAM steps used to refine the variational parameters
        (default 100; 5 for image data — see ``init_val`` below).
    plr : theano scalar
        Learning rate for that refinement (default 8e-3).

    Side effects
    ------------
    Attaches compiled theano functions to ``self``:
    ``evaluate``, ``reconstruct``, ``inference``, ``inference0``,
    ``inferencef``, ``jacobian_logprobs``, ``jacobian_probs``,
    ``jacobian_energy``, ``debugModel`` and, for bag-of-words data,
    ``evaluatePerp``.
    """
    self._p('Evaluation: Setting opt_method: ADAM, 100 steps w/ 8e-3 lr')
    # savedicts collect intermediate symbolic arrays (z/mu/logcov/KL/...)
    # for inspection: initial, during optimization, and final.
    evaldict0, evaldictopt, evaldictf = {}, {}, {}
    # ELBO under the recognition network's initial variational parameters
    elbo_init = self._ELBO(X, savedict=evaldict0)
    elbo_init_batch = evaldict0['elbo_batch']
    # Refine mu_q/logcov_q with a few optimization steps (finopt)
    mu_f, logcov_f, _ = self._optimizeVariationalParams(
        X, evaldict0['mu_q'], evaldict0['logcov_q'],
        n_steps, plr, savedict=evaldictopt)
    # ELBO under the refined variational parameters
    elbo_final = self._ELBO(X, mu_q=mu_f, logcov_q=logcov_f, savedict=evaldictf)
    elbo_final_batch = evaldictf['elbo_batch']
    fxn_inputs = [X]
    # Default step count; image models use fewer steps
    init_val = 100
    if self.params['data_type'] == 'image':
        init_val = 5
    # theano.In gives n_steps/plr default values so callers may omit them
    fxn_inputs.append(theano.In(n_steps, value=init_val, name='n_steps'))
    fxn_inputs.append(theano.In(plr, value=8e-3, name='plr'))
    diff_elbo, _ = self._estimateELBOEntropy(
        elbo_init, elbo_final, evaldict0['logcov_q'], evaldictf['logcov_q'])
    self.evaluate = theano.function(
        fxn_inputs, [elbo_init, elbo_final, evaldictopt['n_steps'], diff_elbo],
        name='Evaluate')
    self.reconstruct = theano.function(
        [evaldictf['z']], evaldictf['mean_p'], name='Reconstruct')
    self.inference = theano.function(
        fxn_inputs, [evaldictf['z'], evaldictf['mu_q'], evaldictf['logcov_q']],
        name='Posterior Inference')
    self.inference0 = theano.function(
        [X], [evaldict0['z'], evaldict0['mu_q'], evaldict0['logcov_q'], evaldict0['KL']],
        name='Posterior Inference 0 ')
    self.inferencef = theano.function(
        fxn_inputs,
        [evaldictf['z'], evaldictf['mu_q'], evaldictf['logcov_q'], evaldictf['KL']],
        name='Posterior Inference F ')
    # Symbolic input for estimating Jacobians of the generative model w.r.t. z
    z0 = T.vector('z')
    z0.tag.test_value = np.random.randn(self.params['dim_stochastic']).astype(config.floatX)
    # --- Estimating Jacobian vectors ---
    additional = {}
    # Jacobian is taken w.r.t. log-probabilities; for Poisson models this
    # is the log-mean (per the original author's note)
    lsf = self._conditionalXgivenZ(z0, additional=additional)
    if self.params['data_type'] == 'real':
        # real-valued models return a tuple; the first element is the mean
        lsf = lsf[0]
    # Gradient w.r.t. the energy term stored by _conditionalXgivenZ
    jacob_energy = theano.gradient.jacobian(additional['E'], wrt=z0)
    jacob_logprobs = theano.gradient.jacobian(lsf, wrt=z0)
    jacob_probs = theano.gradient.jacobian(T.exp(lsf), wrt=z0)
    # NOTE(review): removed unused local `jacob_logprobs_mnist`
    # (= jacobian(T.log(lsf), z0), "for binarized mnist only") — it was
    # built but never compiled into any function.
    self.jacobian_logprobs = theano.function([z0], jacob_logprobs, name='Jacobian wrt Log-Probs')
    self.jacobian_probs = theano.function([z0], jacob_probs, name='Jacobian')
    self.jacobian_energy = theano.function([z0], jacob_energy, name='Jacobian wrt energy')
    # Perplexity for bag-of-words data: per-document ELBO normalized by word count
    if self.params['data_type'] == 'bow':
        X_count = X.sum(1, keepdims=True)
        self.evaluatePerp = theano.function(
            fxn_inputs,
            [(elbo_init_batch / X_count).sum(),
             (elbo_final_batch / X_count).sum(),
             evaldictopt['n_steps'], diff_elbo])
    self.debugModel = theano.function(
        [X],
        [evaldict0['elbo_batch'].sum(), evaldict0['negCLL'].sum(), evaldict0['KLmat'].sum()])
################################ Building Model #####################