def _buildOptimizationFunction(self, X, n_steps, plr):
    # Initial variational parameters (mu, log-covariance) from the recognition network
    mu_0, logcov_0 = self._inference(X)
    optdict = {}
    # Optimize the variational parameters w.r.t. the ELBO for n_steps iterations;
    # per-iteration traces (ELBO, gradient norms, parameters) are collected in optdict
    _, logcov_f, elbo_final = self._optimizeVariationalParams(X, mu_0, logcov_0, n_steps, plr,
                                                              savedict=optdict)
    # Portion of the ELBO change explained by the entropy term (initial vs. final logcov)
    diff_elbo, _ = self._estimateELBOEntropy(optdict['elbo_its'][0], optdict['elbo_its'][-1],
                                             logcov_0, logcov_f)
    # Compiled function returning the full optimization trace
    self.optimize_mu_logcov = theano.function(
        [X, theano.In(n_steps, value=self.params['n_steps'], name='n_steps'),
         theano.In(plr, value=self.params['param_lr'], name='plr')],
        [optdict['elbo_its'], optdict['gradnorm_mu_its'],
         optdict['gradnorm_logcov_its'], optdict['elbo_its'].shape[0], diff_elbo],
        name='Optimize ELBO wrt mu/cov')
    # Compiled function returning summary statistics only: initial/final ELBO,
    # number of iterations taken, and final gradient norms
    self.final_elbo = theano.function(
        [X, theano.In(n_steps, value=self.params['n_steps'], name='n_steps'),
         theano.In(plr, value=self.params['param_lr'], name='plr')],
        [optdict['elbo_its'][0], optdict['elbo_its'][-1], optdict['elbo_its'].shape[0],
         optdict['gradnorm_mu_its'][-1], optdict['gradnorm_logcov_its'][-1], diff_elbo],
        name='Final ELBO')
    # Compiled function returning the variational parameters before and after optimization
    self.init_final_params = theano.function(
        [X, theano.In(n_steps, value=self.params['n_steps'], name='n_steps'),
         theano.In(plr, value=self.params['param_lr'], name='plr')],
        [optdict['mu_its'][0], optdict['logcov_its'][0], optdict['mu_its'][-1],
         optdict['logcov_its'][-1]],
        name='init/final params')
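
A minimal usage sketch of the compiled functions. The names `model` and `X_batch` are hypothetical (an instance of this class after graph construction, and a NumPy array shaped like the symbolic input X); the keyword overrides rely on the `theano.In` names declared above.

# `model` and `X_batch` are hypothetical names used only for illustration.
# Full per-iteration trace with the default n_steps / param_lr:
elbo_its, gnorm_mu_its, gnorm_logcov_its, n_its, diff_elbo = model.optimize_mu_logcov(X_batch)

# Summary only, overriding the defaults for step count and learning rate:
elbo_0, elbo_f, n_its, gnorm_mu_f, gnorm_logcov_f, diff_elbo = model.final_elbo(X_batch, n_steps=200, plr=1e-2)

# Variational parameters before and after optimization:
mu_0, logcov_0, mu_f, logcov_f = model.init_final_params(X_batch)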