def probability_of_measurement(self, measurement, predicted_measurement):
    """Given a measurement and a predicted measurement, computes
    the probability of the measurement."""
    # Unpack the (distance, bearing) pairs.
    d_true, alpha_true = measurement
    d_pred, alpha_pred = predicted_measurement
    # Compute the difference in distance and bearing angle. The bearing
    # difference is wrapped into [-pi, pi) so it is never offset by
    # +2 pi or -2 pi.
    d_diff = abs(d_pred - d_true)
    alpha_diff = (alpha_pred - alpha_true + pi) % (2 * pi) - pi
    # Evaluate zero-mean Gaussian PDFs. With the import in the header,
    # scipy.stats.norm.pdf(x, mu, sigma) is available as
    # normal_dist.pdf(x, mu, sigma). The parameters sigma_d and sigma_alpha
    # discussed in the lecture are self.measurement_distance_stddev and
    # self.measurement_angle_stddev.
    p_d = normal_dist.pdf(d_diff, 0, self.measurement_distance_stddev)
    p_alpha = normal_dist.pdf(alpha_diff, 0, self.measurement_angle_stddev)
    # Distance and bearing errors are treated as independent.
    return p_d * p_alpha
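# The bearing wrap-around is the part that most often goes wrong, so a quick
# standalone check of the normalization trick is worthwhile. This sketch only
# assumes math.pi; angle_diff is an illustrative helper, not from the source.
from math import pi

def angle_diff(a, b):
    """Difference a - b, wrapped into [-pi, pi)."""
    return (a - b + pi) % (2 * pi) - pi

# Wrap-around case: 350 deg vs 10 deg should differ by -20 deg, not +340 deg.
print(angle_diff(350 * pi / 180, 10 * pi / 180))  # approx -0.349 rad (-20 deg)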
def dissimilarity_weights(m):
    """Return an m x m Gaussian weighting window (m is expected to be odd)."""
    w = int((m - 1) / 2)
    v = range(-w, w + 1)
    x, y = meshgrid(v, v)
    # Separable 2D Gaussian: the product of two unit-variance 1D PDFs.
    g = norm.pdf(x) * norm.pdf(y)
    return g
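# A quick sanity check of the window (assuming numpy's meshgrid and
# scipy.stats.norm as in the function above): the weights peak at the
# center and are symmetric.
import numpy as np

g = dissimilarity_weights(5)
print(g.shape)               # (5, 5)
print(np.argmax(g))          # 12, i.e. the central element (2, 2)
print(np.allclose(g, g.T))   # True: the window is symmetric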
def test_austerity(self):
    np.random.seed(SEED)
    X = gen_X(SAMPLE_SIZE)

    def vectorized_log_lik(X, theta):
        return _vector_of_log_likelihoods(theta[0], theta[1], X)

    def log_density_prior(theta):
        return np.log(norm.pdf(theta[0], 0, SIGMA_1)) + np.log(norm.pdf(theta[1], 0, SIGMA_2))

    sample, _ = austerity(vectorized_log_lik, log_density_prior, X, 0.01,
                          batch_size=50, chain_size=10, thinning=1,
                          theta_t=np.random.randn(2))
    assert_almost_equal(np.array([-0.2554517, 1.3805683]), sample[-1])
def _vector_of_log_likelihoods(theta_1, theta_2, x):
    # Equal-weight mixture of two Gaussians centred at theta_1 and theta_1 + theta_2.
    lik = np.log(0.5 * norm.pdf(x, theta_1, SIGMA_x) + 0.5 * norm.pdf(x, theta_1 + theta_2, SIGMA_x))
    return lik
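# Taking the log of a sum of densities can underflow to -inf for points far
# from both components. A numerically safer variant (a suggested alternative,
# not from the source) works entirely in log space via scipy.special.logsumexp:
import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

def _vector_of_log_likelihoods_stable(theta_1, theta_2, x, sigma_x):
    # log(0.5 * N(x; t1, s) + 0.5 * N(x; t1 + t2, s)), computed in log space.
    log_components = np.stack([
        norm.logpdf(x, theta_1, sigma_x),
        norm.logpdf(x, theta_1 + theta_2, sigma_x),
    ])
    return logsumexp(log_components, axis=0, b=0.5)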
def _log_probability(theta_1, theta_2, x):
    log_lik = _log_lik(theta_1, theta_2, x)
    log_prior = np.log(norm.pdf(theta_1, 0, SIGMA_1)) + np.log(norm.pdf(theta_2, 0, SIGMA_2))
    return log_lik + log_prior
def log_density_prior(theta):
    return np.log(norm.pdf(theta[0], 0, SIGMA_1)) + np.log(norm.pdf(theta[1], 0, SIGMA_2))
# Source: generating_aust_sample.py (project: kernel_goodness_of_fit, author: karlnapf)
def plot_deviance(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4, 3))
        deviance = sol.MDL.trace('deviance')[:]
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"] + 1, sampler_state["_iter"] + 1, sampler_state["_thin"])
        plt.plot(x, deviance, "-", color="C3",
                 label="Model deviance\nDIC = %.2f\nBPIC = %.2f" % (sol.MDL.DIC, sol.MDL.BPIC))
        plt.xlabel("Iteration")
        plt.ylabel("Model deviance")
        plt.legend(numpoints=1, loc="best", fontsize=9)
        plt.grid(True)
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
        if save:
            save_where = '/Figures/ModelDeviance/'
            working_path = getcwd().replace("\\", "/") + "/"
            save_path = working_path + save_where
            print("\nSaving model deviance figure in:\n", save_path)
            if not path.exists(save_path):
                makedirs(save_path)
            fig.savefig(save_path + 'ModelDeviance-%s-%s.%s' % (model, filename, save_as),
                        dpi=fig_dpi, bbox_inches='tight')
        try:
            plt.close(fig)
        except Exception:
            pass
        if draw:
            return fig
        else:
            return None
def plot_logp(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4, 3))
        logp = logp_trace(sol.MDL)
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"] + 1, sampler_state["_iter"] + 1, sampler_state["_thin"])
        plt.plot(x, logp, "-", color="C3")
        plt.xlabel("Iteration")
        plt.ylabel("Log-likelihood")
        plt.grid(True)
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
        if save:
            save_where = '/Figures/LogLikelihood/'
            working_path = getcwd().replace("\\", "/") + "/"
            save_path = working_path + save_where
            print("\nSaving logp trace figure in:\n", save_path)
            if not path.exists(save_path):
                makedirs(save_path)
            fig.savefig(save_path + 'LogLikelihood-%s-%s.%s' % (model, filename, save_as),
                        dpi=fig_dpi, bbox_inches='tight')
        try:
            plt.close(fig)
        except Exception:
            pass
        if draw:
            return fig
        else:
            return None
def _fprime(self, sigma):
    """Derivative of the option price with respect to sigma (vega)."""
    logSoverK = log(self.S / self.K)
    n12 = (self.r + sigma ** 2 / 2) * self.T
    numerd1 = logSoverK + n12
    d1 = numerd1 / (sigma * sqrt(self.T))
    return self.S * sqrt(self.T) * norm.pdf(d1) * exp(-self.r * self.T)
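# A vega like _fprime is exactly the derivative a Newton-Raphson implied
# volatility search needs. A minimal sketch of such a loop follows; the
# price() method and the implied_volatility name are assumptions for
# illustration, not part of the source class.
def implied_volatility(self, market_price, sigma0=0.2, tol=1e-8, max_iter=100):
    # Newton-Raphson: sigma <- sigma - f(sigma) / f'(sigma), with
    # f(sigma) = model_price(sigma) - market_price.
    sigma = sigma0
    for _ in range(max_iter):
        diff = self.price(sigma) - market_price  # price() assumed to exist
        if abs(diff) < tol:
            return sigma
        sigma -= diff / self._fprime(sigma)  # vega as the derivative
    return sigma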
def CompPrior(X, MCPar):
    prior = np.ones((X.shape[0], 1))
    log_prior = np.zeros((X.shape[0], 1))
    for ii in range(0, X.shape[0]):
        if MCPar.Prior[0:9] == 'Prior_CRN':
            # Uniform prior for Ninh
            prior[ii, 0] = 1.0 / (MCPar.ub[0, MCPar.idx_unif2] - MCPar.lb[0, MCPar.idx_unif2])
            log_prior[ii, 0] = np.log(prior[ii, 0])
            # Uniform prior for Age
            prior[ii, 0] = 1.0 / (MCPar.ub[0, MCPar.idx_unif1] - MCPar.lb[0, MCPar.idx_unif1])
            log_prior[ii, 0] = np.log(prior[ii, 0])
            if MCPar.Prior[10] == '1':  # Gaussian prior for erosion rate
                prior[ii, 0] = prior[ii, 0] * norm.pdf(X[ii, MCPar.idx_norm], MCPar.pmu, MCPar.psd)
                log_prior[ii, 0] += np.log(prior[ii, 0])
            else:  # Uniform prior for erosion rate
                prior[ii, 0] = prior[ii, 0] * (1.0 / (MCPar.ub[0, MCPar.idx_unif0] - MCPar.lb[0, MCPar.idx_unif0]))
                log_prior[ii, 0] += np.log(prior[ii, 0])
            # Check log_prior for inf
            if np.isinf(log_prior[ii, 0]):
                log_prior[ii, 0] = 1e-200
        else:  # Uniform prior for every variable
            for jj in range(0, MCPar.n):
                prior[ii, 0] = prior[ii, 0] * (1.0 / (MCPar.ub[0, jj] - MCPar.lb[0, jj]))
                log_prior[ii, 0] += np.log(prior[ii, 0])
    return prior, log_prior
def __genprior(self, x, distr='uniform', mu=0, sig=1):
    """Generate prior probability distribution for variable.

    Arguments
    ---------
    x : 1D numpy array (float64)
        Points to evaluate the density at.
    distr : string
        Distribution to use as prior:
        'uniform' (default) discrete uniform distribution
        'normal'  normal distribution
        'gamma'   gamma distribution
        'beta'    beta distribution
    mu : scalar float
        First parameter of the distr distribution (check scipy for parameterization).
    sig : scalar float
        Second parameter of the distr distribution.

    Returns
    -------
    1D numpy array of prior probabilities (unnormalized)
    """
    if distr == 'uniform':
        nx = len(x)
        p = np.ones(nx) / nx
    elif distr == 'normal':
        p = norm.pdf(x, mu, sig)
    elif distr == 'beta':
        p = beta.pdf(x, mu, sig)
    elif distr == 'gamma':
        p = gamma.pdf(x, mu, scale=sig)
    else:
        # Unknown distribution name: fall back to the uniform default.
        nx = len(x)
        p = np.ones(nx) / nx
    return p
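# A usage sketch for the 'normal' branch (standalone, since __genprior is a
# private method; the grid and parameter values here are illustrative):
import numpy as np
from scipy.stats import norm

x = np.linspace(-3, 3, 61)
p = norm.pdf(x, 0, 1)  # what distr='normal', mu=0, sig=1 evaluates to
p = p / p.sum()        # normalize over the grid, since the output is unnormalized
print(p.sum())         # 1.0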
def __entropy(self, pdf):
    """Calculate Shannon entropy of posterior distribution.

    Arguments
    ---------
    pdf : ndarray (float64)
        Posterior distribution of psychometric curve parameters for each stimulus.

    Returns
    -------
    1D numpy array (float64) : Shannon entropy of the posterior for each stimulus
    """
    # Marginalize out all nuisance parameters, i.e. all except alpha and sigma.
    postDims = np.ndim(pdf)
    if self.marginalize:
        while postDims > 3:  # marginalize out second-to-last dimension; last dim is x
            pdf = np.sum(pdf, axis=-2)
            postDims -= 1
    # Find expected entropy; suppress divide-by-zero and invalid-value warnings,
    # as these cases are handled by the NaN redefinition to 0 below.
    with np.errstate(divide='ignore', invalid='ignore'):
        entropy = np.multiply(pdf, np.log(pdf))
    entropy[np.isnan(entropy)] = 0  # define 0*log(0) to equal 0
    dimSum = tuple(range(postDims - 1))  # dimensions to sum over (also a Chinese dish)
    entropy = -(np.sum(entropy, axis=dimSum))
    return entropy
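# For intuition, the quantity computed above is the Shannon entropy
# H = -sum(p * log p) over the parameter axes. A short standalone check on a
# discrete uniform distribution, which maximizes entropy:
import numpy as np

p = np.full(8, 1 / 8)
print(-(p * np.log(p)).sum())  # log(8), approx 2.079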
def minEntropyStim(self):
    """Find the stimulus intensity based on the expected information gain.

    Minimum Shannon entropy is used as the selection criterion for the
    stimulus intensity in the upcoming trial.
    """
    self.nX = len(self.stimRange)
    self.nDims = np.ndim(self.pdf)

    # Make pdf the same dims as the conditional probability table (likelihood).
    self.pdfND = np.expand_dims(self.pdf, axis=self.nDims)  # append new axis
    self.pdfND = np.tile(self.pdfND, (self.nX))  # tile along new axis

    # Probabilities of response r (success, failure) after presenting a stimulus
    # with intensity x at the next trial, multiplied with the prior (pdfND).
    self.pTplus1success = np.multiply(self.likelihood, self.pdfND)
    self.pTplus1failure = self.pdfND - self.pTplus1success

    # Probability of success or failure given stimulus intensity x, p(r|x).
    self.sumAxes = tuple(range(self.nDims))  # sum over all axes except the stimulus intensity axis
    self.pSuccessGivenx = np.sum(self.pTplus1success, axis=self.sumAxes)
    self.pFailureGivenx = np.sum(self.pTplus1failure, axis=self.sumAxes)

    # Posterior probability of parameter values given stimulus intensity x and
    # response r: p(alpha, sigma | x, r).
    self.posteriorTplus1success = self.pTplus1success / self.pSuccessGivenx
    self.posteriorTplus1failure = self.pTplus1failure / self.pFailureGivenx

    # Expected entropy for the next trial at intensity x, producing response r.
    self.entropySuccess = self.__entropy(self.posteriorTplus1success)
    self.entropyFailure = self.__entropy(self.posteriorTplus1failure)
    self.expectEntropy = (np.multiply(self.entropySuccess, self.pSuccessGivenx)
                          + np.multiply(self.entropyFailure, self.pFailureGivenx))

    self.minEntropyInd = np.argmin(self.expectEntropy)  # index of smallest expected entropy
    self.xCurrent = self.stimRange[self.minEntropyInd]  # stimulus intensity at minimum expected entropy

    self.iTrial += 1
    if self.iTrial == (self.nTrials - 1):
        self.stop = 1
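# minEntropyStim is the stimulus-selection step of a psi-method-style adaptive
# procedure. A sketch of the surrounding trial loop; present_stimulus and
# update_posterior are hypothetical names, not part of this snippet:
while not staircase.stop:
    staircase.minEntropyStim()               # pick the most informative intensity
    x = staircase.xCurrent
    response = present_stimulus(x)           # assumed experiment callback -> 0 or 1
    staircase.update_posterior(x, response)  # assumed Bayes update of staircase.pdf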
def expected_improvement(self, optimiser, x):
    mu, std = optimiser.predict([x], return_std=True)
    current_best = max(score for score, params in self.hyperparam_history)
    gamma = (current_best - mu[0]) / std[0]
    exp_improv = std[0] * (gamma * norm.cdf(gamma) + norm.pdf(gamma))
    # Negated so that minimizing this value maximizes expected improvement.
    return -1 * exp_improv
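# Because the function returns negative EI, a generic minimizer can pick the
# next hyperparameter to evaluate. A sketch with hypothetical names, assuming
# optimiser is a fitted scikit-learn GaussianProcessRegressor:
import numpy as np
from scipy.optimize import minimize

def propose_next_point(self, optimiser, bounds=(0.0, 1.0), n_restarts=10):
    # Multi-start local optimization of the acquisition function.
    best_x, best_val = None, np.inf
    for x0 in np.random.uniform(*bounds, size=n_restarts):
        res = minimize(lambda x: self.expected_improvement(optimiser, x),
                       x0=[x0], bounds=[bounds])
        if res.fun < best_val:
            best_x, best_val = res.x[0], res.fun
    return best_x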
def test_extents_weighted():
    xs = np.random.uniform(low=-4, high=4, size=100000)
    weights = norm.pdf(xs)
    low, high = get_extents(xs, weights)
    threshold = 0.5
    assert np.abs(low + 4) < threshold
    assert np.abs(high - 4) < threshold
def test_megkde_1d_basic():
    # Draw from a normal, fit the KDE, and check that sampling from the KDE's
    # PDF via inverse-transform sampling recovers the normal's parameters.
    np.random.seed(0)
    data = np.random.normal(loc=0, scale=1.0, size=2000)
    xs = np.linspace(-3, 3, 100)
    ys = MegKDE(data).evaluate(xs)
    cs = ys.cumsum()
    cs /= cs[-1]  # normalize the cumulative sum into an empirical CDF
    cs[0] = 0
    samps = interp1d(cs, xs)(np.random.uniform(size=10000))  # inverse-CDF sampling
    mu, std = norm.fit(samps)
    assert np.isclose(mu, 0, atol=0.1)
    assert np.isclose(std, 1.0, atol=0.1)