def plot_natural_gauss(x, obs_eta1, obs_eta2, obs_loss,
                       title, epsilon=0.05, breaks=300):
    # Build a grid over the natural-parameter space; eta2 must stay
    # negative, hence the cap at 0.0.
    eta1_grid = np.linspace(start=min(obs_eta1) - epsilon,
                            stop=max(obs_eta1) + epsilon,
                            num=breaks)
    eta2_grid = np.linspace(start=min(obs_eta2) - epsilon,
                            stop=min(max(obs_eta2) + epsilon, 0.0),
                            num=breaks)
    eta1_grid, eta2_grid = np.meshgrid(eta1_grid, eta2_grid)
    mu_grid = get_mu(eta1_grid, eta2_grid)
    sigma_grid = get_sigma(eta2_grid)
    # Negative log-likelihood of the data over the grid
    # (sp refers to scipy.stats here).
    loss_grid = -np.sum(
        [sp.norm(loc=mu_grid, scale=sigma_grid).logpdf(x=xi) for xi in x],
        axis=0)
    # Plot loss contours with the optimization path, plus the loss curve.
    fig, ax = plt.subplots(nrows=1, ncols=2)
    ax[0].contour(eta1_grid, eta2_grid, loss_grid,
                  levels=np.linspace(np.min(loss_grid),
                                     np.max(loss_grid),
                                     breaks),
                  cmap='terrain')
    ax[0].plot(obs_eta1, obs_eta2, color='red', alpha=0.5,
               linestyle='dashed', linewidth=1, marker='.', markersize=3)
    ax[0].set_xlabel('eta1')
    ax[0].set_ylabel('eta2')
    ax[1].plot(range(len(obs_loss)), obs_loss)
    ax[1].set_xlabel('iter')
    # ax[1].set_ylabel('loss')
    plt.suptitle(title)
    plt.show()
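plot_natural_gauss relies on helpers get_mu and get_sigma that map the Gaussian's natural parameters back to the mean and standard deviation; they are not shown in the snippet. A minimal sketch, assuming the standard exponential-family parameterization eta1 = mu / sigma^2 and eta2 = -1 / (2 * sigma^2):

import numpy as np

def get_mu(eta1, eta2):
    # Invert eta1 = mu / sigma**2 using sigma**2 = -1 / (2 * eta2).
    return -eta1 / (2.0 * eta2)

def get_sigma(eta2):
    # sigma = sqrt(-1 / (2 * eta2)); requires eta2 < 0.
    return np.sqrt(-1.0 / (2.0 * eta2))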
def __init__(self, data, mleDiffCutoff=1.0):
    print([min(data), max(data)])
    distributions = [st.laplace, st.norm, st.expon, st.dweibull,
                     st.invweibull, st.lognorm, st.uniform]
    # Fit each candidate distribution and score it by scipy's nnlf,
    # the negative log-likelihood at the fitted parameters; lower is better.
    mles = []
    for distribution in distributions:
        pars = distribution.fit(data)
        mle = distribution.nnlf(pars, data)
        mles.append(mle)
    ranked = sorted(zip(distributions, mles), key=lambda d: d[1])
    for dist in ranked:
        print(dist)
    best_fit = ranked[0]
    print('Best fit reached using {}, MLE value: {}'.format(
        best_fit[0].name, best_fit[1]))
    self.modelSets = []
    self.modelOptions = [mod[0].name for mod in ranked]
    ## list of scipy distribution ids sorted by their MLEs given the data
    ## [0] is best, [1] next best, and so on
    for model in ranked:
        if model[0].name in getAvailableDistributionsByScipyIds():
            try:
                modelDist = getDistributionByScipyId(model[0].name, data)
                self.modelSets.append([modelDist, model[1]])
                ## append the distribution object and the MLE value for this
                ## particular distribution & the data
                ## NOTE: the bimodal case may need special handling here
            except RuntimeError:
                pass
        else:
            ## nothing can be done here: without an object for the
            ## required distribution, we can't use it
            pass
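The ranking pattern above (fit every candidate, then sort by scipy's nnlf, the negative log-likelihood at the fitted parameters) also works standalone, independent of the class's unshown helpers; a minimal sketch:

import scipy.stats as st

data = st.norm(loc=5, scale=2).rvs(size=1000)
candidates = [st.laplace, st.norm, st.expon, st.uniform]
# Lower nnlf (negative log-likelihood) means a better fit.
ranked = sorted(candidates, key=lambda d: d.nnlf(d.fit(data), data))
print(ranked[0].name)  # usually 'norm' for this data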
def normal(self, samples):
    """
    Sample from a Normal distribution.

    Parameters:
        mean (location)
        variance (squared scale)
    - samples: number of values that will be returned.
    """
    locMean = float(self.__params[0])
    # scipy's scale is the standard deviation, hence the square root
    # of the stored variance.
    scaleVariance = np.sqrt(float(self.__params[1]))
    distro = norm(loc=locMean, scale=scaleVariance)
    f = distro.rvs(size=samples)
    return f
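scipy's norm takes scale as a standard deviation, which is why the variance parameter is square-rooted above. A quick sanity check:

import numpy as np
from scipy.stats import norm

samples = norm(loc=0.0, scale=np.sqrt(4.0)).rvs(size=100_000)
print(np.var(samples))  # close to the variance 4.0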
def __init__(self):
    self.distr = stats.norm(loc=0, scale=1.0)
def pvalues(self):
    # Two-sided p-values from t-statistics (if use_t) or z-statistics.
    if self.use_t:
        df_resid = getattr(self, 'df_resid_inference', self.df_resid)
        return stats.t.sf(np.abs(self.tvalues), df_resid) * 2
    else:
        return stats.norm.sf(np.abs(self.tvalues)) * 2
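A quick check of the normal (z-statistic) branch, with illustrative values:

import numpy as np
from scipy import stats

tvalues = np.array([1.96, 2.58])
pvals = stats.norm.sf(np.abs(tvalues)) * 2
print(pvals)  # approximately [0.05, 0.0099]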
traffic_distribution.py (project: MultiRelaySelectionDFCoopCom, author: JianshanZhou)
def __init__(self, scenario_flag="Freeway_Free"):
    """
    Five scenarios are supported in total:
    Freeway_Night, Freeway_Free, Freeway_Rush,
    Urban_Peak, Urban_Nonpeak.
    The PDFs of the vehicle speed and the inter-vehicle space are adapted
    from existing references.
    """
    # Assumes: from scipy.stats import expon, lognorm, fisk, norm
    if scenario_flag == "Freeway_Night":
        self.headway_random = expon(0.0, 1.0 / 256.41)
        meanSpeed = 30.93  # m/s
        stdSpeed = 1.2  # m/s
    elif scenario_flag == "Freeway_Free":
        self.headway_random = lognorm(0.75, 0.0, np.exp(3.4))
        meanSpeed = 29.15  # m/s
        stdSpeed = 1.5  # m/s
    elif scenario_flag == "Freeway_Rush":
        self.headway_random = lognorm(0.5, 0.0, np.exp(2.5))
        meanSpeed = 10.73  # m/s
        stdSpeed = 2.0  # m/s
    elif scenario_flag == "Urban_Peak":
        scale = 1.096
        c = 0.314
        loc = 0.0
        self.headway_random = fisk(c, loc, scale)
        meanSpeed = 6.083  # m/s
        stdSpeed = 1.2  # m/s
    elif scenario_flag == "Urban_Nonpeak":
        self.headway_random = lognorm(0.618, 0.0, np.exp(0.685))
        meanSpeed = 12.86  # m/s
        stdSpeed = 1.5  # m/s
    else:
        raise ValueError(
            "Unsupported scenario_flag: {}".format(scenario_flag))
    self.speed_random = norm(meanSpeed, stdSpeed)
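A minimal usage sketch; TrafficDistribution is a hypothetical name for the class whose __init__ is shown above:

traffic = TrafficDistribution(scenario_flag="Urban_Peak")  # hypothetical class name
headways = traffic.headway_random.rvs(size=10)  # inter-vehicle spaces
speeds = traffic.speed_random.rvs(size=10)      # vehicle speeds in m/s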
generate_synth_data.py (project: using-python-for-research, author: scheung38)
def generate_synth_data(n=50):
    """
    Create two sets of points from bivariate normal distributions.

    :param n: number of points drawn from each distribution.
    :return: (points, outcomes), where points has shape (2n, 2) and
        outcomes labels the first n points 0 and the rest 1.
    """
    points = np.concatenate((ss.norm(0, 1).rvs((n, 2)),
                             ss.norm(1, 1).rvs((n, 2))), axis=0)
    outcomes = np.concatenate((np.repeat(0, n), np.repeat(1, n)))
    return points, outcomes
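A quick way to visualize the two classes (assumes numpy as np and scipy.stats as ss are imported as in the snippet):

import matplotlib.pyplot as plt

points, outcomes = generate_synth_data(n=50)
plt.scatter(points[:, 0], points[:, 1], c=outcomes, cmap='coolwarm')
plt.show()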
def test_gaussian_multiple_populations(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [Distribution(x=st.norm(0, sigma_x))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})
    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))
    sigma_x_given_y = 1 / sp.sqrt(1 / sigma_x**2 + 1 / sigma_y**2)
    mu_x_given_y = sigma_x_given_y**2 * y_observed / sigma_y**2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = sp.linspace(-8, 8)
    max_distribution_difference = sp.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.052
    assert history.max_t == nr_populations - 1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < .07
    assert abs(std_emp - sigma_x_given_y) < .12
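The analytic reference in the assertions is the standard conjugate-Gaussian result: with prior x ~ N(0, sigma_x^2) and likelihood y | x ~ N(x, sigma_y^2), the posterior is x | y ~ N(s2 * y / sigma_y^2, s2), where s2 = 1 / (1/sigma_x^2 + 1/sigma_y^2). A quick numeric check with the test's values:

sigma_x, sigma_y, y_observed = 1.0, 0.5, 2.0
s2 = 1.0 / (1.0 / sigma_x**2 + 1.0 / sigma_y**2)  # posterior variance = 0.2
mu_post = s2 * y_observed / sigma_y**2            # posterior mean = 1.6
print(mu_post, s2**0.5)                           # 1.6 and ~0.447, as asserted above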
def test_gaussian_multiple_populations_adaptive_population_size(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = AdaptivePopulationSize(600)
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(0, sigma_x))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})
    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))
    sigma_x_given_y = 1 / sp.sqrt(1 / sigma_x ** 2 + 1 / sigma_y ** 2)
    mu_x_given_y = sigma_x_given_y ** 2 * y_observed / sigma_y ** 2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = sp.linspace(-8, 8)
    max_distribution_difference = sp.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.15
    assert history.max_t == nr_populations - 1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < .07
    assert abs(std_emp - sigma_x_given_y) < .12
def proposal(X, W, Centroids, bandwidth):
    # Weighted Gaussian-kernel mixture density evaluated at X.
    d = np.zeros_like(X)
    for c in range(Centroids.shape[0]):
        d += norm(Centroids[c], bandwidth).pdf(X) * W[c]
    d /= np.sum(W)
    return d
def RBFfeatures(X, Centroids, bandwidth):
    # Gaussian RBF feature matrix: one column per centroid.
    d = norm(Centroids[0], bandwidth).pdf(X)
    d = d.reshape(-1, 1)
    for c in range(Centroids.shape[0] - 1):
        d = np.concatenate(
            (d, (norm(Centroids[c + 1], bandwidth).pdf(X)).reshape(-1, 1)), 1)
    return d
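proposal above can also be written without the Python loop; a broadcast-based sketch (proposal_vectorized is a hypothetical name), assuming X is a 1-D array of evaluation points:

import numpy as np
from scipy.stats import norm

def proposal_vectorized(X, W, Centroids, bandwidth):
    # Evaluate every kernel at every point in one broadcast:
    # K has shape (n_centroids, n_points).
    K = norm(Centroids[:, None], bandwidth).pdf(X[None, :])
    return W @ K / np.sum(W)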
def test_norm_logpdf_generator(x, mu, std):
    def test(self):
        scipy_d = norm(mu, std)  # scipy normal distribution as reference
        logpdf_scipy = scipy_d.logpdf(x)
        logpdf = lth.norm_logpdf(x, mu, std)
        np.testing.assert_allclose(logpdf, logpdf_scipy)
    return test
def pdf(x):
    # `e` is assumed to be Euler's number (e.g. from math import e)
    # imported elsewhere in the original module.
    return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
                  + stats.norm(scale=4 / e).pdf(x))
def stat_power(control, trial, ci=0.975):
    '''
    Calculates statistical power.

    Parameters
    ----------
    control : array-type
        Control population sample.
    trial : array-type
        Trial population sample.
    ci : float
        Confidence level used to set the rejection region.

    Returns
    -------
    Float
        Statistical power, in percent.
    '''
    # Log-transform, then compute the mean and standard error of each sample.
    control = np.log(control)
    trial = np.log(trial)
    control_mean = np.mean(control)
    trial_mean = np.mean(trial)
    control_se = np.std(control, ddof=1) / np.sqrt(control.shape[0])
    trial_se = np.std(trial, ddof=1) / np.sqrt(trial.shape[0])
    # Normal approximations of the null and alternative sampling distributions.
    null_norm = sc.norm(control_mean, control_se)
    alt_norm = sc.norm(trial_mean, trial_se)
    # Rejection thresholds (X*) under the null.
    reject_low = null_norm.ppf((1 - ci) / 2)
    reject_high = null_norm.ppf(ci + (1 - ci) / 2)
    # Power: probability the alternative falls in the rejection region.
    power_lower = alt_norm.cdf(reject_low)
    power_higher = 1 - alt_norm.cdf(reject_high)
    power = (power_lower + power_higher) * 100
    return power
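A minimal usage sketch with synthetic lognormal samples (illustrative values; assumes numpy as np and scipy.stats as sc, as in the snippet):

import numpy as np
import scipy.stats as sc

rng = np.random.default_rng(0)
control = rng.lognormal(mean=0.0, sigma=1.0, size=500)
trial = rng.lognormal(mean=0.2, sigma=1.0, size=500)
print(stat_power(control, trial))  # statistical power, in percent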
normal_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNormalLogPDFMultidimensional(self):
    with self.test_session():
        batch_size = 6
        mu = constant_op.constant([[3.0, -3.0]] * batch_size)
        sigma = constant_op.constant([[math.sqrt(10.0), math.sqrt(15.0)]] *
                                     batch_size)
        x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
        normal = normal_lib.Normal(loc=mu, scale=sigma)
        expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)
        log_pdf = normal.log_prob(x)
        log_pdf_values = log_pdf.eval()
        self.assertEqual(log_pdf.get_shape(), (6, 2))
        self.assertAllClose(expected_log_pdf, log_pdf_values)
        self.assertAllEqual(normal.batch_shape().eval(), log_pdf.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), log_pdf.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), log_pdf.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), log_pdf.eval().shape)

        pdf = normal.prob(x)
        pdf_values = pdf.eval()
        self.assertEqual(pdf.get_shape(), (6, 2))
        self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
        self.assertAllEqual(normal.batch_shape().eval(), pdf.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), pdf_values.shape)
        self.assertAllEqual(normal.get_batch_shape(), pdf.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), pdf_values.shape)
normal_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNormalEntropyWithScalarInputs(self):
    # scipy.stats.norm cannot deal with the shapes in the other test.
    with self.test_session():
        mu_v = 2.34
        sigma_v = 4.56
        normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
        expected_entropy = stats.norm(mu_v, sigma_v).entropy()
        entropy = normal.entropy()
        self.assertAllClose(expected_entropy, entropy.eval())
        self.assertAllEqual(normal.batch_shape().eval(), entropy.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), entropy.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), entropy.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), entropy.eval().shape)
quantized_distribution_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNormalLogProbWithCutoffs(self):
    # At integer values, the result should be the same as the standard normal.
    with self.test_session():
        qdist = distributions.QuantizedDistribution(
            distribution=distributions.Normal(loc=0., scale=1.),
            lower_cutoff=-2.,
            upper_cutoff=2.)
        sm_normal = stats.norm(0., 1.)
        # These cutoffs create partitions of the real line, and indices:
        # (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
        #        -2       -1      0     1      2
        # Test interval (-inf, -2], <--> index -2.
        self.assertAllClose(
            np.log(sm_normal.cdf(-2)), qdist.log_prob(-2.).eval(), atol=0)
        # Test interval (-2, -1], <--> index -1.
        self.assertAllClose(
            np.log(sm_normal.cdf(-1) - sm_normal.cdf(-2)),
            qdist.log_prob(-1.).eval(),
            atol=0)
        # Test interval (-1, 0], <--> index 0.
        self.assertAllClose(
            np.log(sm_normal.cdf(0) - sm_normal.cdf(-1)),
            qdist.log_prob(0.).eval(),
            atol=0)
        # Test interval (1, inf), <--> index 2.
        self.assertAllClose(
            np.log(1. - sm_normal.cdf(1)), qdist.log_prob(2.).eval(), atol=0)