def test_ea_search_sklearn_elm_steps(label, do_predict):
'''Test that EaSearchCV can work with numpy, dask.array,
pandas.DataFrame, xarray.Dataset, xarray_filters.MLDataset
'''
from scipy.stats import lognorm
est, make_data, sel, kw = args[label]
parameters = {'kernel': ['linear', 'rbf'],
'C': lognorm(4),}
if isinstance(est, (sk_Pipeline, Pipeline)):
parameters = {'est__{}'.format(k): v
for k, v in parameters.items()}
ea = EaSearchCV(est, parameters,
n_iter=4,
ngen=2,
model_selection=sel,
model_selection_kwargs=kw)
X, y = make_data()
ea.fit(X, y)
if do_predict:
pred = ea.predict(X)
assert isinstance(pred, type(y))
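The lognorm(4) entry above is a frozen scipy distribution; randomized searchers such as EaSearchCV draw candidate values for C from it via .rvs(). A minimal sketch of the same parameter space with plain scikit-learn and synthetic data (no elm dependency assumed):

from scipy.stats import lognorm
from sklearn.datasets import make_classification
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVC

# synthetic stand-in for make_data(); the param space mirrors the test above
X, y = make_classification(n_samples=200, random_state=0)
search = RandomizedSearchCV(SVC(),
                            {'kernel': ['linear', 'rbf'], 'C': lognorm(4)},
                            n_iter=4, random_state=0)
search.fit(X, y)
print(search.best_params_)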
def test_plot_fragility_curve1():
from scipy.stats import lognorm
filename = abspath(join(testdir, 'plot_fragility_curve1.png'))
if isfile(filename):
os.remove(filename)
FC = wntr.scenario.FragilityCurve()
FC.add_state('Minor', 1, {'Default': lognorm(0.5,scale=0.3)})
FC.add_state('Major', 2, {'Default': lognorm(0.5,scale=0.7)})
plt.figure()
wntr.graphics.plot_fragility_curve(FC)
plt.savefig(filename, format='png')
plt.close()
assert_true(isfile(filename))
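Each state added above is a frozen lognorm(s, scale) distribution, i.e. exp(N(ln(scale), s**2)). A quick standalone check of the exceedance probabilities implied by the same parameters (the demand value 0.5 is made up for illustration):

from scipy.stats import lognorm

minor = lognorm(0.5, scale=0.3)  # s=0.5, median 0.3
major = lognorm(0.5, scale=0.7)  # s=0.5, median 0.7
x = 0.5  # hypothetical demand value
print(minor.cdf(x), major.cdf(x))  # probability each damage state is reached at demand x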
def lognormal():
return Fixture(pyro_dist=(dist.lognormal, LogNormal),
scipy_dist=sp.lognorm,
examples=[
{'mu': [1.4], 'sigma': [0.4], 'test_data': [5.5]},
],
scipy_arg_fn=lambda mu, sigma: ((np.array(sigma),), {"scale": np.exp(np.array(mu))}))
def test_log_pdf_on_transformed_distribution(lognormal):
mu_z = Variable(torch.zeros(1))
sigma_z = Variable(torch.ones(1))
dist_params = lognormal.get_dist_params(0)
mu_lognorm = dist_params['mu']
sigma_lognorm = dist_params['sigma']
trans_dist = get_transformed_dist(dist.normal, sigma_lognorm, mu_lognorm)
test_data = lognormal.get_test_data(0)
log_px_torch = trans_dist.log_pdf(test_data, mu_z, sigma_z).data[0]
log_px_np = sp.lognorm.logpdf(
test_data.data.cpu().numpy(),
sigma_lognorm.data.cpu().numpy(),
scale=np.exp(mu_lognorm.data.cpu().numpy()))[0]
assert_equal(log_px_torch, log_px_np, prec=1e-4)
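The scipy reference call uses lognorm's (s, scale) convention: s is the standard deviation and the scale is exp(mu) of the underlying normal. A standalone check of that convention via the change of variables, with the values from the fixture above:

import numpy as np
from scipy.stats import lognorm, norm

mu, sigma, x = 1.4, 0.4, 5.5
# if Y = exp(X) with X ~ N(mu, sigma), then log f_Y(y) = log f_X(log y) - log y
by_hand = norm.logpdf(np.log(x), mu, sigma) - np.log(x)
by_scipy = lognorm.logpdf(x, sigma, scale=np.exp(mu))
assert np.isclose(by_hand, by_scipy)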
def __init__(self, data, mleDiffCutoff=1.0):
    print([min(data), max(data)])
distributions = [st.laplace, st.norm, st.expon, st.dweibull, st.invweibull, st.lognorm, st.uniform]
mles = []
for distribution in distributions:
pars = distribution.fit(data)
mle = distribution.nnlf(pars, data)
mles.append(mle)
results = [(distribution.name, mle) for distribution, mle in zip(distributions, mles)]
for dist in sorted(zip(distributions, mles), key=lambda d: d[1]):
        print(dist)
best_fit = sorted(zip(distributions, mles), key=lambda d: d[1])[0]
    print('Best fit reached using {}, MLE value: {}'.format(best_fit[0].name, best_fit[1]))
self.modelSets = []
self.modelOptions = [mod[0].name for mod in sorted(zip(distributions, mles), key=lambda d: d[1])]
## list of scipy distribution ids sorted by their MLEs given the data
## [0] is best, [1], next best and so on
for model in sorted(zip(distributions, mles), key=lambda d: d[1]):
if(model[0].name in getAvailableDistributionsByScipyIds()):
try:
modelDist = getDistributionByScipyId(model[0].name, data)
self.modelSets.append([modelDist, model[1]])
## append the distribution object and the MLE value for this
## particular distribution & the data
## ah frig, I think in the bimodal case, it will be
## something like
except RuntimeError:
pass
else:
## nothing that can be done here, if we dont have a object of
## the distribution needed available, we cant do much about it
pass
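A minimal standalone version of the ranking idea above, on synthetic data; nnlf is the negative log-likelihood at the given parameters, so smaller values mean a better fit:

import scipy.stats as st

data = st.lognorm(0.6, scale=2.0).rvs(1000, random_state=0)
candidates = [st.norm, st.expon, st.lognorm]
# rank candidates by negative log-likelihood at their fitted parameters
ranked = sorted((d.nnlf(d.fit(data), data), d.name) for d in candidates)
print(ranked[0])  # best-fitting distribution and its nnlf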
def __init__(self,
             γ=2,
             β=0.95,
             α=0.90,
             σ=0.1,
             grid_size=100):
    self.γ, self.β, self.α, self.σ = γ, β, α, σ
    # == Set the grid interval to contain most of the mass of the
    # stationary distribution of the consumption endowment == #
    ssd = self.σ / np.sqrt(1 - self.α**2)
    grid_min, grid_max = np.exp(-4 * ssd), np.exp(4 * ssd)
    self.grid = np.linspace(grid_min, grid_max, grid_size)
    self.grid_size = grid_size
    # == set up distribution for shocks == #
    self.ϕ = lognorm(σ)
    self.draws = self.ϕ.rvs(500)
    # == h(y) = β * int G(y,z)^(1-γ) ϕ(dz) == #
    self.h = np.empty(self.grid_size)
    for i, y in enumerate(self.grid):
        self.h[i] = β * np.mean((y**α * self.draws)**(1 - γ))
## == Now the functions that act on a Lucas Tree == #
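In the constructor above, lognorm(σ) with the default scale=1 is exp(N(0, σ**2)), the one-period shock in the endowment process G(y, z) = y**α * z. A quick check of the draws under the default σ = 0.1:

import numpy as np
from scipy.stats import lognorm

σ = 0.1
ϕ = lognorm(σ)  # scale defaults to 1, i.e. exp(N(0, σ**2))
draws = ϕ.rvs(500, random_state=0)
print(np.log(draws).mean(), np.log(draws).std())  # ≈ 0 and ≈ σ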
def lognormal(self, samples):
    """
    Sampling from a Log-Normal distribution
    Parameters (stored in self.__params):
    mean  - mean of the underlying normal
    sigma - standard deviation of the underlying normal
    ------------------------------------------------------------------------
    - samples: number of values that will be returned.
    """
    mean = float(self.__params[0])
    sigma = float(self.__params[1])
    distro = lognorm(s=sigma, scale=np.exp(mean))
    f = distro.rvs(size=samples)
    return f
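The (s, scale) mapping used here implies E[Y] = exp(mean + sigma**2/2). A standalone check with assumed parameters:

import numpy as np
from scipy.stats import lognorm

mean, sigma = 0.0, 1.0  # assumed parameters of the underlying normal
f = lognorm(s=sigma, scale=np.exp(mean)).rvs(size=1000, random_state=0)
print(f.mean(), np.exp(mean + sigma**2 / 2))  # sample mean vs. theoretical mean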
traffic_distribution.py (project: MultiRelaySelectionDFCoopCom, author: JianshanZhou)
def __init__(self, scenario_flag = "Freeway_Free"):
"""
Totally five scenarios are supported here:
Freeway_Night, Freeway_Free, Freeway_Rush;
Urban_Peak, Urban_Nonpeak.
The PDFs of the vehicle speed and the inter-vehicle space are adapted
from existing references.
"""
if scenario_flag == "Freeway_Night":
self.headway_random = expon(0.0, 1.0/256.41)
meanSpeed = 30.93 #m/s
stdSpeed = 1.2 #m/s
elif scenario_flag == "Freeway_Free":
self.headway_random = lognorm(0.75, 0.0, np.exp(3.4))
meanSpeed = 29.15 #m/s
stdSpeed = 1.5 #m/s
elif scenario_flag == "Freeway_Rush":
self.headway_random = lognorm(0.5, 0.0, np.exp(2.5))
meanSpeed = 10.73 #m/s
stdSpeed = 2.0 #m/s
elif scenario_flag == "Urban_Peak":
scale = 1.096
c = 0.314
loc = 0.0
self.headway_random = fisk(c, loc, scale)
meanSpeed = 6.083 #m/s
stdSpeed = 1.2 #m/s
elif scenario_flag == "Urban_Nonpeak":
self.headway_random = lognorm(0.618, 0.0, np.exp(0.685))
meanSpeed = 12.86 #m/s
stdSpeed = 1.5 #m/s
    else:
        raise ValueError("Unsupported scenario_flag: {}".format(scenario_flag))
self.speed_random = norm(meanSpeed, stdSpeed)
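Usage sketch for one branch of the constructor above, with the Freeway_Free parameters copied verbatim; only raw draws are shown:

import numpy as np
from scipy.stats import lognorm, norm

headway_random = lognorm(0.75, 0.0, np.exp(3.4))  # inter-vehicle space, meters
speed_random = norm(29.15, 1.5)                   # vehicle speed, m/s
print(headway_random.rvs(5, random_state=0))
print(speed_random.rvs(5, random_state=0))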
def lognormalRvForNormal(mu, sigma):
'''
Define a lognormal RV by the mean and stdev of the underlying Normal distribution
'''
return lognorm(sigma, scale=math.exp(mu))
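Sanity check for this helper: the median of the resulting lognormal is its scale, i.e. exp(mu) of the underlying normal (example values assumed):

import math

rv = lognormalRvForNormal(1.0, 0.5)  # assumed (mu, sigma)
assert math.isclose(rv.median(), math.exp(1.0))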
def LogNormalDistribution(mean, std):
if std == .0:
return Distribution.ConstantDistribution(mean)
var = std * std
mean2 = mean * mean
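    # NOTE: first-order approximation of the exact lognormal moment match,
    # which is sigma**2 = log(1 + var/mean2); accurate when std << mean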
mu = np.log(mean) - (var/2)*(1/mean2)
sigma = np.sqrt(var/mean2)
dist = UncertainVariable(ss.lognorm(sigma, scale=np.exp(mu)))
logging.debug('Distribution -- LogNormal: ({}, {})'.format(
dist.mean, np.sqrt(dist.var)))
return dist
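For comparison, the exact moment match alongside the approximation used above, under assumed inputs; the two agree closely when std << mean:

import numpy as np
from scipy.stats import lognorm

mean, std = 10.0, 2.0  # assumed target moments
s_exact = np.sqrt(np.log(1 + std**2 / mean**2))
s_approx = np.sqrt(std**2 / mean**2)  # the approximation used above
print(s_exact, s_approx)
rv = lognorm(s_exact, scale=mean / np.sqrt(1 + std**2 / mean**2))
print(rv.mean(), rv.std())  # the exact match recovers (10.0, 2.0)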
transformed_distribution_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testTransformedDistribution(self):
g = ops.Graph()
with g.as_default():
mu = 3.0
sigma = 2.0
# Note: the Jacobian callable only works for this example; more generally
# you may or may not need a reduce_sum.
log_normal = self._cls()(
distribution=ds.Normal(loc=mu, scale=sigma),
bijector=bs.Exp(event_ndims=0))
sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))
# sample
sample = log_normal.sample(100000, seed=235)
self.assertAllEqual([], log_normal.get_event_shape())
with self.test_session(graph=g):
self.assertAllEqual([], log_normal.event_shape().eval())
self.assertAllClose(
sp_dist.mean(), np.mean(sample.eval()), atol=0.0, rtol=0.05)
# pdf, log_pdf, cdf, etc...
# The mean of the lognormal is around 148.
test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)
for func in [[log_normal.log_prob, sp_dist.logpdf],
[log_normal.prob, sp_dist.pdf],
[log_normal.log_cdf, sp_dist.logcdf],
[log_normal.cdf, sp_dist.cdf],
[log_normal.survival_function, sp_dist.sf],
[log_normal.log_survival_function, sp_dist.logsf]]:
actual = func[0](test_vals)
expected = func[1](test_vals)
with self.test_session(graph=g):
self.assertAllClose(expected, actual.eval(), atol=0, rtol=0.01)
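The equivalence the test relies on, checked with scipy alone for the same mu and sigma; this also confirms the remark that the mean is around 148:

import numpy as np
from scipy.stats import lognorm

mu, sigma = 3.0, 2.0
sp_dist = lognorm(s=sigma, scale=np.exp(mu))
print(sp_dist.mean(), np.exp(mu + sigma**2 / 2))  # both ≈ 148.4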
transformed_distribution_test.py, continued (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testCachedSamplesWithoutInverse(self):
with self.test_session() as sess:
mu = 3.0
sigma = 0.02
log_normal = self._cls()(
distribution=ds.Normal(loc=mu, scale=sigma),
bijector=bs.Exp(event_ndims=0))
sample = log_normal.sample(1)
sample_val, log_pdf_val = sess.run([sample, log_normal.log_prob(sample)])
self.assertAllClose(
stats.lognorm.logpdf(sample_val, s=sigma, scale=np.exp(mu)),
log_pdf_val,
atol=1e-2)
def __init__(self, *args):
self.dist_func = []
mean = []
std = []
self.dist_name = []
for dist, mu, sig in args:
        if dist == 'lognorm':
            """scipy lognormal
            Y ~ LN(mu_Y, sig_Y)
            ln(Y) ~ N(mu_lnY, sig_lnY)
            mu_lnY = ln(mu_Y) - 0.5*(sig_lnY**2)
            sig_lnY = sqrt(ln(1 + sig_Y**2/mu_Y**2))
            s = sig_lnY
            scale = exp(mu_lnY)
            """
s = np.sqrt(np.log(1 + (sig**2)/mu**2))
scale = np.exp(np.log(mu) - .5 * s**2)
self.dist_func.append(stats.lognorm(s=s, scale=scale))
        elif dist == 'gumbel_r':
            """scipy gumbel right-skewed, aka extreme value type I
            f(x) = a * exp(-a*(x - loc)) * exp(-exp(-a*(x - loc)))
            a = 1/scale = sqrt(pi**2/(6*sig**2))
            loc = u = mu - 0.5772/a
            """
a = np.sqrt(np.pi**2/(6 * sig**2))
u = mu - 0.5772/a
self.dist_func.append(stats.gumbel_r(loc=u, scale=1/a))
else:
self.dist_func.append(getattr(stats, dist)(loc=mu, scale=sig))
self.dist_name.append(dist)
mean.append(mu)
std.append(sig)
self.mean = np.array(mean)
self.std = np.array(std)
self.rho = np.identity(len(args))
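Standalone check of the two moment matches implemented above, with assumed (mu, sig); both recovered distributions should reproduce the requested mean and standard deviation:

import numpy as np
from scipy import stats

mu, sig = 20.0, 4.0  # assumed target mean and standard deviation
s = np.sqrt(np.log(1 + sig**2 / mu**2))
ln_rv = stats.lognorm(s=s, scale=np.exp(np.log(mu) - 0.5 * s**2))
a = np.sqrt(np.pi**2 / (6 * sig**2))
gb_rv = stats.gumbel_r(loc=mu - 0.5772/a, scale=1/a)
print(ln_rv.mean(), ln_rv.std())  # ≈ (20, 4)
print(gb_rv.mean(), gb_rv.std())  # ≈ (20, 4)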
def plot_multiplicative(self, T, npaths=25, show_trend=True):
"""
Plots for the multiplicative decomposition
"""
# Pull out right sizes so we know how to increment
nx, nk, nm = self.nx, self.nk, self.nm
# Matrices for the multiplicative decomposition
    ν_tilde, H, g = self.multiplicative_decomp()
# Allocate space (nm is the number of functionals - we want npaths for each)
mpath_mult = np.empty((nm*npaths, T))
mbounds_mult = np.empty((nm*2, T))
spath_mult = np.empty((nm*npaths, T))
sbounds_mult = np.empty((nm*2, T))
tpath_mult = np.empty((nm*npaths, T))
ypath_mult = np.empty((nm*npaths, T))
# Simulate for as long as we wanted
moment_generator = self.lss.moment_sequence()
# Pull out population moments
    for t in range(T):
tmoms = next(moment_generator)
ymeans = tmoms[1]
yvar = tmoms[3]
# Lower and upper bounds - for each multiplicative functional
for ii in range(nm):
li, ui = ii*2, (ii+1)*2
            Mdist = lognorm(np.sqrt(yvar[nx+nm+ii, nx+nm+ii]).item(),
                            scale=np.exp(ymeans[nx+nm+ii]
                                         - t*(.5)*np.expand_dims(np.diag(H @ H.T), 1)[ii]).item())
            Sdist = lognorm(np.sqrt(yvar[nx+2*nm+ii, nx+2*nm+ii]).item(),
                            scale=np.exp(-ymeans[nx+2*nm+ii]).item())
mbounds_mult[li:ui, t] = Mdist.ppf([.01, .99])
sbounds_mult[li:ui, t] = Sdist.ppf([.01, .99])
# Pull out paths
for n in range(npaths):
x, y = self.lss.simulate(T)
for ii in range(nm):
ypath_mult[npaths*ii+n, :] = np.exp(y[nx+ii, :])
mpath_mult[npaths*ii+n, :] = np.exp(y[nx+nm + ii, :] - np.arange(T)*(.5)*np.expand_dims(np.diag(H @ H.T),1)[ii])
spath_mult[npaths*ii+n, :] = 1/np.exp(-y[nx+2*nm + ii, :])
tpath_mult[npaths*ii+n, :] = np.exp(y[nx+3*nm + ii, :] + np.arange(T)*(.5)*np.expand_dims(np.diag(H @ H.T),1)[ii])
mult_figs = []
for ii in range(nm):
li, ui = npaths*(ii), npaths*(ii+1)
LI, UI = 2*(ii), 2*(ii+1)
mult_figs.append(self.plot_given_paths(T, ypath_mult[li:ui,:], mpath_mult[li:ui,:],
spath_mult[li:ui,:], tpath_mult[li:ui,:],
mbounds_mult[LI:UI,:], sbounds_mult[LI:UI,:], 1,
show_trend=show_trend))
mult_figs[ii].suptitle( r'Multiplicative decomposition of $y_{%s}$' % str(ii+1), fontsize=14)
return mult_figs
def plot_martingales(self, T, npaths=25):
# Pull out right sizes so we know how to increment
nx, nk, nm = self.nx, self.nk, self.nm
# Matrices for the multiplicative decomposition
    ν_tilde, H, g = self.multiplicative_decomp()
# Allocate space (nm is the number of functionals - we want npaths for each)
mpath_mult = np.empty((nm*npaths, T))
mbounds_mult = np.empty((nm*2, T))
# Simulate for as long as we wanted
moment_generator = self.lss.moment_sequence()
# Pull out population moments
    for t in range(T):
tmoms = next(moment_generator)
ymeans = tmoms[1]
yvar = tmoms[3]
# Lower and upper bounds - for each functional
for ii in range(nm):
li, ui = ii*2, (ii+1)*2
            Mdist = lognorm(np.sqrt(yvar[nx+nm+ii, nx+nm+ii]).item(),
                            scale=np.exp(ymeans[nx+nm+ii]
                                         - t*(.5)*np.expand_dims(np.diag(H @ H.T), 1)[ii]).item())
mbounds_mult[li:ui, t] = Mdist.ppf([.01, .99])
# Pull out paths
for n in range(npaths):
x, y = self.lss.simulate(T)
for ii in range(nm):
mpath_mult[npaths*ii+n, :] = np.exp(y[nx+nm + ii, :] - np.arange(T)*(.5)*np.expand_dims(np.diag(H @ H.T),1)[ii])
mart_figs = []
for ii in range(nm):
li, ui = npaths*(ii), npaths*(ii+1)
LI, UI = 2*(ii), 2*(ii+1)
mart_figs.append(self.plot_martingale_paths(T, mpath_mult[li:ui,:],
mbounds_mult[LI:UI,:], horline=1))
mart_figs[ii].suptitle(r'Martingale components for many paths of $y_{%s}$' % str(ii+1),
fontsize=14)
return mart_figs
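Both methods above turn population moments of log M_t into a lognormal band: if log M_t ~ N(m, v), then M_t is lognorm(sqrt(v), scale=exp(m)) and the 1%/99% band is its ppf. Minimal illustration with assumed moments:

import numpy as np
from scipy.stats import lognorm

m, v = 0.05, 0.04  # assumed mean and variance of log M_t at some date
Mdist = lognorm(np.sqrt(v), scale=np.exp(m))
print(Mdist.ppf([.01, .99]))  # the band stored in mbounds_mult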