def test_sample_contexts_from_distribution():
    env = Catapult(segments=[(0, 0), (20, 0)], context_interval=(0, 20),
                   context_distribution=uniform(5, 10), random_state=0)
    env.init()
    contexts = np.empty(1000)
    for i in range(contexts.shape[0]):
        context = env.request_context(None)
        contexts[i] = context[0]

    # uniform(5, 10) spans [5, 15]; normalized by the context interval (0, 20)
    # this becomes [0.25, 0.75], which is what the assertions below check.
    norm_dist = uniform(0.25, 0.5)
    assert_true(np.all(0.25 <= contexts))
    assert_true(np.all(contexts <= 0.75))
    mean, var = norm_dist.stats("mv")
    assert_almost_equal(np.mean(contexts), mean, places=1)
    assert_almost_equal(np.var(contexts), var, places=1)
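Most snippets on this page rely on scipy's (loc, scale) parameterization: uniform(loc, scale) spans [loc, loc + scale], not [loc, scale]. A quick standalone check, which also explains the bounds asserted above:

from scipy.stats import uniform

dist = uniform(5, 10)              # loc=5, scale=10
assert dist.support() == (5, 15)   # support is [loc, loc + scale]
assert dist.mean() == 10.0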
Examples of Python's uniform() in use
Source file: example_sample_robertson_nopysb_with_dream.py (project: PyDREAM, author: LoLab-VU)
def likelihood(parameter_vector):
    parameter_vector = 10**np.array(parameter_vector)

    # Solve ODE system given parameter vector
    yout = odeint(odefunc, y0, tspan, args=(parameter_vector,))
    cout = yout[:, 2]

    # Calculate log probability contribution given simulated experimental values.
    logp_ctotal = np.sum(like_ctot.logpdf(cout))

    # If simulation failed due to integrator errors, return a log probability of -inf.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf
    return logp_ctotal

# Add vector of rate parameters to be sampled as unobserved random variables in DREAM with uniform priors.
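The declaration that this comment refers to is cut off in the excerpt. A minimal sketch of what it could look like, assuming PyDREAM's SampledParam wrapper (used the same way in the multidmodel_uniform snippet further down this page); the bounds below are hypothetical:

import numpy as np
from scipy.stats import uniform
from pydream.parameters import SampledParam

# Hypothetical log10 bounds for the rate parameters (illustration only).
lower = np.array([-5.0, -5.0, -5.0])
upper = np.array([1.0, 1.0, 1.0])
# scipy's uniform(loc, scale) spans [loc, loc + scale], so scale is the width.
sampled_parameter_vector = SampledParam(uniform, loc=lower, scale=upper - lower)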
def acquire(self, n, t=None):
    """Return random points from a uniform distribution.

    Parameters
    ----------
    n : int
        Number of acquisition points to return.
    t : int, optional
        (unused)

    Returns
    -------
    x : np.ndarray
        The shape is (n, input_dim).
    """
    bounds = np.stack(self.model.bounds)
    return ss.uniform(bounds[:, 0], bounds[:, 1] - bounds[:, 0]) \
        .rvs(size=(n, self.model.input_dim), random_state=self.random_state)
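Because bounds[:, 0] and the widths are arrays of length input_dim, each column of the returned (n, input_dim) array is drawn from its own interval; scipy broadcasts the frozen distribution's loc/scale against the requested size. A small standalone check of that behaviour (the bounds below are made up for illustration):

import numpy as np
import scipy.stats as ss

bounds = np.array([[0.0, 1.0], [10.0, 20.0]])   # rows are (low, high) per dimension
samples = ss.uniform(bounds[:, 0], bounds[:, 1] - bounds[:, 0]).rvs(
    size=(5, 2), random_state=0)
assert np.all(bounds[:, 0] <= samples) and np.all(samples <= bounds[:, 1])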
def test_stop_acceptance_rate_too_low(db_path):
    set_acc_rate = 0.2

    def model(x):
        return {"par": x["par"] + sp.randn()}

    def dist(x, y):
        return abs(x["par"] - y["par"])

    abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist, 10)
    abc.new(db_path, {"par": .5})
    history = abc.run(-1, 8, min_acceptance_rate=set_acc_rate)
    df = history.get_all_populations()
    df["acceptance_rate"] = df["particles"] / df["samples"]
    assert df["acceptance_rate"].iloc[-1] < set_acc_rate
    assert df["acceptance_rate"].iloc[-2] >= set_acc_rate
def get_uniform_paramgrid(hyperparameters, fixed_parameters):
    param_grid = dict()
    for param_name, hyperparameter in hyperparameters.items():
        if fixed_parameters is not None and param_name in fixed_parameters.keys():
            continue
        if isinstance(hyperparameter, CategoricalHyperparameter):
            all_values = hyperparameter.choices
            if all(item in ['True', 'False'] for item in all_values):
                # bool('False') would be True, so compare against the string explicitly.
                all_values = [item == 'True' for item in all_values]
            param_grid[param_name] = all_values
        elif isinstance(hyperparameter, UniformFloatHyperparameter):
            if hyperparameter.log:
                param_grid[param_name] = loguniform(base=2, low=hyperparameter.lower,
                                                    high=hyperparameter.upper)
            else:
                param_grid[param_name] = uniform(loc=hyperparameter.lower,
                                                 scale=hyperparameter.upper - hyperparameter.lower)
        elif isinstance(hyperparameter, UniformIntegerHyperparameter):
            if hyperparameter.log:
                param_grid[param_name] = loguniform_int(base=2, low=hyperparameter.lower,
                                                        high=hyperparameter.upper)
            else:
                param_grid[param_name] = randint(low=hyperparameter.lower,
                                                 high=hyperparameter.upper + 1)
        else:
            raise ValueError()
    return param_grid
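A sketch of how such a grid is typically consumed, here fed to scikit-learn's RandomizedSearchCV (the estimator, dataset, and parameter names are illustrative; loguniform and loguniform_int above are project-specific helpers, not scipy distributions):

from scipy.stats import randint, uniform
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV

# Stand-in for the output of get_uniform_paramgrid().
param_grid = {
    'max_depth': randint(low=2, high=11),          # integers in [2, 10]
    'max_features': uniform(loc=0.1, scale=0.8),   # floats in [0.1, 0.9]
}
X, y = load_iris(return_X_y=True)
search = RandomizedSearchCV(RandomForestClassifier(random_state=0), param_grid,
                            n_iter=5, cv=3, random_state=0)
search.fit(X, y)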
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)

    # test that repeated calls yield identical parameters
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=3, random_state=0)
    assert_equal([x for x in sampler], [x for x in sampler])
    if sp_version >= (0, 16):
        param_distributions = {"C": uniform(0, 1)}
        sampler = ParameterSampler(param_distributions=param_distributions,
                                   n_iter=10, random_state=0)
        assert_equal([x for x in sampler], [x for x in sampler])
def gen(self, N, trials, normal_p_range, anomaly_p_range, anomaly_scale=1.0):
    self.N = N
    self.trials = trials
    self.gens = [
        compound_distribution(
            stats.uniform(loc=normal_p_range[0], scale=normal_p_range[1] - normal_p_range[0]),
            lambda a: stats.gamma(a=a, scale=1.0)
        ),
        compound_distribution(
            stats.uniform(loc=anomaly_p_range[0], scale=anomaly_p_range[1] - anomaly_p_range[0]),
            lambda a: stats.gamma(a=a, scale=anomaly_scale)
        )
    ]
    self.priors = np.array([0.9, 0.1])
    self.cats, self.params, self.X = compound_rvs(self.gens, self.priors, self.N, self.trials)
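compound_distribution and compound_rvs are project-specific helpers; the idea they implement can be sketched with plain scipy: first draw the gamma shape parameter a from the uniform prior, then draw the trial observations from gamma(a). A minimal illustration (function and variable names here are hypothetical):

import numpy as np
from scipy import stats

def sample_compound(prior, make_dist, trials, rng):
    """Draw one parameter from the prior, then `trials` observations from the induced distribution."""
    a = prior.rvs(random_state=rng)
    return a, make_dist(a).rvs(size=trials, random_state=rng)

rng = np.random.default_rng(0)
prior = stats.uniform(loc=1.0, scale=4.0)      # shape parameter a ~ U[1, 5]
a, x = sample_compound(prior, lambda a: stats.gamma(a=a, scale=1.0), trials=100, rng=rng)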
Source file: uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformPDF(self):
    with self.test_session():
        a = constant_op.constant([-3.0] * 5 + [15.0])
        b = constant_op.constant([11.0] * 5 + [20.0])
        uniform = uniform_lib.Uniform(a=a, b=b)

        a_v = -3.0
        b_v = 11.0
        x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)

        def _expected_pdf():
            pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
            pdf[x > b_v] = 0.0
            pdf[x < a_v] = 0.0
            pdf[5] = 1.0 / (20.0 - 15.0)
            return pdf

        expected_pdf = _expected_pdf()

        pdf = uniform.prob(x)
        self.assertAllClose(expected_pdf, pdf.eval())

        log_pdf = uniform.log_prob(x)
        self.assertAllClose(np.log(expected_pdf), log_pdf.eval())
Source file: uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformCDF(self):
    with self.test_session():
        batch_size = 6
        a = constant_op.constant([1.0] * batch_size)
        b = constant_op.constant([11.0] * batch_size)
        a_v = 1.0
        b_v = 11.0
        x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)

        uniform = uniform_lib.Uniform(a=a, b=b)

        def _expected_cdf():
            cdf = (x - a_v) / (b_v - a_v)
            cdf[x >= b_v] = 1
            cdf[x < a_v] = 0
            return cdf

        cdf = uniform.cdf(x)
        self.assertAllClose(_expected_cdf(), cdf.eval())

        log_cdf = uniform.log_cdf(x)
        self.assertAllClose(np.log(_expected_cdf()), log_cdf.eval())
Source file: uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformSample(self):
    with self.test_session():
        a = constant_op.constant([3.0, 4.0])
        b = constant_op.constant(13.0)
        a1_v = 3.0
        a2_v = 4.0
        b_v = 13.0
        n = constant_op.constant(100000)
        uniform = uniform_lib.Uniform(a=a, b=b)

        samples = uniform.sample(n, seed=137)
        sample_values = samples.eval()
        self.assertEqual(sample_values.shape, (100000, 2))
        self.assertAllClose(
            sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-2)
        self.assertAllClose(
            sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-2)
        self.assertFalse(
            np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
        self.assertFalse(
            np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
Source file: uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformNans(self):
    with self.test_session():
        a = 10.0
        b = [11.0, 100.0]
        uniform = uniform_lib.Uniform(a=a, b=b)

        no_nans = constant_op.constant(1.0)
        nans = constant_op.constant(0.0) / constant_op.constant(0.0)
        self.assertTrue(math_ops.is_nan(nans).eval())
        with_nans = array_ops.stack([no_nans, nans])

        pdf = uniform.prob(with_nans)

        is_nan = math_ops.is_nan(pdf).eval()
        self.assertFalse(is_nan[0])
        self.assertTrue(is_nan[1])
Source file: uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformSampleWithShape(self):
    with self.test_session():
        a = 10.0
        b = [11.0, 20.0]
        uniform = uniform_lib.Uniform(a, b)

        pdf = uniform.prob(uniform.sample((2, 3)))
        # pylint: disable=bad-continuation
        expected_pdf = [
            [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
            [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
        ]
        # pylint: enable=bad-continuation
        self.assertAllClose(expected_pdf, pdf.eval())

        pdf = uniform.prob(uniform.sample())
        expected_pdf = [1.0, 0.1]
        self.assertAllClose(expected_pdf, pdf.eval())
def test_outlier_detection(self):
    print("Start of test")
    n_samples = 1000
    norm_dist = stats.norm(0, 1)

    truth = np.ones((n_samples,))
    truth[-100:] = -1

    X0 = norm_dist.rvs(n_samples)
    X = np.c_[X0 * 5, X0 + norm_dist.rvs(n_samples) * 2]

    # Note: stats.uniform(-10, 10) is (loc, scale), so the outliers are drawn from [-10, 0].
    uniform_dist = stats.uniform(-10, 10)
    X[-100:] = np.c_[uniform_dist.rvs(100), uniform_dist.rvs(100)]

    outlier_detector = pyisc.SklearnOutlierDetector(
        100.0 / n_samples,
        pyisc.P_Gaussian([0, 1])
    )
    outlier_detector.fit(X, np.array([1] * len(X)))

    self.assertLess(outlier_detector.threshold_, 0.35)
    self.assertGreater(outlier_detector.threshold_, 0.25)

    predictions = outlier_detector.predict(X, np.array([1] * len(X)))
    accuracy = sum(truth == predictions) / float(n_samples)
    print("accuracy", accuracy)
    self.assertGreater(accuracy, 0.85)
def test_large_grid():
    """In this test, we purposely overfit a RandomForest to completely random data
    in order to assert that the test error will far exceed the train error.
    """
    if not SK18:
        custom_cv = KFold(n=y_train.shape[0], n_folds=3, shuffle=True, random_state=42)
    else:
        custom_cv = KFold(n_splits=3, shuffle=True, random_state=42)

    # define the pipe
    pipe = Pipeline([
        ('scaler', SelectiveScaler()),
        ('pca', SelectivePCA(weight=True)),
        ('rf', RandomForestClassifier(random_state=42))
    ])

    # define hyper parameters
    hp = {
        'scaler__scaler': [StandardScaler(), RobustScaler(), MinMaxScaler()],
        'pca__whiten': [True, False],
        'pca__weight': [True, False],
        'pca__n_components': uniform(0.75, 0.15),
        'rf__n_estimators': randint(5, 10),
        'rf__max_depth': randint(5, 15)
    }

    # define the grid
    grid = RandomizedSearchCV(pipe, hp, n_iter=2, scoring='accuracy', n_jobs=1,
                              cv=custom_cv, random_state=42)

    # this will fail because we haven't fit yet
    assert_fails(grid.score, (ValueError, AttributeError), X_train, y_train)

    # fit the grid
    grid.fit(X_train, y_train)

    # score for coverage -- this might warn...
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        grid.score(X_train, y_train)

    # coverage:
    assert grid._estimator_type == 'classifier'

    # get predictions
    tr_pred, te_pred = grid.predict(X_train), grid.predict(X_test)

    # evaluate score (SHOULD be better than random...)
    accuracy_score(y_train, tr_pred), accuracy_score(y_test, te_pred)

    # grid score reports:
    # assert fails for bad percentile
    assert_fails(report_grid_score_detail, ValueError,
                 **{'random_search': grid, 'percentile': 0.0})
    assert_fails(report_grid_score_detail, ValueError,
                 **{'random_search': grid, 'percentile': 1.0})

    # assert fails for bad y_axis
    assert_fails(report_grid_score_detail, ValueError,
                 **{'random_search': grid, 'y_axis': 'bad_axis'})

    # assert passes otherwise
    report_grid_score_detail(grid, charts=True, percentile=0.95)  # just ensure percentile works
def test_simple_hpo():
    def f(args):
        x = args['x']
        return x * x

    s = {'x': {'dist': st.uniform(loc=-10., scale=20), 'lo': -10., 'hi': 10.}}
    trials = []

    # Test fmin and ability to continue adding to trials
    best = fmin(loss_fn=f, space=s, max_evals=40, trials=trials)
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)
    assert len(trials) == 50, "HPO continuation trials not working"

    # Test verbose flag
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)

    yarray = np.array([tr['loss'] for tr in trials])
    np.testing.assert_array_less(yarray, 100.)
    xarray = np.array([tr['x'] for tr in trials])
    np.testing.assert_array_less(np.abs(xarray), 10.)
    assert best['loss'] < 100., "HPO out of range"
    assert np.abs(best['x']) < 10., "HPO out of range"

    # Test unknown distributions
    s2 = {'x': {'dist': 'normal', 'mu': 0., 'sigma': 1.}}
    trials2 = []
    with pytest.raises(ValueError) as excinfo:
        fmin(loss_fn=f, space=s2, max_evals=40, trials=trials2)
    assert "Unknown distribution type for variable" in str(excinfo.value)

    s3 = {'x': {'dist': st.norm(loc=0., scale=1.)}}
    trials3 = []
    fmin(loss_fn=f, space=s3, max_evals=40, trials=trials3)
def test_search_basic(xy_classification):
    X, y = xy_classification
    param_grid = {'class_weight': [None, 'balanced']}
    a = dms.GridSearchCV(SVC(kernel='rbf'), param_grid)
    a.fit(X, y)

    param_dist = {'C': stats.uniform}
    b = dms.RandomizedSearchCV(SVC(kernel='rbf'), param_dist)
    b.fit(X, y)
def likelihood(parameter_vector):
    param_dict = {pname: pvalue for pname, pvalue in
                  zip(pysb_sampled_parameter_names, parameter_vector)}
    for pname, pvalue in param_dict.items():
        # Change model parameter values to current location in parameter space
        model.parameters[pname].value = 10 ** pvalue

    # Simulate experimentally measured Ctotal values.
    solver.run()

    # Calculate log probability contribution from simulated experimental values.
    logp_ctotal = np.sum(like_ctot.logpdf(solver.yobs['C_total']))

    # If model simulation failed due to integrator errors, return a log probability of -inf.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf
    return logp_ctotal

# Add vector of PySB rate parameters to be sampled as unobserved random variables to DREAM with uniform priors.
def multidmodel_uniform():
    """Multidimensional model with uniform priors."""
    lower = np.array([-5, -9, 5, 3])
    upper = np.array([10, 0, 7, 8])
    range = upper - lower

    x = SampledParam(uniform, loc=lower, scale=range)
    like = simple_likelihood

    return [x], like
def test_rvs_prior_ok(self):
    means = [0.8, 0.5]
    weights = [.3, .7]
    N = 10000
    prior_logpdf = ss.uniform(0, 1).logpdf
    rvs = GMDistribution.rvs(means, weights=weights, size=N, prior_logpdf=prior_logpdf)

    # Ensure prior pdf > 0 for all samples
    assert np.all(np.isfinite(prior_logpdf(rvs)))
def gen_sample(loc, scale, sample, distribution_type):
    if distribution_type == NORMAL_DISTRIBUTION_TYPE:
        return norm(loc=loc, scale=scale).ppf(sample)
    elif distribution_type == UNIFORM_DISTRIBUTION_TYPE:
        return uniform(loc=loc, scale=scale).ppf(sample)
    else:
        raise Exception("Invalid distribution type: {}"
                        .format(distribution_type))
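gen_sample maps a probability in [0, 1] through the inverse CDF (ppf), i.e. inverse-transform sampling: feeding uniform random numbers through ppf reproduces samples from the target distribution. A standalone sketch of the same idea (the constants are illustrative):

import numpy as np
from scipy.stats import norm, uniform

rng = np.random.default_rng(0)
u = rng.random(10_000)                                  # probabilities in [0, 1)
normal_samples = norm(loc=0.0, scale=1.0).ppf(u)        # ~ N(0, 1)
uniform_samples = uniform(loc=5.0, scale=10.0).ppf(u)   # ~ U[5, 15]
assert abs(normal_samples.mean()) < 0.1
assert 9.8 < uniform_samples.mean() < 10.2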
def rvs(self, random_state=None):
    if random_state is None:
        gen = uniform(loc=self.lo, scale=self.scale).rvs()
    else:
        gen = uniform(loc=self.lo, scale=self.scale).rvs(random_state=random_state)
    if self.mass_on_zero > 0.0 and np.random.uniform() < self.mass_on_zero:
        return 0.0
    return gen
def rvs(self, random_state=None):
    if random_state is None:
        exp = uniform(loc=self.lo, scale=self.scale).rvs()
    else:
        exp = uniform(loc=self.lo, scale=self.scale).rvs(random_state=random_state)
    if self.mass_on_zero > 0.0 and np.random.uniform() < self.mass_on_zero:
        return 0.0
    return self.base ** exp
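This second variant draws the exponent uniformly and returns base ** exp, i.e. a log-uniform value, optionally spiked at zero via mass_on_zero. A minimal standalone sketch of the log-uniform part (the function and argument names here are hypothetical):

import numpy as np
from scipy.stats import uniform

def loguniform_rvs(lo, hi, base=10, random_state=None):
    """Draw one value whose logarithm (in the given base) is uniform on [lo, hi]."""
    exp = uniform(loc=lo, scale=hi - lo).rvs(random_state=random_state)
    return base ** exp

# Exponent uniform on [-3, 0] gives values log-uniformly spread over [1e-3, 1].
sample = loguniform_rvs(-3, 0, random_state=0)
assert 1e-3 <= sample <= 1.0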
def __init__(self, outputs=None, inputs=None, noise=None, rng=None):
    if rng is None:
        rng = gu.gen_rng(1)
    if outputs is None:
        outputs = [0]
    if inputs is None:
        inputs = [1]
    if noise is None:
        noise = .1
    self.rng = rng
    self.outputs = outputs
    self.inputs = inputs
    self.noise = noise
    self.uniform = uniform(loc=-self.noise, scale=2 * self.noise)
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
    assert targets == self.outputs
    assert inputs.keys() == self.inputs
    assert not constraints
    x = inputs[self.inputs[0]]
    u = self.rng.rand()
    noise = self.rng.uniform(low=-self.noise, high=self.noise)
    if u < .5:
        y = x**2 + noise
    else:
        y = -(x**2 + noise)
    return {self.outputs[0]: y}
def logpdf(self, rowid, targets, constraints=None, inputs=None):
    assert targets.keys() == self.outputs
    assert inputs.keys() == self.inputs
    assert not constraints
    x = inputs[self.inputs[0]]
    y = targets[self.outputs[0]]
    return logsumexp([
        np.log(.5) + self.uniform.logpdf(y - x**2),
        np.log(.5) + self.uniform.logpdf(-y - x**2)
    ])
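The logpdf above encodes a two-branch mixture: with probability 1/2 the sample is y = x**2 + eps and with probability 1/2 it is y = -(x**2 + eps), with eps ~ U[-noise, noise], so p(y | x) = 0.5 * p_eps(y - x**2) + 0.5 * p_eps(-y - x**2); logsumexp simply adds the two branches in log space. A quick numerical check of that identity (the constants are illustrative):

import numpy as np
from scipy.special import logsumexp
from scipy.stats import uniform

noise = 0.1
eps = uniform(loc=-noise, scale=2 * noise)   # eps ~ U[-noise, noise]
x, y = 0.5, 0.5**2 + 0.03                    # a point on the upper branch

direct = np.log(0.5 * eps.pdf(y - x**2) + 0.5 * eps.pdf(-y - x**2))
via_logsumexp = logsumexp([np.log(0.5) + eps.logpdf(y - x**2),
                           np.log(0.5) + eps.logpdf(-y - x**2)])
assert np.isclose(direct, via_logsumexp)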
def __init__(self, outputs=None, inputs=None, noise=None, rng=None):
    if rng is None:
        rng = gu.gen_rng(1)
    if outputs is None:
        outputs = [0]
    if inputs is None:
        inputs = [1]
    if noise is None:
        noise = .1
    self.rng = rng
    self.outputs = outputs
    self.inputs = inputs
    self.noise = noise
    self.uniform = uniform(scale=self.noise)
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
    assert targets == self.outputs
    assert inputs.keys() == self.inputs
    assert not constraints
    x = inputs[self.inputs[0]]
    noise = self.rng.uniform(high=self.noise)
    if np.cos(x) < 0:
        y = np.cos(x) + noise
    else:
        y = np.cos(x) - noise
    return {self.outputs[0]: y}
def __init__(self, outputs=None, inputs=None, low=0, high=1, rng=None):
    assert not inputs
    if rng is None:
        rng = gu.gen_rng(0)
    if outputs is None:
        outputs = [0]
    self.rng = rng
    self.low = low
    self.high = high
    self.outputs = outputs
    self.inputs = []
    self.uniform = uniform(loc=self.low, scale=self.high - self.low)
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
    assert not constraints
    assert targets == self.outputs
    x = self.rng.uniform(low=self.low, high=self.high)
    return {self.outputs[0]: x}
def logpdf(self, rowid, targets, constraints=None, inputs=None):
    assert not constraints
    assert not inputs
    assert targets.keys() == self.outputs
    x = targets[self.outputs[0]]
    return self.uniform.logpdf(x)