def gen_training_data(
    num_features,
    num_training_samples,
    num_outputs,
    noise_scale=0.1,
):
    np.random.seed(0)
    random.seed(1)
    input_distribution = stats.norm()
    training_inputs = input_distribution.rvs(
        size=(num_training_samples, num_features)
    ).astype(np.float32)
    weights = np.random.normal(
        size=(num_outputs, num_features)
    ).astype(np.float32).transpose()
    noise = np.multiply(
        np.random.normal(size=(num_training_samples, num_outputs)), noise_scale
    )
    training_outputs = (np.dot(training_inputs, weights) +
                        noise).astype(np.float32)
    return training_inputs, training_outputs, weights, input_distribution
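A hedged usage sketch for the helper above (it relies on the module-level imports numpy as np, random, and scipy.stats as stats); with noise_scale=0.0 the outputs reduce to an exact linear map:

inputs, outputs, weights, dist = gen_training_data(
    num_features=4, num_training_samples=100, num_outputs=2, noise_scale=0.0
)
assert inputs.shape == (100, 4)
assert outputs.shape == (100, 2)
assert weights.shape == (4, 2)  # stored transposed: features x outputs
# With zero noise, the outputs are exactly the linear map of the inputs.
np.testing.assert_allclose(outputs, np.dot(inputs, weights), rtol=1e-5, atol=1e-6)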
Python usage examples of scipy.stats.norm()
def get_prediction_dist(
    trainer,
    num_outputs=1,
    num_features=4,
    num_training_samples=100,
    num_test_datapoints=10,
    num_training_iterations=10000,
):
    test_inputs, test_outputs, _ = _train(
        trainer, num_features, num_training_samples, num_test_datapoints,
        num_outputs, num_training_iterations
    )
    predictions = trainer.score(test_inputs)
    dist = np.linalg.norm(test_outputs - predictions)
    return dist
def get_weight_dist(
    trainer,
    num_outputs=1,
    num_features=4,
    num_training_samples=100,
    num_test_datapoints=10,
    num_training_iterations=10000,
):
    _, _, weights = _train(
        trainer, num_features, num_training_samples, num_test_datapoints,
        num_outputs, num_training_iterations
    )
    trained_weights = np.concatenate(
        [workspace.FetchBlob(w) for w in trainer.weights], axis=0
    ).transpose()
    dist = np.linalg.norm(trained_weights - weights)
    return dist
def setUp(self):
    '''
    Saves the current random state for later recovery, sets the random
    seed to get reproducible results, and manually constructs a mixed
    vine.
    '''
    # Save random state for later recovery
    self.random_state = np.random.get_state()
    # Set fixed random seed
    np.random.seed(0)
    # Manually construct mixed vine
    self.dim = 3  # Dimension
    self.vine = MixedVine(self.dim)
    # Specify marginals
    self.vine.set_marginal(0, norm(0, 1))
    self.vine.set_marginal(1, poisson(5))
    self.vine.set_marginal(2, gamma(2, 0, 4))
    # Specify pair copulas
    self.vine.set_copula(1, 0, GaussianCopula(0.5))
    self.vine.set_copula(1, 1, FrankCopula(4))
    self.vine.set_copula(2, 0, ClaytonCopula(5))
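A hedged follow-on sketch for the fixture above; rvs and logpdf are assumed parts of the mixedvines MixedVine API (they do not appear in the snippet itself):

# Sketch only: vine.rvs / vine.logpdf are assumed mixedvines API.
samples = self.vine.rvs(size=1000)   # expected shape: (1000, 3)
logdens = self.vine.logpdf(samples)  # joint log-density of each sample
assert np.all(np.isfinite(logdens))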
def compute_coarse_coding_features(num_states=3, npoints=600):
    # TODO: generalise beyond exactly three states
    assert num_states == 3
    cc_features = np.zeros((num_states, npoints))
    x1 = np.linspace(-1.5, 1.5, npoints)
    x2 = np.linspace(-1.0, 2.0, npoints)
    x3 = np.linspace(-0.5, 2.5, npoints)
    mu1 = 0.0
    mu2 = 0.5
    mu3 = 1.0
    sigma = 0.4
    from scipy.stats import norm
    cc_features[0, :] = norm(mu1, sigma).pdf(x1)
    cc_features[1, :] = norm(mu2, sigma).pdf(x2)
    cc_features[2, :] = norm(mu3, sigma).pdf(x3)
    return cc_features
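The norm(mu, sigma).pdf calls above are the closed-form Gaussian density; a minimal standalone check of that equivalence:

import numpy as np
from scipy.stats import norm
# pdf(x) = exp(-(x - mu)**2 / (2 * sigma**2)) / (sigma * sqrt(2 * pi))
x = np.linspace(-1.5, 1.5, 600)
mu, sigma = 0.0, 0.4
manual = np.exp(-(x - mu)**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))
np.testing.assert_allclose(norm(mu, sigma).pdf(x), manual)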
def setUp(self):
    """Setup the Prior object."""
    # A half-bounded uniform prior ensuring param >= 0.
    self.uPrior = Prior(UniformUnnormedRV(lower=0.))
    # A normalized uniform prior ensuring param is in [1e-18, 1e-12]
    self.bPrior = Prior(UniformBoundedRV(1.0e-18, 1.0e-12))
    # A bounded Gaussian prior to ensure that param is in [0, 1]
    mean, std, low, up = 0.9, 0.1, 0.0, 1.0
    self.gPrior = Prior(GaussianBoundedRV(loc=mean, scale=std,
                                          lower=low, upper=up))
    # A standard Gaussian prior
    self.nPrior = Prior(norm(loc=0, scale=1))
    # Linear exponent prior p(x) ~ 10**x
    self.lePrior = Prior(LinearExpRV(lower=-18, upper=-12))
From normal_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNormalLogPDF(self):
    with self.test_session():
        batch_size = 6
        mu = constant_op.constant([3.0] * batch_size)
        sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)
        x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
        normal = normal_lib.Normal(loc=mu, scale=sigma)
        expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)
        log_pdf = normal.log_prob(x)
        self.assertAllClose(expected_log_pdf, log_pdf.eval())
        self.assertAllEqual(normal.batch_shape().eval(), log_pdf.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), log_pdf.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), log_pdf.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), log_pdf.eval().shape)
        pdf = normal.prob(x)
        self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
        self.assertAllEqual(normal.batch_shape().eval(), pdf.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), pdf.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), pdf.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), pdf.eval().shape)
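The scipy reference value in the test is reproducible by hand from the Gaussian log-density; a standalone sketch:

import numpy as np
from scipy import stats
# log pdf(x) = -0.5*log(2*pi*sigma**2) - (x - mu)**2 / (2*sigma**2)
mu, sigma = 3.0, np.sqrt(10.0)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
manual = -0.5 * np.log(2 * np.pi * sigma**2) - (x - mu)**2 / (2 * sigma**2)
np.testing.assert_allclose(stats.norm(mu, sigma).logpdf(x), manual)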
From normal_test.py
def testNormalCDF(self):
    with self.test_session():
        batch_size = 50
        mu = self._rng.randn(batch_size)
        sigma = self._rng.rand(batch_size) + 1.0
        x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
        normal = normal_lib.Normal(loc=mu, scale=sigma)
        expected_cdf = stats.norm(mu, sigma).cdf(x)
        cdf = normal.cdf(x)
        self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
        self.assertAllEqual(normal.batch_shape().eval(), cdf.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), cdf.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), cdf.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), cdf.eval().shape)
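The reference CDF reduces to the error function; a minimal standalone check (x kept in a moderate range to avoid cancellation in the erf form):

import numpy as np
from scipy import stats
from scipy.special import erf
# Phi((x - mu)/sigma) = 0.5 * (1 + erf((x - mu) / (sigma * sqrt(2))))
mu, sigma = 0.5, 1.3
x = np.linspace(-4.0, 4.0, 50)
manual = 0.5 * (1.0 + erf((x - mu) / (sigma * np.sqrt(2.0))))
np.testing.assert_allclose(stats.norm(mu, sigma).cdf(x), manual)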
From normal_test.py
def testNormalSurvivalFunction(self):
    with self.test_session():
        batch_size = 50
        mu = self._rng.randn(batch_size)
        sigma = self._rng.rand(batch_size) + 1.0
        x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
        normal = normal_lib.Normal(loc=mu, scale=sigma)
        expected_sf = stats.norm(mu, sigma).sf(x)
        sf = normal.survival_function(x)
        self.assertAllClose(expected_sf, sf.eval(), atol=0)
        self.assertAllEqual(normal.batch_shape().eval(), sf.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), sf.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), sf.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), sf.eval().shape)
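The survival function equals 1 - CDF, but scipy evaluates it directly, which preserves precision in the upper tail; a short illustration:

import numpy as np
from scipy import stats
# Identity check at a moderate point:
np.testing.assert_allclose(stats.norm.sf(2.0), 1.0 - stats.norm.cdf(2.0))
# In the far tail, 1 - cdf loses everything to cancellation, sf does not:
print(1.0 - stats.norm.cdf(10.0))  # 0.0
print(stats.norm.sf(10.0))         # ~7.6e-24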
From normal_test.py
def testNormalLogCDF(self):
    with self.test_session():
        batch_size = 50
        mu = self._rng.randn(batch_size)
        sigma = self._rng.rand(batch_size) + 1.0
        x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
        normal = normal_lib.Normal(loc=mu, scale=sigma)
        expected_cdf = stats.norm(mu, sigma).logcdf(x)
        cdf = normal.log_cdf(x)
        self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
        self.assertAllEqual(normal.batch_shape().eval(), cdf.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), cdf.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), cdf.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), cdf.eval().shape)
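The test probes x down to -100 because log_cdf must stay finite where the CDF underflows; the scipy reference shows the same distinction:

import numpy as np
from scipy import stats
x = -100.0
print(np.log(stats.norm.cdf(x)))  # -inf: cdf underflowed to 0.0 (warns)
print(stats.norm.logcdf(x))       # ~-5005.5, evaluated stably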
From normal_test.py
def testNormalEntropy(self):
    with self.test_session():
        mu_v = np.array([1.0, 1.0, 1.0])
        sigma_v = np.array([[1.0, 2.0, 3.0]]).T
        normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
        # scipy.stats.norm cannot deal with these shapes.
        sigma_broadcast = mu_v * sigma_v
        expected_entropy = 0.5 * np.log(
            2 * np.pi * np.exp(1) * sigma_broadcast**2)
        entropy = normal.entropy()
        np.testing.assert_allclose(expected_entropy, entropy.eval())
        self.assertAllEqual(normal.batch_shape().eval(), entropy.get_shape())
        self.assertAllEqual(normal.batch_shape().eval(), entropy.eval().shape)
        self.assertAllEqual(normal.get_batch_shape(), entropy.get_shape())
        self.assertAllEqual(normal.get_batch_shape(), entropy.eval().shape)
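The expected value in the test is the Gaussian differential entropy, H = 0.5*log(2*pi*e*sigma**2), which does not depend on the mean; scipy's frozen norm exposes it directly, so the closed form can be cross-checked standalone:

import numpy as np
from scipy import stats
sigma = 2.0
manual = 0.5 * np.log(2 * np.pi * np.e * sigma**2)
np.testing.assert_allclose(stats.norm(0.0, sigma).entropy(), manual)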
From transformed_distribution_test.py
def testShapeChangingBijector(self):
    with self.test_session():
        softmax = bs.SoftmaxCentered()
        standard_normal = ds.Normal(loc=0., scale=1.)
        multi_logit_normal = self._cls()(
            distribution=standard_normal,
            bijector=softmax)
        x = [[-np.log(3.), 0.],
             [np.log(3), np.log(5)]]
        y = softmax.forward(x).eval()
        expected_log_pdf = (stats.norm(loc=0., scale=1.).logpdf(x) -
                            np.sum(np.log(y), axis=-1))
        self.assertAllClose(expected_log_pdf,
                            multi_logit_normal.log_prob(y).eval())
        self.assertAllClose(
            [1, 2, 3, 2],
            array_ops.shape(multi_logit_normal.sample([1, 2, 3])).eval())
        self.assertAllEqual([2], multi_logit_normal.get_event_shape())
        self.assertAllEqual([2], multi_logit_normal.event_shape().eval())
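The expected_log_pdf line applies the change-of-variables formula log p_Y(y) = log p_X(x) - log|det J(x)|. The same rule, checked on a simpler bijector (exp of a standard normal yields a lognormal):

import numpy as np
from scipy import stats
# Y = exp(X), X ~ N(0, 1): p_Y(y) = p_X(log y) / y  (Jacobian of exp is y)
y = np.array([0.5, 1.0, 2.0, 5.0])
via_change_of_vars = stats.norm(0.0, 1.0).pdf(np.log(y)) / y
np.testing.assert_allclose(stats.lognorm(s=1.0).pdf(y), via_change_of_vars)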
From quantized_distribution_test.py
def testNormalCdfAndSurvivalFunction(self):
    # At integer values, the result should be the same as the standard normal.
    batch_shape = (3, 3)
    mu = rng.randn(*batch_shape)
    sigma = rng.rand(*batch_shape) + 1.0
    with self.test_session():
        qdist = distributions.QuantizedDistribution(
            distribution=distributions.Normal(loc=mu, scale=sigma))
        sp_normal = stats.norm(mu, sigma)
        x = rng.randint(-5, 5, size=batch_shape).astype(np.float64)
        self.assertAllClose(sp_normal.cdf(x), qdist.cdf(x).eval())
        self.assertAllClose(sp_normal.sf(x), qdist.survival_function(x).eval())
From quantized_distribution_test.py
def testNormalLogCdfAndLogSurvivalFunction(self):
    # At integer values, the result should be the same as the standard normal.
    batch_shape = (3, 3)
    mu = rng.randn(*batch_shape)
    sigma = rng.rand(*batch_shape) + 1.0
    with self.test_session():
        qdist = distributions.QuantizedDistribution(
            distribution=distributions.Normal(loc=mu, scale=sigma))
        sp_normal = stats.norm(mu, sigma)
        x = rng.randint(-10, 10, size=batch_shape).astype(np.float64)
        self.assertAllClose(sp_normal.logcdf(x), qdist.log_cdf(x).eval())
        self.assertAllClose(
            sp_normal.logsf(x), qdist.log_survival_function(x).eval())
def getNegLL(self, t, mu, sqrtVar, controls, cases):
    z = (mu - t) / sqrtVar
    logProbControls = stats.norm(0, 1).logcdf(-z)
    logProbCases = stats.norm(0, 1).logcdf(z)
    ll = logProbControls[controls].sum() + logProbCases[cases].sum()
    return -ll
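A self-contained, hedged restatement of the probit-style likelihood above (standalone numpy/scipy; the toy masks below are illustrative, not from the source):

import numpy as np
from scipy import stats

def neg_log_likelihood(t, mu, sqrt_var, controls, cases):
    # Probit model: P(case) = Phi(z), P(control) = Phi(-z), z = (mu - t)/sd.
    z = (mu - t) / sqrt_var
    # logcdf avoids underflow for extreme z, exactly as in getNegLL above.
    return -(stats.norm.logcdf(-z)[controls].sum()
             + stats.norm.logcdf(z)[cases].sum())

# Toy usage: 5 subjects, boolean masks select controls and cases.
mu = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
controls = np.array([True, True, False, False, False])
print(neg_log_likelihood(t=0.0, mu=mu, sqrt_var=1.0, controls=controls,
                         cases=~controls))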
def test_dataobject_set_column_values(self):
    X = array([norm(1.0).rvs(10) for _ in range(1000)])
    y = [None] * 1000
    DO = DataObject(c_[X, y], class_column=len(X[0]))
    assert_equal(len(X[0]), DO.class_column)
    assert_equal(unique(y), DO.classes_)
    classes = [None] + ['1', '2', '3', '4', '5']
    DO = DataObject(c_[X, y], class_column=len(X[0]), classes=classes)
    assert_equal(len(X[0]), DO.class_column)
    assert_equal(classes, DO.classes_)
    X2 = DO.as_2d_array()
    assert_allclose(X2.T[:-1].T.astype(float), X)
    assert_equal(X2.T[-1], y)
    new_y = ["%i" % (divmod(i, 5)[1] + 1) for i in range(len(X))]
    DO.set_column_values(len(X[0]), new_y)
    assert_equal(len(X[0]), DO.class_column)
    assert_equal([None] + list(unique(new_y)), DO.classes_)
    X2 = DO.as_2d_array()
    assert_allclose(X2.T[:-1].T.astype(float), X)
    assert_equal(X2.T[-1], new_y)
def test_outlier_detection(self):
    print("Start of test")
    n_samples = 1000
    norm_dist = stats.norm(0, 1)
    truth = np.ones((n_samples,))
    truth[-100:] = -1
    X0 = norm_dist.rvs(n_samples)
    X = np.c_[X0 * 5, X0 + norm_dist.rvs(n_samples) * 2]
    uniform_dist = stats.uniform(-10, 10)
    X[-100:] = np.c_[uniform_dist.rvs(100), uniform_dist.rvs(100)]
    outlier_detector = pyisc.SklearnOutlierDetector(
        100.0 / n_samples,
        pyisc.P_Gaussian([0, 1])
    )
    outlier_detector.fit(X, np.array([1] * len(X)))
    self.assertLess(outlier_detector.threshold_, 0.35)
    self.assertGreater(outlier_detector.threshold_, 0.25)
    predictions = outlier_detector.predict(X, np.array([1] * len(X)))
    accuracy = sum(truth == predictions) / float(n_samples)
    print("accuracy", accuracy)
    self.assertGreater(accuracy, 0.85)
def nu2phr(nu):
    # Ratio of the Student-t and standard normal densities at zero. The
    # closed form sqrt(2/nu) * Gamma((nu+1)/2) / Gamma(nu/2) (via
    # spesh.gamma) is mathematically identical to the pdf ratio below.
    phr = sps.t.pdf(0.0, nu) / sps.norm.pdf(0.0)
    return phr
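A quick numerical confirmation that the closed form and the pdf ratio agree (standalone scipy sketch):

import numpy as np
from scipy import stats, special
nu = 7.0
closed_form = (np.sqrt(2.0 / nu) * special.gamma((nu + 1.0) / 2.0)
               / special.gamma(nu / 2.0))
ratio = stats.t.pdf(0.0, nu) / stats.norm.pdf(0.0)
np.testing.assert_allclose(closed_form, ratio)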
# read in data
def fc_explicit_param_names(self):
    brew.Register(fc_explicit_param_names)
    model = model_helper.ModelHelper(name="test_model")
    dim_in = 10
    dim_out = 100
    weight_name = 'test_weight_name'
    bias_name = 'test_bias_name'
    inputs_name = 'test_inputs'
    output_name = 'test_output'
    input_distribution = stats.norm()
    inputs = input_distribution.rvs(size=(1, dim_in)).astype(np.float32)
    workspace.FeedBlob(inputs_name, inputs)
    weights = np.random.normal(size=(dim_out, dim_in)).astype(np.float32)
    bias = np.transpose(
        np.random.normal(size=(dim_out, )).astype(np.float32)
    )
    brew.fc_explicit_param_names(
        model,
        inputs_name,
        output_name,
        dim_in=dim_in,
        dim_out=dim_out,
        bias_name=bias_name,
        weight_name=weight_name,
        weight_init=("GivenTensorFill", {'values': weights}),
        bias_init=("GivenTensorFill", {'values': bias})
    )
    workspace.RunNetOnce(model.param_init_net)
    workspace.RunNetOnce(model.net)
    expected_output = np.dot(inputs, np.transpose(weights)) + bias
    outputs_diff = expected_output - workspace.FetchBlob(output_name)
    self.assertEqual(np.linalg.norm(outputs_diff), 0)
def test_simple_hpo():
    def f(args):
        x = args['x']
        return x * x

    s = {'x': {'dist': st.uniform(loc=-10., scale=20), 'lo': -10., 'hi': 10.}}
    trials = []
    # Test fmin and ability to continue adding to trials
    best = fmin(loss_fn=f, space=s, max_evals=40, trials=trials)
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)
    assert len(trials) == 50, "HPO continuation trials not working"
    # Test verbose flag
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)
    yarray = np.array([tr['loss'] for tr in trials])
    np.testing.assert_array_less(yarray, 100.)
    xarray = np.array([tr['x'] for tr in trials])
    np.testing.assert_array_less(np.abs(xarray), 10.)
    assert best['loss'] < 100., "HPO out of range"
    assert np.abs(best['x']) < 10., "HPO out of range"
    # Test unknown distributions
    s2 = {'x': {'dist': 'normal', 'mu': 0., 'sigma': 1.}}
    trials2 = []
    with pytest.raises(ValueError) as excinfo:
        fmin(loss_fn=f, space=s2, max_evals=40, trials=trials2)
    assert "Unknown distribution type for variable" in str(excinfo.value)
    s3 = {'x': {'dist': st.norm(loc=0., scale=1.)}}
    trials3 = []
    fmin(loss_fn=f, space=s3, max_evals=40, trials=trials3)
def onedmodel():
    """One dimensional model with normal prior."""
    mu = -2
    sd = 3
    x = SampledParam(norm, loc=mu, scale=sd)
    like = simple_likelihood
    return [x], like


def multidmodel():
    """Multidimensional model with normal prior."""
    mu = np.array([-6.6, 3, 1.0, -.12])
    sd = np.array([.13, 5, .9, 1.0])
    x = SampledParam(norm, loc=mu, scale=sd)
    like = simple_likelihood
    return [x], like
def forward(self, z, mu, sig):
    self.save_for_backward(z, mu, sig)
    p = st.norm(mu.cpu().numpy(), sig.cpu().numpy())
    return torch.DoubleTensor(
        (self.gamma_under + self.gamma_over) * p.cdf(z.cpu().numpy())
        - self.gamma_under
    ).cuda()

def backward(self, grad_output):
    z, mu, sig = self.saved_tensors
    p = st.norm(mu.cpu().numpy(), sig.cpu().numpy())
    pz = torch.DoubleTensor(p.pdf(z.cpu().numpy())).cuda()
    dz = (self.gamma_under + self.gamma_over) * pz
    dmu = -dz
    dsig = -(self.gamma_under + self.gamma_over) * (z - mu) / sig * pz
    return grad_output * dz, grad_output * dmu, grad_output * dsig
def forward(self, z, mu, sig):
    self.save_for_backward(z, mu, sig)
    p = st.norm(mu.cpu().numpy(), sig.cpu().numpy())
    return torch.DoubleTensor(
        (self.gamma_under + self.gamma_over) * p.pdf(z.cpu().numpy())
    ).cuda()

def backward(self, grad_output):
    z, mu, sig = self.saved_tensors
    p = st.norm(mu.cpu().numpy(), sig.cpu().numpy())
    pz = torch.DoubleTensor(p.pdf(z.cpu().numpy())).cuda()
    dz = -(self.gamma_under + self.gamma_over) * (z - mu) / (sig**2) * pz
    dmu = -dz
    dsig = ((self.gamma_under + self.gamma_over)
            * ((z - mu)**2 - sig**2) / (sig**3) * pz)
    return grad_output * dz, grad_output * dmu, grad_output * dsig
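The hand-written gradients above rest on two scipy identities, d/dz cdf(z) = pdf(z) and d/dz pdf(z) = -(z - mu)/sigma**2 * pdf(z); a hedged finite-difference check:

import numpy as np
from scipy import stats
mu, sig, z, h = 0.3, 1.2, 0.8, 1e-6
p = stats.norm(mu, sig)
# d/dz CDF(z) = pdf(z)
fd_cdf = (p.cdf(z + h) - p.cdf(z - h)) / (2 * h)
np.testing.assert_allclose(fd_cdf, p.pdf(z), rtol=1e-5)
# d/dz pdf(z) = -(z - mu) / sig**2 * pdf(z)
fd_pdf = (p.pdf(z + h) - p.pdf(z - h)) / (2 * h)
np.testing.assert_allclose(fd_pdf, -(z - mu) / sig**2 * p.pdf(z), rtol=1e-5)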
def forward(self, mu, sig):
    nBatch, n = mu.size()
    # Find the solution via sequential quadratic programming,
    # not preserving gradients
    z0 = Variable(1. * mu.data, requires_grad=False)
    mu0 = Variable(1. * mu.data, requires_grad=False)
    sig0 = Variable(1. * sig.data, requires_grad=False)
    for i in range(20):
        dg = GLinearApprox(self.params["gamma_under"],
                           self.params["gamma_over"])(z0, mu0, sig0)
        d2g = GQuadraticApprox(self.params["gamma_under"],
                               self.params["gamma_over"])(z0, mu0, sig0)
        z0_new = SolveSchedulingQP(self.params)(z0, mu0, dg, d2g)
        solution_diff = (z0 - z0_new).norm().data[0]
        print("+ SQP Iter: {}, Solution diff = {}".format(i, solution_diff))
        z0 = z0_new
        if solution_diff < 1e-10:
            break
    # Now that we found the solution, compute the gradient-propagating
    # version at the solution
    dg = GLinearApprox(self.params["gamma_under"],
                       self.params["gamma_over"])(z0, mu, sig)
    d2g = GQuadraticApprox(self.params["gamma_under"],
                           self.params["gamma_over"])(z0, mu, sig)
    return SolveSchedulingQP(self.params)(z0, mu, dg, d2g)
def pdf(self, x):
    # Note: scipy's norm takes the standard deviation (scale) as its second
    # argument, so self.var is assumed to hold a standard deviation here,
    # despite the name.
    return norm(self.mean, self.var).pdf(x)
def test_sample(self):
    """Check sampling from priors."""
    msg = "normal sample incorrect"
    samp = self.nPrior.sample(random_state=10)
    correct = norm(loc=0, scale=1).rvs(random_state=10)
    assert samp == correct, msg
def plot_canonical_gauss(x, obs_mu, obs_sigma, obs_loss,
                         title, epsilon=0.05, breaks=100):
    # compute grid
    mu_grid = np.linspace(start=min(obs_mu) - epsilon,
                          stop=max(obs_mu) + epsilon,
                          num=breaks)
    sigma_grid = np.linspace(start=max(min(obs_sigma) - epsilon, 0.0),
                             stop=max(obs_sigma) + epsilon,
                             num=breaks)
    mu_grid, sigma_grid = np.meshgrid(mu_grid, sigma_grid)
    loss_grid = -np.sum(
        [sp.norm(loc=mu_grid, scale=sigma_grid).logpdf(x=xi) for xi in x],
        axis=0)
    # plot contours and loss
    fig, ax = plt.subplots(nrows=1, ncols=2)
    ax[0].contour(mu_grid, sigma_grid, loss_grid,
                  levels=np.linspace(np.min(loss_grid),
                                     np.max(loss_grid),
                                     breaks),
                  cmap='terrain')
    ax[0].plot(obs_mu, obs_sigma, color='red', alpha=0.5,
               linestyle='dashed', linewidth=1, marker='.', markersize=3)
    ax[0].set_xlabel('mu')
    ax[0].set_ylabel('sigma')
    ax[1].plot(range(len(obs_loss)), obs_loss)
    ax[1].set_xlabel('iter')
    # ax[1].set_ylabel('loss')
    plt.suptitle('{}'.format(title))
    plt.show()
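A hedged usage sketch for plot_canonical_gauss: fit mu and sigma by gradient ascent on the Gaussian log-likelihood of a synthetic sample, recording the trajectory the function plots (the learning rate and data below are illustrative; the module is assumed to import numpy as np and scipy.stats as sp, as the function body implies):

np.random.seed(0)
x = sp.norm(loc=1.0, scale=0.5).rvs(size=50)

mu, sigma, lr = 0.0, 1.0, 0.001
obs_mu, obs_sigma, obs_loss = [], [], []
for _ in range(200):
    obs_mu.append(mu)
    obs_sigma.append(sigma)
    obs_loss.append(-sp.norm(mu, sigma).logpdf(x).sum())
    # Gradient ascent on the log-likelihood in (mu, sigma).
    mu += lr * np.sum(x - mu) / sigma**2
    sigma += lr * np.sum((x - mu)**2 - sigma**2) / sigma**3
plot_canonical_gauss(x, obs_mu, obs_sigma, obs_loss, title='Gaussian MLE path')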