def isotropic_mean_shift(self):
    """normalized last mean shift, which is N(0, I)-distributed
    under random selection.

    Caveat: while its length is finite and close to sqrt(n) under
    random selection, the length of the normalized mean shift under
    *systematic* selection (e.g. on a linear function) tends to
    infinity for mueff -> inf. Hence it must be used with great
    care for large mueff.
    """
    z = self.sm.transform_inverse((self.mean - self.mean_old) /
                                  self.sigma_vec.scaling)
    # works unless a re-parametrisation has been done
    # assert Mh.vequals_approximately(z, np.dot(es.B, (1. / es.D) *
    #         np.dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec)))
    z /= self.sigma * self.sp.cmean
    z *= self.sp.weights.mueff**0.5
    return z
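Under random selection the returned vector is close to standard normal, so its length concentrates near sqrt(n). A self-contained numerical check of that baseline, using plain NumPy and independent of the CMA-ES class above:

import numpy as np

rng = np.random.RandomState(1)
n = 100
# norms of N(0, I) vectors concentrate around sqrt(n)
lengths = np.linalg.norm(rng.randn(10000, n), axis=1)
print(lengths.mean())  # close to sqrt(100) = 10
print(np.sqrt(n))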
def _random_vec(sites, ldim, randstate=None, dtype=np.complex_):
    """Returns a random complex vector (normalized to ||x||_2 = 1) of shape
    (ldim,) * sites, i.e. a pure state with local dimension `ldim` living on
    `sites` sites.

    :param sites: Number of local sites
    :param ldim: Local dimension
    :param randstate: numpy.random.RandomState instance or None
    :returns: numpy.ndarray of shape (ldim,) * sites

    >>> psi = _random_vec(5, 2); psi.shape
    (2, 2, 2, 2, 2)
    >>> np.abs(np.vdot(psi, psi) - 1) < 1e-6
    True
    """
    shape = (ldim, ) * sites
    psi = _randfuncs[dtype](shape, randstate=randstate)
    psi /= np.linalg.norm(psi)
    return psi
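`_randfuncs` is referenced but not shown in this snippet. A plausible minimal definition (an assumption, not the original source) dispatches on dtype to a sampler such as `_standard_normal`, defined further below; the name is only resolved at call time, so the ordering is harmless:

import numpy as np

# Assumed dispatch table (not part of the original snippet): maps a dtype
# to a sampler with signature f(shape, randstate=...). The lambdas also
# normalize randstate=None to the global np.random module.
_randfuncs = {
    np.float_: lambda shape, randstate=None: _standard_normal(
        shape, randstate or np.random, np.float_),
    np.complex_: lambda shape, randstate=None: _standard_normal(
        shape, randstate or np.random, np.complex_),
}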
def random_mps(sites, ldim, rank, randstate=None, force_rank=False):
    """Returns a randomly chosen normalized matrix product state

    :param sites: Number of sites
    :param ldim: Local dimension
    :param rank: Rank
    :param randstate: numpy.random.RandomState instance or None
    :param force_rank: If True, the rank is exactly `rank`.
        Otherwise, it might be reduced if we reach the maximum sensible rank.
    :returns: randomly chosen matrix product (pure) state

    >>> mps = random_mps(4, 2, 10, force_rank=True)
    >>> mps.ranks, mps.shape
    ((10, 10, 10), ((2,), (2,), (2,), (2,)))
    >>> mps.canonical_form
    (0, 4)
    >>> round(abs(1 - mp.inner(mps, mps)), 10)
    0.0
    """
    return random_mpa(sites, ldim, rank, normalized=True, randstate=randstate,
                      force_rank=force_rank, dtype=np.complex_)
def _standard_normal(shape, randstate=np.random, dtype=np.float_):
    """Generates a standard normal numpy array of given shape and dtype, i.e.
    this function is equivalent to `randstate.randn(*shape)` for real dtype
    and `randstate.randn(*shape) + 1.j * randstate.randn(*shape)` for complex
    dtype.

    :param tuple shape: Shape of array to be returned
    :param randstate: An instance of :class:`numpy.random.RandomState`
        (default is ``np.random``)
    :param dtype: ``np.float_`` (default) or ``np.complex_``
    :returns: An array of given shape and dtype with standard normal entries
    """
    if dtype == np.float_:
        return randstate.randn(*shape)
    elif dtype == np.complex_:
        return randstate.randn(*shape) + 1.j * randstate.randn(*shape)
    else:
        raise ValueError('{} is not a valid dtype.'.format(dtype))
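A quick demonstration of both dtypes:

import numpy as np

rs = np.random.RandomState(0)
print(_standard_normal((2, 3), randstate=rs).dtype)                     # float64
print(_standard_normal((2, 3), randstate=rs, dtype=np.complex_).dtype)  # complex128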
def setup_params(self, data):
    keys = ('fun_data', 'fun_y', 'fun_ymin', 'fun_ymax')
    if not any(self.params[k] for k in keys):
        raise PlotnineError('No summary function')

    if self.params['fun_args'] is None:
        self.params['fun_args'] = {}

    if 'random_state' not in self.params['fun_args']:
        # normalize random_state: None -> global module,
        # int -> seeded RandomState, instance -> used as-is
        random_state = self.params['random_state']
        if random_state is None:
            random_state = np.random
        elif isinstance(random_state, int):
            random_state = np.random.RandomState(random_state)
        self.params['fun_args']['random_state'] = random_state
    return self.params
def bootstrap_statistics(series, statistic, n_samples=1000,
                         confidence_interval=0.95, random_state=None):
    """
    Default parameters taken from
    R's Hmisc smean.cl.boot
    """
    if random_state is None:
        random_state = np.random

    alpha = 1 - confidence_interval
    size = (n_samples, len(series))
    inds = random_state.randint(0, len(series), size=size)
    samples = series.values[inds]
    means = np.sort(statistic(samples, axis=1))
    return pd.DataFrame({'ymin': means[int((alpha/2)*n_samples)],
                         'ymax': means[int((1-alpha/2)*n_samples)],
                         'y': [statistic(series)]})
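A usage sketch, assuming numpy and pandas are imported as in the snippet (`np`, `pd`): bootstrap a 95% confidence interval for the mean of a small series.

import numpy as np
import pandas as pd

series = pd.Series(np.random.RandomState(0).randn(200) + 5.0)
ci = bootstrap_statistics(series, np.mean,
                          random_state=np.random.RandomState(42))
print(ci)  # one row with y ~ 5 and ymin/ymax bracketing it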
def mahalanobis_norm(self, dx):
    """compute the Mahalanobis norm that is induced by the adapted
    sample distribution, covariance matrix ``C`` times ``sigma**2``,
    including ``sigma_vec``. The expected Mahalanobis distance to
    the sample mean is about ``sqrt(dimension)``.

    Argument
    --------
    A *genotype* difference `dx`.

    Example
    -------
    >>> import cma, numpy
    >>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1)
    >>> xx = numpy.random.randn(2, 10)
    >>> d = es.mahalanobis_norm(es.gp.geno(xx[0] - xx[1]))

    `d` is the distance "in" the true sample distribution,
    sampled points have a typical distance of ``sqrt(2*es.N)``,
    where ``es.N`` is the dimension, and an expected distance of
    close to ``sqrt(N)`` to the sample mean. In the example,
    `d` is the Euclidean distance, because C = I and sigma = 1.
    """
    return sqrt(sum((self.D**-1. * np.dot(self.B.T, dx / self.sigma_vec))**2)) / self.sigma
def __call__(self, x, inverse=False):  # function when calling an object
    """Rotates the input array `x` with a fixed rotation matrix
    (``self.dicMatrices[str(len(x))]``)
    """
    x = np.array(x, copy=False)
    N = x.shape[0]  # can be an array or matrix, TODO: accept also a list of arrays?
    if str(N) not in self.dicMatrices:  # create new N-basis once and for all
        rstate = np.random.get_state()
        if self.seed:
            np.random.seed(self.seed)
        else:
            np.random.seed()
        B = np.random.randn(N, N)
        for i in range(N):
            # Gram-Schmidt: orthogonalize row i against rows 0..i-1,
            # then normalize it to unit length
            for j in range(0, i):
                B[i] -= np.dot(B[i], B[j]) * B[j]
            B[i] /= sum(B[i]**2)**0.5
        self.dicMatrices[str(N)] = B
        np.random.set_state(rstate)
    if inverse:
        return np.dot(self.dicMatrices[str(N)].T, x)  # compute inverse rotation
    else:
        return np.dot(self.dicMatrices[str(N)], x)  # compute rotation
# Use rotate(x) to rotate x
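Because the rows of B are built by Gram-Schmidt, B is orthogonal, so rotating and then inverse-rotating must return the input. A sketch of that property; the snippet only shows `__call__`, so `Rotation` is a stand-in name and its `__init__` is assumed to set `self.dicMatrices = {}` and `self.seed = None`:

import numpy as np

rotate = Rotation()  # hypothetical instantiation; class name assumed
x = np.arange(1.0, 6.0)
y = rotate(x)                                    # apply the fixed rotation
print(np.allclose(rotate(y, inverse=True), x))   # True: B.T @ B = I
print(np.isclose(np.linalg.norm(y), np.linalg.norm(x)))  # norms preserved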
def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
    """Ellipsoid test objective function"""
    if not isscalar(x[0]):  # parallel evaluation
        return [self.elli(xi, rot) for xi in x]  # could save 20% overall
    if rot:
        x = rotate(x)
    N = len(x)
    if actuator_noise:
        x = x + actuator_noise * np.random.randn(N)

    ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)

    alpha = 0.49 + 1. / N
    beta = 1
    felli = np.random.rand(1)[0]**beta * ftrue * \
        max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
    # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
    #         np.abs(np.random.randn(1)[0]))**0
    if both:
        return (felli, ftrue)
    else:
        # return felli  # possibly noisy value
        return ftrue  # + np.random.randn()
def __init__(
    self,
    batch_size,
    training_epoch_size=None,
    no_stub_batch=False,
    shuffle=None,
    seed=None,
    buffer_size=2,
):
    self.batch_size = batch_size
    self.training_epoch_size = training_epoch_size
    self.no_stub_batch = no_stub_batch
    self.shuffle = shuffle
    if seed is not None:
        self.random = np.random.RandomState(seed)
    else:
        self.random = np.random
    self.buffer_size = buffer_size
def test_process_image(compress, out_dir):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)
    meta = {
        "DNA": "/User/jcaciedo/LUAD/dna.tiff",
        "ER": "/User/jcaciedo/LUAD/er.tiff",
        "Mito": "/User/jcaciedo/LUAD/mito.tiff"
    }
    compress.stats["illum_correction_function"] = numpy.ones((16, 16, 3))
    compress.stats["upper_percentiles"] = [255, 255, 255]
    compress.stats["lower_percentiles"] = [0, 0, 0]

    compress.process_image(0, image, meta)

    filenames = glob.glob(os.path.join(out_dir, "*"))
    real_filenames = [os.path.join(out_dir, x)
                      for x in ["dna.png", "er.png", "mito.png"]]
    filenames.sort()
    assert real_filenames == filenames

    for i in range(3):
        # note: scipy.misc.imread was removed in newer SciPy releases;
        # imageio.imread is the usual drop-in replacement
        data = scipy.misc.imread(filenames[i])
        numpy.testing.assert_array_equal(image[:, :, i], data)
def test_apply(corrector):
    image = numpy.random.randint(256, size=(24, 24, 3), dtype=numpy.uint16)
    illum_corr_func = numpy.random.rand(24, 24, 3)
    illum_corr_func /= illum_corr_func.min()
    corrector.illum_corr_func = illum_corr_func

    corrected = corrector.apply(image)
    expected = image / illum_corr_func

    assert corrected.shape == (24, 24, 3)
    numpy.testing.assert_array_equal(corrected, expected)
def estimate(self, context, data):
    pdf = ScaleMixture()
    alpha = context.prior.alpha
    beta = context.prior.beta
    d = context._d

    if len(data.shape) == 1:
        data = data[:, numpy.newaxis]

    a = alpha + 0.5 * d * len(data.shape)
    b = beta + 0.5 * data.sum(-1) ** 2

    s = numpy.clip(numpy.random.gamma(a, 1. / b), 1e-20, 1e10)

    pdf.scales = s
    context.prior.estimate(s)
    pdf.prior = context.prior
    return pdf
def testRandom(self):
    ig = InverseGaussian(1., 1.)
    samples = ig.random(1000000)
    mu = numpy.mean(samples)
    var = numpy.var(samples)

    self.assertAlmostEqual(ig.mu, mu, delta=1e-1)
    self.assertAlmostEqual(ig.mu ** 3 / ig.shape, var, delta=1e-1)

    ig = InverseGaussian(3., 6.)
    samples = ig.random(1000000)
    mu = numpy.mean(samples)
    var = numpy.var(samples)

    self.assertAlmostEqual(ig.mu, mu, delta=1e-1)
    self.assertAlmostEqual(ig.mu ** 3 / ig.shape, var, delta=5e-1)
def testRandom(self):
    from scipy.special import kv
    from numpy import sqrt

    a = 2.
    b = 1.
    p = 1
    gig = GeneralizedInverseGaussian(a, b, p)
    samples = gig.random(10000)

    mu_analytical = sqrt(b) * kv(p + 1, sqrt(a * b)) / (sqrt(a) * kv(p, sqrt(a * b)))
    var_analytical = b * kv(p + 2, sqrt(a * b)) / a / kv(p, sqrt(a * b)) - mu_analytical ** 2

    mu = numpy.mean(samples)
    var = numpy.var(samples)

    self.assertAlmostEqual(mu_analytical, mu, delta=1e-1)
    self.assertAlmostEqual(var_analytical, var, delta=1e-1)
def _check_random_state(seed):
    """Turn seed into a np.random.RandomState instance

    If seed is None, return the RandomState singleton used by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, int):
        return np.random.RandomState(seed)
    if isinstance(seed, np.random.RandomState):
        return seed
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
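The three accepted inputs in action, using only the helper above:

import numpy as np

rs_global = _check_random_state(None)     # singleton behind the np.random module
rs_seeded = _check_random_state(42)       # fresh RandomState seeded with 42
rs_same = _check_random_state(rs_seeded)  # instances pass through unchanged
print(rs_same is rs_seeded)               # True
print(rs_seeded.randint(0, 10, size=3))   # reproducible for a fixed seed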
def test_poisson(self):
    """Tests that Gibbs sampling the initial process yields a Poisson process."""
    nt = 50
    ns = 1000
    num_giter = 5

    net = self.poisson
    times = []
    for i in range(ns):
        arrv = net.sample(nt)
        obs = arrv.subset(lambda a, e: a.is_last_in_queue(e), copy_evt)
        gsmp = net.gibbs_resample(arrv, 0, num_giter)
        resampled = gsmp[-1]
        evts = resampled.events_of_task(2)
        times.append(evts[0].d)

    exact_sample = [numpy.random.gamma(shape=3, scale=0.5) for i in range(ns)]

    times.sort()
    exact_sample.sort()
    print(summarize(times))
    print(summarize(exact_sample))
    netutils.check_quantiles(self, exact_sample, times, ns)
def _sample_testset(self, data):
    test_sample = self.test_sample
    if not isinstance(test_sample, int):
        return data

    userid, feedback = self.fields.userid, self.fields.feedback
    if test_sample > 0:
        sampled = (data.groupby(userid, sort=False, group_keys=False)
                       .apply(random_choice, test_sample,
                              self.random_state or np.random))
    elif test_sample < 0:  # leave only the most negative feedback from user
        idx = (data.groupby(userid, sort=False)[feedback]
                   .nsmallest(-test_sample).index.get_level_values(1))
        sampled = data.loc[idx]
    else:
        sampled = data
    return sampled
def elastic_transform(image, alpha, sigma, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape[1:]
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma,
                         mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma,
                         mode="constant", cval=0) * alpha

    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))

    # return map_coordinates(image, indices, order=1).reshape(shape)
    # apply the same displacement field to every channel
    res = np.zeros_like(image)
    for i in range(image.shape[0]):
        res[i] = map_coordinates(image[i], indices, order=1).reshape(shape)
    return res
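A minimal smoke test of the function above (assumes the imports it relies on, `gaussian_filter` and `map_coordinates` from `scipy.ndimage`):

import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

# a 3-channel 32x32 image; all channels share one displacement field
img = np.random.RandomState(0).rand(3, 32, 32).astype(np.float32)
warped = elastic_transform(img, alpha=34, sigma=4)
print(warped.shape)                      # (3, 32, 32)
print(np.abs(warped - img).mean() > 0)   # True: pixels actually moved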
def load_augment(fname, w, h, aug_params=no_augmentation_params,
                 transform=None, sigma=0.0, color_vec=None):
    """Load augmented image with output shape (w, h).

    Default arguments return a non-augmented image of shape (w, h).
    To apply a fixed transform (color augmentation), specify transform
    (color_vec).
    To generate a random augmentation, specify aug_params and sigma.
    """
    img = load_image(fname)
    if transform is None:
        img = perturb(img, augmentation_params=aug_params, target_shape=(w, h))
    else:
        img = perturb_fixed(img, tform_augment=transform, target_shape=(w, h))
    np.subtract(img, MEAN[:, np.newaxis, np.newaxis], out=img)
    np.divide(img, STD[:, np.newaxis, np.newaxis], out=img)
    img = augment_color(img, sigma=sigma, color_vec=color_vec)
    return img
def form_set_data(labels, max_num, verbose=False):
    """Generate label sets from sample labels.

    For each sample, generate a set by random sampling within the same class.
    Each set holds at most `max_num` sample ids.
    """
    # group sample ids based on label.
    label_ids = {}
    for idx in range(labels.size):
        if labels[idx] not in label_ids:
            label_ids[labels[idx]] = []
        label_ids[labels[idx]].append(idx)

    set_ids = {}
    for idx in range(labels.size):
        samp_ids = label_ids[labels[idx]][:]
        samp_num = min(max_num, len(samp_ids))
        set_ids[idx] = rand.sample(samp_ids, samp_num)
        if verbose:
            print("set {} formed.".format(idx))
    return set_ids
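A small sketch of the function above on a toy label array (assumes `rand` is the stdlib `random` module imported under that name):

import random as rand
import numpy as np

labels = np.array([0, 0, 0, 1, 1, 2])
sets = form_set_data(labels, max_num=2)
print(sets[0])  # two ids drawn from {0, 1, 2}, the class-0 samples
print(sets[5])  # [5]: class 2 has a single sample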
def rvs(cls, a, b, size=1, random_state=None):
    """Draw random variates.

    Parameters
    ----------
    a : float or array-like
    b : float or array-like
    size : int, optional
    random_state : RandomState, optional

    Returns
    -------
    np.array
    """
    # u ~ Uniform(a, b), so x = exp(u) is log-uniformly distributed
    # on [exp(a), exp(b)]
    u = ss.uniform.rvs(loc=a, scale=b - a, size=size, random_state=random_state)
    x = np.exp(u)
    return x
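A quick check of the log-uniform property. The snippet takes `cls` as its first parameter (presumably a classmethod in the original class); the sketch below treats it as a free function and passes a placeholder, and assumes `scipy.stats` is imported as `ss` per the snippet:

import numpy as np

x = rvs(None, a=np.log(1.0), b=np.log(100.0), size=100000,
        random_state=np.random.RandomState(0))
print(x.min() >= 1.0 and x.max() <= 100.0)  # True: support is [exp(a), exp(b)]
print(np.median(x))                         # ~10, the geometric midpoint of [1, 100]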
def rvs(cls, b, size=1, random_state=None):
    """Get random variates.

    Parameters
    ----------
    b : float
    size : int or tuple, optional
    random_state : RandomState, optional

    Returns
    -------
    arraylike
    """
    u = ss.uniform.rvs(loc=0, scale=1, size=size, random_state=random_state)
    # inverse-CDF sampling of the symmetric triangular distribution
    # on [-b, b] with its peak at 0
    t1 = np.where(u < 0.5, np.sqrt(2. * u) * b - b, -np.sqrt(2. * (1. - u)) * b + b)
    return t1
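And a sanity check of that inverse-CDF transform (same calling assumptions as the previous sketch):

import numpy as np

t = rvs(None, b=2.0, size=200000, random_state=np.random.RandomState(1))
print(t.min() >= -2.0 and t.max() <= 2.0)  # True: support is [-b, b]
print(abs(t.mean()) < 0.02)                # mean ~ 0 by symmetry
print(abs(t.var() - 2.0**2 / 6) < 0.05)    # Var = b**2 / 6 for this triangle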