def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
"""``prctile(data, 50)`` returns the median, but p_vals can
also be a sequence.
    For small samples or extreme percentiles this arguably gives better
    values than matplotlib.mlab.prctile or np.percentile, but it is also slower.
"""
ps = [p_vals] if np.isscalar(p_vals) else p_vals
if not sorted_:
data = sorted(data)
n = len(data)
d = []
for p in ps:
fi = p * n / 100 - 0.5
if fi <= 0: # maybe extrapolate?
d.append(data[0])
elif fi >= n - 1:
d.append(data[-1])
else:
i = int(fi)
d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
return d[0] if np.isscalar(p_vals) else d
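# A minimal usage sketch of the prctile above (hedged: assumes `import numpy
# as np` is in scope); values follow from the rule fi = p*n/100 - 0.5:
def _demo_prctile():
    sample = [1, 2, 3, 4]
    assert prctile(sample, 50) == 2.5                # median
    assert prctile(sample, [25, 75]) == [1.5, 3.5]   # quartiles
    # np.percentile interpolates differently: [1.75, 3.25] for this sample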
def discretize(self, ts, bins=None, global_min=None, global_max=None):
if bins is None:
bins = self._bins
if np.isscalar(bins):
num_bins = bins
        min_value = ts.min()
        max_value = ts.max()
        if min_value == max_value:
            # constant series: fall back to the global range for the bin edges
            min_value = global_min
            max_value = global_max
        step = (max_value - min_value) / num_bins
        ts_bins = np.arange(min_value, max_value, step)
else:
ts_bins = bins
inds = np.digitize(ts, ts_bins)
binned_ts = tuple(str(i - 1) for i in inds)
return binned_ts
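# A numpy-only sketch of the binning step above: np.digitize maps each value
# to the index of the edge interval it falls into (edges here are
# hypothetical example values).
def _demo_digitize():
    import numpy as np
    ts = np.array([0.1, 0.4, 0.9])
    edges = np.arange(0.0, 1.0, 0.25)   # [0.0, 0.25, 0.5, 0.75]
    inds = np.digitize(ts, edges)       # -> array([1, 2, 4])
    assert tuple(str(i - 1) for i in inds) == ('0', '1', '3')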
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
    b = np.eye(3, dtype=bool)  # np.bool was removed from NumPy; use builtin bool
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
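# A self-contained illustration of why the assertions above hold: np.vdot
# flattens both arguments and conjugates the first, so for the 3x3 identity
# it sums three ones, and conj(1j)*1j = 1 keeps the complex case real.
def _demo_vdot():
    import numpy as np
    assert np.vdot(np.eye(3), np.eye(3)) == 3.0
    assert np.vdot(np.eye(3) * 1j, np.eye(3) * 1j) == 3.0 + 0j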
def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
"""``prctile(data, 50)`` returns the median, but p_vals can
also be a sequence.
    For small samples this gives better values than matplotlib.mlab.prctile,
    but it is also slower.
"""
    ps = [p_vals] if np.isscalar(p_vals) else p_vals
if not sorted_:
data = sorted(data)
n = len(data)
d = []
for p in ps:
fi = p * n / 100 - 0.5
if fi <= 0: # maybe extrapolate?
d.append(data[0])
elif fi >= n - 1:
d.append(data[-1])
else:
i = int(fi)
d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
    return d[0] if np.isscalar(p_vals) else d
def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
"""Ellipsoid test objective function"""
    if not np.isscalar(x[0]):  # parallel evaluation
        return [self.elli(xi, rot) for xi in x]  # could save 20% overall
    if rot:
        x = rotate(x)  # rotate is a helper defined in the surrounding module
N = len(x)
if actuator_noise:
x = x + actuator_noise * np.random.randn(N)
ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)
alpha = 0.49 + 1. / N
beta = 1
felli = np.random.rand(1)[0]**beta * ftrue * \
max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
# felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
# np.abs(np.random.randn(1)[0]))**0
if both:
return (felli, ftrue)
else:
# return felli # possibly noisy value
return ftrue # + np.random.randn()
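# A minimal sketch of the noiseless value ftrue computed above: coordinate i
# is weighted by cond**(i/(N-1)), so the axes span a condition number of
# `cond` overall.
def _demo_elli_weights():
    import numpy as np
    x, cond, N = np.ones(4), 1e6, 4
    ftrue = sum(cond ** (np.arange(N) / (N - 1.)) * x ** 2)
    assert np.isclose(ftrue, 1e0 + 1e2 + 1e4 + 1e6)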
def compute_policy_gradient_full_correction(
action_distrib, action_distrib_mu, action_value, v,
truncation_threshold):
"""Compute off-policy bias correction term wrt all actions."""
assert truncation_threshold is not None
assert np.isscalar(v)
with chainer.no_backprop_mode():
rho_all_inv = compute_full_importance(action_distrib_mu,
action_distrib)
correction_weight = (
np.maximum(1 - truncation_threshold * rho_all_inv,
np.zeros_like(rho_all_inv)) *
action_distrib.all_prob.data[0])
correction_advantage = action_value.q_values.data[0] - v
return -F.sum(correction_weight *
action_distrib.all_log_prob *
correction_advantage, axis=1)
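# A numpy-only sketch of the truncation weight used above, with hypothetical
# probabilities, assuming compute_full_importance returns mu/pi (i.e.
# rho_all_inv): only actions with pi/mu above the threshold get a nonzero
# correction weight.
def _demo_truncation_weight():
    import numpy as np
    pi = np.array([0.7, 0.2, 0.1])    # current policy
    mu = np.array([0.1, 0.5, 0.4])    # behaviour policy
    rho_all_inv = mu / pi
    c = 1.0                           # truncation_threshold
    w = np.maximum(1 - c * rho_all_inv, np.zeros_like(rho_all_inv)) * pi
    assert np.allclose(w, [0.6, 0.0, 0.0])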
def compute_policy_gradient_sample_correction(
action_distrib, action_distrib_mu, action_value, v,
truncation_threshold):
"""Compute off-policy bias correction term wrt a sampled action."""
assert np.isscalar(v)
assert truncation_threshold is not None
with chainer.no_backprop_mode():
sample_action = action_distrib.sample().data
rho_dash_inv = compute_importance(
action_distrib_mu, action_distrib, sample_action)
if (truncation_threshold > 0 and
rho_dash_inv >= 1 / truncation_threshold):
return chainer.Variable(np.asarray([0], dtype=np.float32))
correction_weight = max(0, 1 - truncation_threshold * rho_dash_inv)
assert correction_weight <= 1
q = float(action_value.evaluate_actions(sample_action).data[0])
correction_advantage = q - v
return -(correction_weight *
action_distrib.log_prob(sample_action) *
correction_advantage)
def __call__(self, x):
"""Return the GMM likelihood for given point(s).
See :eq:`gmm-likelihood`.
Arguments
---------
x : scalar (or) 1D array of reals
Point(s) at which likelihood needs to be computed
Returns
-------
scalar (or) 1D array
Likelihood values at the given point(s)
"""
if np.isscalar(x):
return self.get_gmm_pdf(x)
else:
return np.array([self.get_gmm_pdf(t) for t in x])
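# get_gmm_pdf itself is not shown; a minimal standalone sketch of such a
# mixture density (weights, means and stds are hypothetical example
# parameters, not attributes of the class above):
def gmm_pdf_sketch(x, weights=(0.3, 0.7), means=(-1.0, 2.0), stds=(0.5, 1.0)):
    from scipy.stats import norm
    # weighted sum of the Gaussian component densities
    return sum(w * norm.pdf(x, m, s) for w, m, s in zip(weights, means, stds))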
def convert_atoms(self, row):
numbers = row.get('numbers')
positions = row.get('positions').astype(self.floatX)
pbc = row.get('pbc')
cell = row.get('cell').astype(self.floatX)
features = [numbers, positions, cell, pbc]
for k in list(self.kvp.keys()):
f = row[k]
if np.isscalar(f):
f = np.array([f])
if f.dtype in [np.float16, np.float32, np.float64]:
f = f.astype(self.floatX)
features.append(f)
for k in list(self.data.keys()):
f = np.array(row.data[k])
        if f.ndim == 0:  # np.array(...) never satisfies np.isscalar; test ndim
            f = np.array([f])
if f.dtype in [np.float16, np.float32, np.float64]:
f = f.astype(self.floatX)
features.append(f)
return features
def __init__(self, A, shape0):
LinTrans.__init__(self)
self.A = A
if np.isscalar(shape0):
shape0 = (shape0,)
self.shape0 = shape0
# Compute the output shape
# Note that A.dot(x) operates on the second to last axis of x
Ashape = A.shape
shape1 = np.array(shape0)
if len(shape0) == 1:
self.aaxis = 0
else:
self.aaxis = len(shape0)-2
shape1[self.aaxis] = Ashape[0]
self.shape1 = tuple(shape1)
# Set SVD terms to not computed
self.svd_computed = False
self.svd_avail = True
def __init__(self, zval, pz, shape, var_axes=(0,),
             is_complex=False):
Estim.__init__(self)
# Convert scalars to arrays
if np.isscalar(zval):
zval = np.array([zval])
if np.isscalar(pz):
pz = np.array([pz])
# Set parameters
self.zval = zval
self.pz = pz
self.shape = shape
self.is_complex = is_complex
self.fz = -np.log(pz)
# Set the variance axes
if var_axes == 'all':
ndim = len(shape)
var_axes = tuple(range(ndim))
self.var_axes = var_axes
self.cost_avail = True
def test_dense_embeddings(make_categories, reps, layer):
"""Test the embedding layer."""
x, K = make_categories
x = np.repeat(x, reps, axis=-1)
N = len(x)
S = 3
x_, X_ = _make_placeholders(x, S, tf.int32)
output, reg = layer(output_dim=D, n_categories=K)(X_)
tc = tf.test.TestCase()
with tc.test_session():
tf.global_variables_initializer().run()
r = reg.eval()
assert np.isscalar(r)
assert r >= 0
Phi = output.eval(feed_dict={x_: x})
assert Phi.shape == (S, N, D * reps)
def test_dense_outputs(dense, make_data):
"""Make sure the dense layers output expected dimensions."""
x, _, _ = make_data
S = 3
x_, X_ = _make_placeholders(x, S)
N = x.shape[0]
Phi, KL = dense(output_dim=D)(X_)
tc = tf.test.TestCase()
with tc.test_session():
tf.global_variables_initializer().run()
P = Phi.eval(feed_dict={x_: x})
assert P.shape == (S, N, D)
assert P.dtype == np.float32
assert np.isscalar(KL.eval(feed_dict={x_: x}))
def test_kl_gaussian_normal(random):
"""Test Gaussian/Normal KL."""
dim = (5, 10)
Dim = (5, 10, 10)
mu0 = random.randn(*dim).astype(np.float32)
L0 = random_chol(Dim)
q = MultivariateNormalTriL(mu0, L0)
mu1 = random.randn(*dim).astype(np.float32)
std1 = 1.0
L1 = [(std1 * np.eye(dim[1])).astype(np.float32) for _ in range(dim[0])]
p = tf.distributions.Normal(mu1, std1)
KL = kl_sum(q, p)
KLr = KLdiv(mu0, L0, mu1, L1)
tc = tf.test.TestCase()
with tc.test_session():
kl = KL.eval()
assert np.isscalar(kl)
assert np.allclose(kl, KLr)
def _determine_channels_and_depth(layers, depth, color_mode):
# type: (List[Layer], Optional[int], int) -> Tuple[int, int]
num_channels = 0
for image in _iterate_all_images(layers):
if (image.color_mode is not None and
image.color_mode != color_mode):
raise ValueError("Mismatched color mode")
for index, channel in image.channels.items():
if np.isscalar(channel):
continue
num_channels = max(num_channels, index + 1)
channel_depth = channel.dtype.itemsize * 8
if depth is None:
depth = channel_depth
elif depth != channel_depth:
raise ValueError("Different image depths in input")
if num_channels == 0 or depth is None:
raise ValueError("Can't determine num channels or depth")
return num_channels, depth
def _f(let):
    """
    The f function from the Dasu paper, equation (7) in
    https://doi.org/10.1093/jrr/rru020.
    Takes a LET cube as parameter; input may be either numpy.array or scalar.
    TODO: handle real cubes.
    :param let: LET in [keV/um]
    :returns: result of the f function
    """
    ld = 86.0
    with np.errstate(divide='ignore', invalid='ignore'):
        result = (1 - np.exp(-let / ld) * (1 + let / ld)) * ld / let
    # zero-LET entries divide to NaN; map them to 0.0
    if np.isscalar(result):  # scalar
        if not np.isfinite(result):
            result = 0.0
    else:
        result[~np.isfinite(result)] = 0.0  # numpy arrays
    return result
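# Quick numeric check of _f above: at let == ld the value is 1 - 2/e
# (about 0.264), and zero-LET entries come out as 0.0 rather than NaN.
def _demo_f():
    import numpy as np
    assert np.isclose(_f(86.0), 1 - 2 / np.e)
    assert np.allclose(_f(np.array([0.0, 86.0])), [0.0, 1 - 2 / np.e])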
def sparse_to_dense(voxel_data, dims, dtype=bool):  # np.bool was removed from NumPy
if voxel_data.ndim != 2 or voxel_data.shape[0] != 3:
raise ValueError('voxel_data is wrong shape; should be 3xN array.')
if np.isscalar(dims):
dims = [dims] * 3
dims = np.atleast_2d(dims).T
# truncate to integers
    xyz = voxel_data.astype(int)
# discard voxels that fall outside dims
valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
xyz = xyz[:, valid_ix]
out = np.zeros(dims.flatten(), dtype=dtype)
out[tuple(xyz)] = True
return out
# def get_linear_index(x, y, z, dims):
#     """ Assuming xzy order (y increasing fastest).
#     TODO ensure this is right when dims are not all same
#     """
#     return x*(dims[1]*dims[2]) + z*dims[1] + y
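# A small usage sketch: three voxel coordinates (one per column) into a
# 4x4x4 dense boolean grid.
def _demo_sparse_to_dense():
    import numpy as np
    coords = np.array([[0, 1, 3],
                       [0, 2, 3],
                       [0, 3, 3]])   # 3xN array, one (x, y, z) per column
    grid = sparse_to_dense(coords, 4)
    assert grid.shape == (4, 4, 4) and grid.sum() == 3
    assert grid[0, 0, 0] and grid[1, 2, 3] and grid[3, 3, 3]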
def __init__(self, lower, upper, shape=None):
"""Initialize BoundedSpace.
Parameters
----------
lower : array-like
Lower bound of the space. Either an array or an integer.
Must agree with the input of the upper bound.
upper : array-like
Upper bound of the space. Either an array or an integer. Must
agree with the input of the lower bound.
    shape : integer or tuple
        Shape of the bounds. Ignored if the bounds are non-scalar;
        if the bounds are scalar, it must be set.
"""
if (np.isscalar(lower) and np.isscalar(upper)):
assert shape is not None, "Shape must be set, if bounds are scalar"
self.lower = np.zeros(shape) + lower
self.upper = np.zeros(shape) + upper
else:
self.lower = np.array(lower)
self.upper = np.array(upper)
assert self.lower.shape == self.upper.shape, "Shapes do not agree."
self._dim = None
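# A minimal usage sketch (only __init__ is shown above; assumes the
# enclosing BoundedSpace class): scalar bounds are broadcast to `shape`,
# array bounds are taken as given.
def _demo_bounded_space():
    box = BoundedSpace(-1.0, 1.0, shape=(3,))
    assert box.lower.shape == (3,) and box.upper.shape == (3,)
    box2 = BoundedSpace([0.0, 0.0], [1.0, 2.0])  # shape inferred from arrays
    assert box2.lower.shape == (2,)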
def test_env(spec):
env = spec.make()
ob_space = env.observation_space
act_space = env.action_space
ob = env.reset()
assert ob_space.contains(ob), 'Reset observation: {!r} not in space'.format(ob)
a = act_space.sample()
observation, reward, done, _info = env.step(a)
assert ob_space.contains(observation), 'Step observation: {!r} not in space'.format(observation)
assert np.isscalar(reward), "{} is not a scalar for {}".format(reward, env)
assert isinstance(done, bool), "Expected {} to be a boolean".format(done)
for mode in env.metadata.get('render.modes', []):
env.render(mode=mode)
env.render(close=True)
# Make sure we can render the environment after close.
for mode in env.metadata.get('render.modes', []):
env.render(mode=mode)
env.render(close=True)
env.close()
# Run a longer rollout on some environments
def __init__(self, length_scale=1.0, magnitude=1.0, check_numerics=True,
debug=False):
assert np.isscalar(length_scale)
assert np.isscalar(magnitude)
assert length_scale > 0 and magnitude > 0
self.length_scale = length_scale
self.magnitude = magnitude
self.check_numerics = check_numerics
self.debug = debug
self.X_train = None
self.y_train = None
self.xy_ = None
self.K = None
self.graph = None
self.vars = None
self.ops = None
def store_reference_metadata(self, reference_path, ref_type, metric_prefix):
""" ref_type - string e.g., 'Transcriptome'
metric_prefix - string e.g., 'vdj' """
if self.metadata is None:
self.metadata = {}
ref_metadata = cr_utils._load_reference_metadata_file(reference_path)
for key in cr_constants.REFERENCE_METADATA_KEYS:
value = ref_metadata.get(key, '')
if value is None:
value = ''
# Backward compatibility with old reference metadata jsons that don't contain the type field
if key == cr_constants.REFERENCE_TYPE_KEY and value == '':
self.metadata['%s%s' % (metric_prefix, cr_constants.REFERENCE_TYPE_KEY)] = ref_type
continue
if np.isscalar(value):
self.metadata['%s%s' % (metric_prefix, key)] = value
elif key == cr_constants.REFERENCE_GENOMES_KEY:
# Special case for genome key
self.metadata['%s%s' % (metric_prefix, key)] = cr_reference.get_ref_name_from_genomes(value)
else:
self.metadata['%s%s' % (metric_prefix, key)] = ', '.join(str(x) for x in value)
def getROIstations(geo_point,radiusParam,data,header):
'''
This function returns the 4ID station codes for the stations in a region
The region of interest is defined by the geographic coordinate and a window size
@param geo_point: The geographic (lat,lon) coordinate of interest
@param radiusParam: An overloaded radius of interest [km] or latitude and longitude window [deg] around the geo_point
@param data: Stabilized (or unstabilized) data generated from the data fetcher or out of stab_sys
@param header: Header dictionary with stations metadata keyed by their 4ID code. This is output with the data.
@return station_list, list of site 4ID codes in the specified geographic region
'''
ccPos = (geo_point[0]*np.pi/180, geo_point[1]*np.pi/180)
if np.isscalar(radiusParam):
station_list = []
for ii in header.keys():
coord = (header[ii]['refNEU'][0]*np.pi/180,(header[ii]['refNEU'][1]-360)*np.pi/180)
            dist = 6371*2*np.arcsin(np.sqrt(np.sin((ccPos[0]-coord[0])/2)**2
                                            + np.cos(ccPos[0])*np.cos(coord[0])
                                            * np.sin((ccPos[1]-coord[1])/2)**2))
if np.abs(dist) < radiusParam:
station_list.append(header[ii]['4ID'])
else:
# overloaded radiusParam term to be radius or lat/lon window size
latWin = radiusParam[0]/2
lonWin = radiusParam[1]/2
station_list = []
try:
for ii in header.keys():
coord = (header[ii]['refNEU'][0],(header[ii]['refNEU'][1]-360))
if (geo_point[0]-latWin)<=coord[0]<=(geo_point[0]+latWin) and (geo_point[1]-lonWin)<=coord[1]<=(geo_point[1]+lonWin):
station_list.append(header[ii]['4ID'])
except:
station_list = None
return station_list
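# The distance test above is the haversine great-circle formula; a
# standalone sketch with Earth radius 6371 km and inputs in radians:
def haversine_km(lat1, lon1, lat2, lon2):
    import numpy as np
    a = (np.sin((lat1 - lat2) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon1 - lon2) / 2) ** 2)
    return 6371 * 2 * np.arcsin(np.sqrt(a))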
def __call__(self, y, x):
'''
Convert pixel coordinates to lat/lon
@param y: y coordinate
@param x: x coordinate
@return (lat, lon)
'''
# # If interpolation of geodata is necessary
# if self.lat_data is None:
ret_lat = self.alat(y+self.y_offset,x+self.x_offset, grid=False)
ret_lon = self.alon(y+self.y_offset,x+self.x_offset, grid=False)
if np.isscalar(y) and np.isscalar(x):
ret_lat = ret_lat.item()
ret_lon = ret_lon.item()
return ret_lat, ret_lon
# # If geodata is the same resolution as science data
# else:
# return self.lat_data[y,x], self.lon_data[y,x]
# Utility function to retrieve the value of a bit in a bit flag
def assert_allclose(x, y, rtol=1e-10, atol=1e-8):
    """Drop-in replacement for `numpy.testing.assert_allclose` that shows the nonmatching elements"""
    if np.isscalar(x) and np.isscalar(y):
        return np.testing.assert_allclose(x, y, rtol=rtol, atol=atol)
    if x.shape != y.shape:
        raise AssertionError("Shape mismatch: %s vs %s" % (str(x.shape), str(y.shape)))
    d = ~np.isclose(x, y, rtol, atol)
    if np.any(d):
        miss = np.where(d)[0]
        raise AssertionError("""Mismatch of %d elements (%g %%) at the level of rtol=%g, atol=%g
%s
%s
%s""" % (len(miss), 100. * len(miss) / x.size, rtol, atol, repr(miss), str(x[d]), str(y[d])))
def __call__(self, solutions, *args, **kwargs):
"""return penalty or list of penalties, by default zero(s).
This interface seems too specifically tailored to the derived
BoundPenalty class, it should maybe change.
"""
if np.isscalar(solutions[0]):
return 0.0
else:
return len(solutions) * [0.0]