def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
# TODO: Make mask available to user's `compute`.
compute = self.compute
missing_value = self.missing_value
params = self.params
out = full_like(mask, missing_value, dtype=self.dtype)
with self.ctx:
# TODO: Consider pre-filtering columns that are all-nan at each
# time-step?
for idx, date in enumerate(dates):
compute(
date,
assets,
out[idx],
*(next(w) for w in windows),
**params
)
out[~mask] = missing_value
return out
Python full_like() usage examples (source code snippets)
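For reference, a minimal sketch of the call these snippets share: np.full_like(a, fill_value) allocates a new array with the shape and (by default) dtype of a, filled with a constant; an explicit dtype= overrides the template's type, which is how several snippets below build float NaN arrays from integer or boolean templates.

import numpy as np

template = np.zeros((2, 3), dtype=np.int64)

# Same shape and dtype as the template, every element set to 7.
filled = np.full_like(template, 7)
assert filled.shape == (2, 3) and filled.dtype == np.int64

# An explicit dtype overrides the template's type; required here,
# since NaN has no integer representation.
nan_grid = np.full_like(template, np.nan, dtype=np.float64)
assert np.isnan(nan_grid).all()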
def _is_feasible(kind, enforce_feasibility, f0):
keyword = kind[0]
if keyword == "equals":
lb = np.asarray(kind[1], dtype=float)
ub = np.asarray(kind[1], dtype=float)
elif keyword == "greater":
lb = np.asarray(kind[1], dtype=float)
ub = np.full_like(lb, np.inf, dtype=float)
elif keyword == "less":
ub = np.asarray(kind[1], dtype=float)
lb = np.full_like(ub, -np.inf, dtype=float)
elif keyword == "interval":
lb = np.asarray(kind[1], dtype=float)
ub = np.asarray(kind[2], dtype=float)
else:
        raise RuntimeError("Unknown constraint kind: {}.".format(keyword))
return ((lb[enforce_feasibility] <= f0[enforce_feasibility]).all()
and (f0[enforce_feasibility] <= ub[enforce_feasibility]).all())
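A hypothetical call (names and values invented for illustration), showing that only components flagged in enforce_feasibility are checked against the bounds built above:

import numpy as np

kind = ("greater", [0.0, 1.0, -2.0])
enforce = np.array([True, False, True])
f0 = np.array([0.5, -5.0, -1.0])

# The middle component violates its lower bound, but it is not
# enforced, so the point still counts as feasible.
assert _is_feasible(kind, enforce, f0)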
def as_strided_writeable():
arr = np.ones(10)
view = as_strided(arr, writeable=False)
assert_(not view.flags.writeable)
# Check that writeable also is fine:
view = as_strided(arr, writeable=True)
assert_(view.flags.writeable)
view[...] = 3
assert_array_equal(arr, np.full_like(arr, 3))
# Test that things do not break down for readonly:
arr.flags.writeable = False
view = as_strided(arr, writeable=False)
view = as_strided(arr, writeable=True)
assert_(not view.flags.writeable)
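Independent of the test above, a small sketch of why the writeable flag matters for as_strided: overlapping windows alias the same buffer, so a read-only view guards against writes that would corrupt several windows at once.

import numpy as np
from numpy.lib.stride_tricks import as_strided

arr = np.arange(6, dtype=np.int64)
step = arr.itemsize

# Four overlapping length-3 windows over one buffer -- no copy is made.
windows = as_strided(arr, shape=(4, 3), strides=(step, step),
                     writeable=False)
assert not windows.flags.writeable
assert windows[0, 2] == windows[2, 0] == arr[2]   # shared element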
def draw_axes(self, ax=None):
# concatenate lklhd_pot_diff and lklhd_pot_diff_root
lpd = self.__lklhd_pot_diff
lpdr = self.lklhd_pot_diff_root[np.newaxis,:]
pad = np.full_like(lpdr, np.nan)
data = np.concatenate((lpd, pad, lpdr), axis=0)
lpds = self.lklhd_pot_diff_siblings
if ax is None:
ax = self._graph.get_axes(self.id_axes)
assert len(ax) == self.required_axes
# imshow lklhd_pot_diff
ax[0].set_anchor('N')
imshow_values(data, ax[0], show_value_text=self.show_value_text)
# imshow lklhd_pot_diff_siblings
ax[1].set_anchor('N')
imshow_values(lpds, ax[1], show_value_text=self.show_value_text)
def __f(self, x):
        '''Compute the next state x(k+1).
        State equation: x(k+1) = A * x(k) + B * u(k)
        Argument:
            x: current state x(k)
        Returns:
            x_next: next state x(k+1)
        '''
yaw = x[2, :]
a = self.__DT_s * np.cos(yaw)
b = self.__DT_s * np.sin(yaw)
c = np.full_like(a, self.__DT_s)
u = np.array([a,
b,
c])
x_next = (self.__A @ x) + (self.__B @ u)
for i in range(x_next.shape[1]):
x_next[2, i] = limit.limit_angle(x_next[2, i])
return x_next
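A standalone sketch of the same update with hypothetical A, B and DT values (the class internals are not shown here): each column of x is a state [x, y, yaw], and full_like supplies the constant time-step row of u.

import numpy as np

DT = 0.1                 # hypothetical time step [s]
A = np.eye(3)            # hypothetical state matrix
B = np.eye(3)            # hypothetical input matrix

x = np.array([[0.0, 1.0],            # x positions
              [0.0, 0.0],            # y positions
              [0.0, np.pi / 2]])     # yaw angles

yaw = x[2, :]
u = np.array([DT * np.cos(yaw),
              DT * np.sin(yaw),
              np.full_like(yaw, DT)])
x_next = (A @ x) + (B @ u)           # x(k+1) = A x(k) + B u(k)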
def test_ignore_nans(self):
""" Test that NaNs are ignored. """
        source = [np.ones((16,), dtype=float) for _ in range(10)]
source.append(np.full_like(source[0], np.nan))
product = cprod(source, ignore_nan = True)
self.assertTrue(np.allclose(product, np.ones_like(product)))
def test_ignore_nans(self):
""" Test that NaNs are ignored. """
        source = [np.ones((16,), dtype=float) for _ in range(10)]
source.append(np.full_like(source[0], np.nan))
product = last(iprod(source, ignore_nan = True))
self.assertTrue(np.allclose(product, np.ones_like(product)))
def test_filled_like(self):
self.check_like_function(np.full_like, 0, True)
self.check_like_function(np.full_like, 1, True)
self.check_like_function(np.full_like, 1000, True)
self.check_like_function(np.full_like, 123.456, True)
self.check_like_function(np.full_like, np.inf, True)
def input_generator():
for dtype in [np.float64]:
for nsamples in [1000, 10000]:
sigma = 5.0
samples = np.random.normal(loc=0.0, scale=sigma, size=nsamples).astype(dtype)
            # For simplicity, initialize the bandwidth array with a constant
            # from Silverman's 1D rule of thumb: h = 1.06 * sigma * n**(-1/5)
            bandwidths = np.full_like(samples, 1.06 * sigma * nsamples**-0.2)
for neval in [10, 1000, 10000]:
category = ('samples%d' % nsamples, np.dtype(dtype).name)
eval_points = np.random.normal(loc=0.0, scale=5.0, size=neval).astype(dtype)
yield dict(category=category, x=neval, input_args=(eval_points, samples, bandwidths), input_kwargs={})
def test_map_can_only_return_none_if_missing_value_is_none(self):
# Should work.
la = LabelArray(self.strs, missing_value=None)
result = la.map(lambda x: None)
check_arrays(
result,
LabelArray(np.full_like(self.strs, None), missing_value=None),
)
la = LabelArray(self.strs, missing_value="__MISSING__")
with self.assertRaises(TypeError):
la.map(lambda x: None)
def color(self, data, alpha=255):
"""Maps your data values to the pallette with linear interpolation"""
red = np.interp(data, self.range, self.r)
blue = np.interp(data, self.range, self.b)
green = np.interp(data, self.range, self.g)
# Style plot to return a grey color when value is 'nan'
red[np.isnan(red)] = 240
blue[np.isnan(blue)] = 240
green[np.isnan(green)] = 240
colors = np.dstack([red.astype(np.uint8),
green.astype(np.uint8),
blue.astype(np.uint8),
np.full_like(data, alpha, dtype=np.uint8)])
return colors.view(dtype=np.uint32).reshape(data.shape)
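The dstack(...).view(np.uint32) trick above packs four uint8 channels into a single 32-bit pixel per element; a minimal sketch of that packing on its own:

import numpy as np

data = np.zeros((2, 2))
r = np.full_like(data, 10, dtype=np.uint8)
g = np.full_like(data, 20, dtype=np.uint8)
b = np.full_like(data, 30, dtype=np.uint8)
a = np.full_like(data, 255, dtype=np.uint8)

# Four contiguous uint8 channels reinterpreted as one uint32 per pixel
# (the byte order follows the machine's endianness).
rgba = np.dstack([r, g, b, a]).view(np.uint32).reshape(data.shape)
assert rgba.shape == data.shape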
def estimate_hyperplane(dbf, comps, phases, current_statevars, comp_dicts, phase_models, parameters):
region_chemical_potentials = []
parameters = OrderedDict(sorted(parameters.items(), key=str))
for cond_dict, phase_flag in comp_dicts:
# We are now considering a particular tie vertex
for key, val in cond_dict.items():
if val is None:
cond_dict[key] = np.nan
cond_dict.update(current_statevars)
if np.any(np.isnan(list(cond_dict.values()))):
# This composition is unknown -- it doesn't contribute to hyperplane estimation
pass
else:
# Extract chemical potential hyperplane from multi-phase calculation
# Note that we consider all phases in the system, not just ones in this tie region
multi_eqdata = equilibrium(dbf, comps, phases, cond_dict, verbose=False,
model=phase_models, scheduler=dask.local.get_sync, parameters=parameters)
# Does there exist only a single phase in the result with zero internal degrees of freedom?
# We should exclude those chemical potentials from the average because they are meaningless.
num_phases = len(np.squeeze(multi_eqdata['Phase'].values != ''))
zero_dof = np.all((multi_eqdata['Y'].values == 1.) | np.isnan(multi_eqdata['Y'].values))
if (num_phases == 1) and zero_dof:
region_chemical_potentials.append(np.full_like(np.squeeze(multi_eqdata['MU'].values), np.nan))
else:
region_chemical_potentials.append(np.squeeze(multi_eqdata['MU'].values))
    region_chemical_potentials = np.nanmean(region_chemical_potentials, axis=0, dtype=float)
return region_chemical_potentials
def rcosfir(beta, sps, span=None):
"""Generates a raised cosine FIR filter.
:param beta: shape of the raised cosine filter (0-1)
:param sps: number of samples per symbol
:param span: length of the filter in symbols (None => automatic selection)
>>> import arlpy
>>> rc = arlpy.comms.rcosfir(0.25, 6)
>>> bb = arlpy.comms.modulate(arlpy.comms.random_data(100), arlpy.comms.psk())
>>> pb = arlpy.comms.upconvert(bb, 6, 27000, 18000, rc)
"""
if beta < 0 or beta > 1:
raise ValueError('Beta must be between 0 and 1')
if span is None:
# from http://www.commsys.isy.liu.se/TSKS04/lectures/3/MichaelZoltowski_SquareRootRaisedCosine.pdf
# since this recommendation is for root raised cosine filter, it is conservative for a raised cosine filter
span = 33-int(44*beta) if beta < 0.68 else 4
delay = int(span*sps/2)
    t = _np.arange(-delay, delay+1, dtype=float)/sps
denom = 1 - (2*beta*t)**2
eps = _np.finfo(float).eps
idx1 = _np.nonzero(_np.abs(denom) > _sqrt(eps))
b = _np.full_like(t, beta*_sin(_pi/(2*beta))/(2*sps))
b[idx1] = _np.sinc(t[idx1]) * _cos(_pi*beta*t[idx1])/denom[idx1] / sps
b /= _sqrt(_np.sum(b**2))
return b
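Assuming the leading-underscore aliases (_np, _sin, _cos, _pi, _sqrt) map to numpy/math as their names suggest, the full_like line pre-fills every tap with the raised-cosine limit value at the singular points t = ±1/(2β), and the indexed assignment then overwrites the non-singular taps. A quick sanity check of the result:

import numpy as np

rc = rcosfir(0.25, 6)
assert np.isclose(np.sum(rc**2), 1.0)   # unit energy, from the last line
assert np.allclose(rc, rc[::-1])        # symmetric impulse response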
def mask_to_output_target(mask):
target = np.full_like(mask, CONFIG.model.v_false, dtype=np.float32)
target[mask] = CONFIG.model.v_true
return target
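A hypothetical call, stubbing in soft-label values for the CONFIG.model.v_false / v_true settings the snippet reads:

import numpy as np

V_FALSE, V_TRUE = 0.05, 0.95   # hypothetical CONFIG stand-ins

mask = np.array([[True, False],
                 [False, True]])
target = np.full_like(mask, V_FALSE, dtype=np.float32)
target[mask] = V_TRUE
print(target)
# [[0.95 0.05]
#  [0.05 0.95]]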
def __init__(self, orig_file, image_dataset, label_dataset, mask_dataset, mask_bounds=None):
logging.debug('Loading HDF5 file "{}"'.format(orig_file))
self.file = h5py.File(orig_file, 'r')
self.resolution = None
self._mask_bounds = tuple(map(np.asarray, mask_bounds)) if mask_bounds is not None else None
if image_dataset is None and label_dataset is None:
raise ValueError('HDF5 volume must have either an image or label dataset: {}'.format(orig_file))
if image_dataset is not None:
self.image_data = self.file[image_dataset]
if 'resolution' in self.file[image_dataset].attrs:
self.resolution = np.array(self.file[image_dataset].attrs['resolution'])
if label_dataset is not None:
self.label_data = self.file[label_dataset]
if 'resolution' in self.file[label_dataset].attrs:
resolution = np.array(self.file[label_dataset].attrs['resolution'])
if self.resolution is not None and not np.array_equal(self.resolution, resolution):
logging.warning('HDF5 image and label dataset resolutions differ in %s: %s, %s',
orig_file, self.resolution, resolution)
else:
self.resolution = resolution
else:
self.label_data = None
if mask_dataset is not None:
self.mask_data = self.file[mask_dataset]
else:
self.mask_data = None
if image_dataset is None:
            self.image_data = np.full_like(self.label_data, np.nan, dtype=np.float32)
if self.resolution is None:
self.resolution = np.ones(3)
def test_predict_f(self):
with self.test_context():
ms, Xs, _rng = self.prepare()
for m in ms:
mf, vf = m.predict_f(Xs)
assert_array_equal(mf.shape, vf.shape)
assert_array_equal(mf.shape, (10, 1))
assert_array_less(np.full_like(vf, -1e-6), vf)
def test_predict_y(self):
with self.test_context():
ms, Xs, _rng = self.prepare()
for m in ms:
mf, vf = m.predict_y(Xs)
assert_array_equal(mf.shape, vf.shape)
assert_array_equal(mf.shape, (10, 1))
assert_array_less(np.full_like(vf, -1e-6), vf)
def maskedFilter(arr, mask, ksize=30, fill_mask=True,
fn='median'):
'''
    fn: one of 'mean' or 'median'
    fill_mask=True:
        masked areas are replaced with filtered results
    fill_mask=False:
        masked areas are ignored
'''
if fill_mask:
mask1 = mask
out = arr
else:
mask1 = ~mask
out = np.full_like(arr, fill_value=np.nan)
mask2 = ~mask
if fn == 'mean':
_calcMean(arr, mask1, mask2, out, ksize // 2)
else:
buff = np.empty(shape=(ksize * ksize), dtype=arr.dtype)
_calcMedian(arr, mask1, mask2, out, ksize // 2, buff)
return out
# TODO: only the filter method differs; find a better way to swap it
# than defining n near-identical functions
def setUp(self):
parser = argparse.ArgumentParser()
self.args = parser.parse_args([])
self.args.init_alpha = 1.0
self.args.tolerance = 0.0001
self.args.max_iter = 1000
self.args.n_multi = 1
self.args.verbose = False
phy_in = ['I, A1G ,,',
',H, A3T A5T ,,',
',,F, A6T ,,',
',,,B, A8T ,,',
',,,C, T5A ,,',
',,G, A7T ,,',
',,,D, A9T ,,',
',,,E, A4T ,,',
',A, A2T A4T ,,']
phy = phylotree.Phylotree(phy_in)
ref = "AAAAAAAAA"
reads = list(["1:A,2:T,3:A", "2:T,3:A", "3:A,4:T,5:T", "5:T,6:A",
"6:A,7:T", "6:A,7:T,8:A", "7:T,8:A", "4:T,5:T",
"1:A,2:T,3:T,4:T", "5:A,6:T,7:A,8:A"])
haps = list('ABCDEFGHI')
self.input_mat = preprocess.build_em_matrix(ref, phy, reads,
haps, self.args)
self.wts = numpy.ones(len(reads))
self.true_props = numpy.array(
[0.0, 0.8, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0])
inf = float('Inf')
self.true_haps = numpy.full_like(self.input_mat, -inf)
self.true_haps[0:8, 1] = 0.0
self.true_haps[8:10, 4] = 0.0
def _initialize(self, flat_size, fill_value, dtype):
if self.nans:
# For avoiding branches
flat_size += 1
if self.forced_fill_value is None:
ret = np.full(flat_size, fill_value, dtype=dtype)
else:
ret = np.full(flat_size, self.forced_fill_value, dtype=dtype)
counter = np.full_like(ret, self.counter_fill_value, dtype=self.counter_dtype)
if self.mean_fill_value is not None:
mean = np.full_like(ret, self.mean_fill_value, dtype=ret.dtype)
else:
mean = None
return ret, counter, mean
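The flat_size += 1 line above reserves one spare slot so that indices for NaN entries can all be routed there without per-element branching; a sketch of that overflow-slot idea in a grouped sum:

import numpy as np

values = np.array([1.0, 2.0, np.nan, 4.0])
groups = np.array([0, 1, 0, 1])
n_groups = 2

# One extra slot at the end absorbs every NaN contribution.
sums = np.zeros(n_groups + 1)
idx = np.where(np.isnan(values), n_groups, groups)
np.add.at(sums, idx, np.nan_to_num(values))
print(sums[:-1])   # [1. 6.] -- the overflow slot is simply dropped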
def from_lib(name, cell, pad=0):
blocks = np.asarray(cell["blocks"], dtype=np.uint8)
_, width, length = blocks.shape
data = np.asarray(cell["data"], dtype=np.uint8)
    mask = np.full_like(blocks, True, dtype=bool)
delay = cell["delay"]
if pad != 0:
pad_out = (pad,)
blocks = np.pad(blocks, pad_out, "constant")
data = np.pad(data, pad_out, "constant")
mask = np.pad(mask, pad_out, "constant")
# create a padded base immediately below it
stone = block_names.index("stone")
y = pad-1
xs = pad
zs = pad
xe = xs + length
ze = zs + width
blocks[y, zs:ze, xs:xe] = stone
# build ports
ports = {}
for pin, d in cell["pins"].iteritems():
y, z, x = d["coordinates"]
coord = (y + pad, z + pad, x + pad)
facing = d["facing"]
direction = d["direction"]
level = d["level"]
ports[pin] = {"coordinates": coord,
"facing": facing,
"direction": direction,
"level": level}
return Cell(blocks, data, mask, name, ports, delay)
def _reinforce_box_constraint(kind, enforce_feasibility, x0,
relative_tolerance=0.01,
absolute_tolerance=0.01):
"""Reinforce box constraint"""
x0 = np.copy(np.asarray(x0, dtype=float))
keyword = kind[0]
if keyword == "greater":
lb = np.asarray(kind[1], dtype=float)
ub = np.full_like(lb, np.inf, dtype=float)
elif keyword == "less":
ub = np.asarray(kind[1], dtype=float)
lb = np.full_like(ub, -np.inf, dtype=float)
elif keyword == "interval":
lb = np.asarray(kind[1], dtype=float)
ub = np.asarray(kind[2], dtype=float)
x0_new = np.copy(x0)
for i in range(np.size(x0)):
if enforce_feasibility[i]:
if not np.isinf(lb[i]):
lower_bound = min(lb[i]+absolute_tolerance,
lb[i]+relative_tolerance*(ub[i]-lb[i]))
x0_new[i] = max(x0_new[i], lower_bound)
if not np.isinf(ub[i]):
upper_bound = max(ub[i]-absolute_tolerance,
ub[i]-relative_tolerance*(ub[i]-lb[i]))
x0_new[i] = min(x0_new[i], upper_bound)
return x0_new
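A hypothetical call with an interval constraint, showing the default 1% tolerances pulling an infeasible start point just inside [0, 1]:

import numpy as np

kind = ("interval", [0.0, 0.0], [1.0, 1.0])
enforce = np.array([True, True])
x0 = np.array([-0.5, 2.0])

print(_reinforce_box_constraint(kind, enforce, x0))
# [0.01 0.99]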
def test_threshold_boundingzero(self):
"""Test fuzzy threshold of zero."""
bounds = (-1.0, 1.0)
plugin = Threshold(0.0, fuzzy_bounds=bounds)
result = plugin.process(self.cube)
expected_result_array = np.full_like(
self.cube.data, fill_value=0.5).reshape(1, 1, 5, 5)
expected_result_array[0][0][2][2] = 0.75
self.assertArrayAlmostEqual(result.data, expected_result_array)
def test_threshold_boundingzero_above(self):
"""Test fuzzy threshold of zero where data are above upper-bound."""
bounds = (-0.1, 0.1)
plugin = Threshold(0.0, fuzzy_bounds=bounds)
result = plugin.process(self.cube)
expected_result_array = np.full_like(
self.cube.data, fill_value=0.5).reshape(1, 1, 5, 5)
expected_result_array[0][0][2][2] = 1.
self.assertArrayAlmostEqual(result.data, expected_result_array)
def test_threshold_boundingbelowzero(self):
"""Test fuzzy threshold of below-zero."""
bounds = (-1.0, 1.0)
plugin = Threshold(0.0, fuzzy_bounds=bounds, below_thresh_ok=True)
result = plugin.process(self.cube)
expected_result_array = np.full_like(
self.cube.data, fill_value=0.5).reshape(1, 1, 5, 5)
expected_result_array[0][0][2][2] = 0.25
self.assertArrayAlmostEqual(result.data, expected_result_array)
def comp_diff_weights(weights, quantize_vals):
diff_weights = np.full_like(weights, np.inf)
for q in quantize_vals:
d2 = (weights - q)**2.0
midx = np.where(d2 < diff_weights)[0]
diff_weights[midx] = d2[midx]
return diff_weights
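A quick usage sketch (1-D weights, as the [0] indexing on np.where assumes): each entry ends up as the squared distance to its nearest quantization level.

import numpy as np

weights = np.array([0.1, -0.9, 0.4])
quantize_vals = np.array([-1.0, 0.0, 1.0])
print(comp_diff_weights(weights, quantize_vals))
# [0.01 0.01 0.16]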