import numpy as np
import numpy.random as random

def random_walk_rec(current, trace, length, successor_fn):
    # Base case: the walk has consumed its full length.
    if length == 0:
        return current
    else:
        sucs = successor_fn(current)
        first = random.randint(len(sucs))
        now = first
        while True:
            suc = sucs[now]
            try:
                # Reject successors that were already visited on this walk.
                assert not np.any([np.all(np.equal(suc, t)) for t in trace])
                result = random_walk_rec(suc, [*trace, suc], length - 1, successor_fn)
                assert result is not None
                return result
            except AssertionError:
                # Try the next successor; give up once we wrap around to the first.
                now = (now + 1) % len(sucs)
                if now == first:
                    print("B", end="")
                    return None
                else:
                    continue
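# A minimal usage sketch, assuming a hypothetical successor function on the
# integers; the original project's grid-based successor_fn is not shown here.
import numpy as np

def successor_fn(state):
    # Hypothetical successors: move one step up or down.
    return [np.array(state) + 1, np.array(state) - 1]

start = np.array(0)
walk_end = random_walk_rec(start, [start], 5, successor_fn)
print(walk_end)  # array(5) or array(-5): no state is ever revisited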
def test_against_numpy(self):
    """ Test iany against numpy.any """
    stream = [np.zeros((8, 16, 2)) for _ in range(11)]
    stream[3][3, 0, 1] = 1  # so that np.any(axis=None) evaluates to True

    stack = np.stack(stream, axis=-1)

    with self.subTest('axis = None'):
        from_numpy = np.any(stack, axis=None)
        from_stream = last(iany(stream, axis=None))
        self.assertEqual(from_numpy, from_stream)

    for axis in range(stack.ndim):
        with self.subTest('axis = {}'.format(axis)):
            from_numpy = np.any(stack, axis=axis)
            from_stream = last(iany(stream, axis=axis))
            self.assertTrue(np.allclose(from_numpy, from_stream))
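# For reference, the np.any axis semantics exercised by the test above,
# in plain numpy:
import numpy as np

stack = np.zeros((2, 3, 4))
stack[1, 2, 0] = 1

print(np.any(stack))          # True: reduces over all axes
print(np.any(stack, axis=0))  # shape (3, 4) boolean array
print(np.any(stack, axis=2))  # shape (2, 3) boolean array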
def _set_barcode_reads_metrics(self, read_type, read_type_set, bc):
    for genome in self.genomes:
        is_read_type = (genome, cr_constants.TRANSCRIPTOME_REGION) in read_type_set
        if is_read_type:
            barcode_reads = self._get_metric_attr(
                'barcode_reads', genome, cr_constants.TRANSCRIPTOME_REGION, read_type)
            barcode_reads.add(bc)

    # Only report the multi prefix for the barcode_reads metrics
    # when there are multiple genomes.
    if self.has_multiple_genomes:
        is_read_type = any([(genome, cr_constants.TRANSCRIPTOME_REGION) in read_type_set
                            for genome in self.genomes])
        if is_read_type:
            multi_barcode_reads = self._get_metric_attr(
                'barcode_reads', cr_constants.MULTI_REFS_PREFIX,
                cr_constants.TRANSCRIPTOME_REGION, read_type)
            multi_barcode_reads.add(bc)
def updateSpots(self, dataSet=None):
    if dataSet is None:
        dataSet = self.data

    invalidate = False
    if self.opts['pxMode']:
        mask = np.equal(dataSet['sourceRect'], None)
        if np.any(mask):
            invalidate = True
            opts = self.getSpotOpts(dataSet[mask])
            sourceRect = self.fragmentAtlas.getSymbolCoords(opts)
            dataSet['sourceRect'][mask] = sourceRect

        self.fragmentAtlas.getAtlas()  # generate atlas so source widths are available.

        dataSet['width'] = np.array(list(imap(QtCore.QRectF.width, dataSet['sourceRect']))) / 2
        dataSet['targetRect'] = None
        self._maxSpotPxWidth = self.fragmentAtlas.max_width
    else:
        self._maxSpotWidth = 0
        self._maxSpotPxWidth = 0
        self.measureSpotSizes(dataSet)

    if invalidate:
        self.invalidate()
# test_bidirectional_rnn_encoder.py (project: yoctol-keras-layer-zoo, author: Yoctol)
def test_mask_value(self):
    result = self.model.predict(self.data)
    np.testing.assert_array_almost_equal(
        result[:, 1:, :],
        np.zeros((
            self.data_size,
            self.max_length - 1,
            self.encoding_size
        ))
    )
    np.testing.assert_equal(
        np.any(
            np.not_equal(
                result[:, 0:1, self.cell_units:],
                np.zeros((self.data_size, 1, self.cell_units))
            )
        ),
        True
    )
def test_image_data_mask(self):
    mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
    mask_tensor = self.model._output_mask_cache[mask_cache_key]
    mask = mask_tensor.eval(
        session=K.get_session(),
        feed_dict={self.model.input: self.data}
    )
    self.assertTrue(
        np.all(
            mask[:, self.x_start:self.x_end]
        )
    )
    self.assertFalse(
        np.any(
            mask[:, :self.x_start]
        )
    )
    self.assertFalse(
        np.any(
            mask[:, self.x_end:]
        )
    )
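# The region assertions above follow a reusable pattern: one slice of a
# boolean mask must be all True, everything outside it must contain no True
# values. A minimal numpy-only sketch with hypothetical region bounds:
import numpy as np

mask = np.zeros((4, 10), dtype=bool)
x_start, x_end = 2, 7           # hypothetical masked region
mask[:, x_start:x_end] = True

assert np.all(mask[:, x_start:x_end])   # region is fully masked
assert not np.any(mask[:, :x_start])    # nothing masked before it
assert not np.any(mask[:, x_end:])      # nothing masked after it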
def test_seq_data_mask(self):
    mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
    mask_tensor = self.model._output_mask_cache[mask_cache_key]
    mask = mask_tensor.eval(
        session=K.get_session(),
        feed_dict={self.model.input: self.seq_data}
    )
    self.assertTrue(
        np.all(
            mask[:, :self.seq_data_max_length]
        )
    )
    self.assertFalse(
        np.any(
            mask[:, self.seq_data_max_length:]
        )
    )
def check_for_normalization(self, data_header):
    channels = [c.upper() for c in data_header.ch_names]
    if data_header.info['sfreq'] != 100 and not self.resample:
        print('WARNING: Data is not sampled at 100 Hz. Use resample=True to resample.')
    # if not data_header.info['lowpass'] == 50:
    #     print('WARNING: lowpass not at 50')
    if (self.channels['EEG'] not in channels) and not np.any([ch in channels for ch in self.channels['EEG']]):
        print('WARNING: EEG channel missing')
    if self.channels['EMG'] not in channels:
        print('WARNING: EMG channel missing')
    if self.channels['EOG'] not in channels:
        print('WARNING: EOG channel missing')
    if self.references['RefEEG'] and self.references['RefEEG'] not in channels:
        print('WARNING: RefEEG channel missing')
    if self.references['RefEMG'] and self.references['RefEMG'] not in channels:
        print('WARNING: RefEMG channel missing')
    if self.references['RefEOG'] and self.references['RefEOG'] not in channels:
        print('WARNING: RefEOG channel missing')
def __init__(self, min_pt, max_pt, frame='unspecified'):
    """Initialize a box.

    Parameters
    ----------
    min_pt : :obj:`numpy.ndarray` of float
        The minimum x, y, and (optionally) z points.
    max_pt : :obj:`numpy.ndarray` of float
        The maximum x, y, and (optionally) z points.
    frame : :obj:`str`
        The frame in which this box is placed.

    Raises
    ------
    ValueError
        If max_pt is smaller than min_pt in any dimension.
    """
    if np.any((max_pt - min_pt) < 0):
        raise ValueError('Min point must be smaller than max point')
    self._min_pt = min_pt
    self._max_pt = max_pt
    self._frame = frame
def _check_valid_data(self, data):
    """Checks that the incoming data is a 3 x #elements ndarray of normal
    vectors.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to verify.

    Raises
    ------
    ValueError
        If the data is not of the correct shape or type, or if the vectors
        therein are not normalized.
    """
    if data.dtype.type != np.float32 and data.dtype.type != np.float64:
        raise ValueError('Must initialize normal clouds with a numpy float ndarray')
    if data.shape[0] != 3:
        raise ValueError('Illegal data array passed to normal cloud. Must have 3 coordinates')
    if len(data.shape) > 2:
        raise ValueError('Illegal data array passed to normal cloud. Must have 1 or 2 dimensions')
    if np.any((np.abs(np.linalg.norm(data, axis=0) - 1) > 1e-4) & (np.linalg.norm(data, axis=0) != 0)):
        raise ValueError('Illegal data array passed to normal cloud. Must have norm=1.0 or norm=0.0')
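# A quick self-contained check of the unit-or-zero norm test above, on
# synthetic normals; the 1e-4 tolerance is taken straight from the snippet.
import numpy as np

normals = np.random.randn(3, 100)
normals /= np.linalg.norm(normals, axis=0)  # normalize every column
normals[:, 0] = 0.0                         # zero vectors are allowed too

norms = np.linalg.norm(normals, axis=0)
bad = np.any((np.abs(norms - 1) > 1e-4) & (norms != 0))
assert not bad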
def process(self, nodemeta, timestamp, data, description):
    if self._job.getdata('perf')['active'] != True:
        self._error = ProcessingError.RAW_COUNTER_UNAVAILABLE
        return False

    if len(data[0]) == 0:
        # Ignore datapoints where no data stored
        return True

    if nodemeta.nodename not in self._data:
        self._data[nodemeta.nodename] = {"x": [], "t": []}

    info = self._data[nodemeta.nodename]
    info['x'].append(1.0 * numpy.sum(data[0]))
    info['t'].append(timestamp)

    if len(info['x']) > 1:
        # A decreasing counter means the PMDA restarted mid-job.
        if numpy.any(info['x'][-1] - info['x'][-2] < 0.0):
            self._error = ProcessingError.PMDA_RESTARTED_DURING_JOB
            return False

    return True
def process(self, nodemeta, timestamp, data, description):
    if self._job.getdata('perf')['active'] != True:
        self._error = ProcessingError.RAW_COUNTER_UNAVAILABLE
        return False

    ndata = numpy.array(data)

    if nodemeta.nodename not in self._first:
        self._first[nodemeta.nodename] = ndata
        return True

    if ndata.shape == self._first[nodemeta.nodename].shape:
        self._data[nodemeta.nodename] = numpy.sum(ndata - self._first[nodemeta.nodename])
        if numpy.any(numpy.fabs(self._data[nodemeta.nodename]) != self._data[nodemeta.nodename]):
            self._error = ProcessingError.PMDA_RESTARTED_DURING_JOB
            return False
    else:
        # Perf counters changed during the job
        self._error = ProcessingError.RAW_COUNTER_UNAVAILABLE
        return False

    return True
def dspneumann(n, kr):
    """Derivative spherical Neumann (Bessel second kind) of order n at kr

    Parameters
    ----------
    n : array_like
        Order
    kr : array_like
        Argument

    Returns
    -------
    Yv' : complex float
        Derivative of spherical Neumann (Bessel second kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    if _np.any(n < 0) | _np.any(_np.mod(n, 1) != 0) | _np.any(_np.mod(kr, 1) != 0):
        # Negative or non-integer input: fall back to the recurrence
        # y_n'(x) = (n / x) * y_n(x) - y_{n+1}(x).
        return spneumann(n, kr) * n / kr - spneumann(n + 1, kr)
    else:
        return scy.spherical_yn(n.astype(_np.int), kr.astype(_np.complex), derivative=True)
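# Sanity check, assuming SciPy is available: the fallback recurrence
# y_n'(x) = (n/x) * y_n(x) - y_{n+1}(x) matches SciPy's analytic derivative
# for integer order.
import numpy as np
from scipy.special import spherical_yn

n, x = 2, 3.7
recurrence = spherical_yn(n, x) * n / x - spherical_yn(n + 1, x)
analytic = spherical_yn(n, x, derivative=True)
assert np.isclose(recurrence, analytic)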
def crop_pad(image, corner, shape):
    ndim = len(corner)
    corner = [int(round(c)) for c in corner]
    shape = [int(round(s)) for s in shape]
    original = image.shape[-ndim:]
    zipped = list(zip(corner, shape, original))  # materialize: reused below

    # Use the builtin any() here: np.any() does not evaluate generators.
    if any(c < 0 or c + s > o for (c, s, o) in zipped):
        no_padding = [(0, 0)] * (image.ndim - ndim)
        padding = [(max(-c, 0), max(c + s - o, 0)) for (c, s, o) in zipped]
        corner = [c + max(-c, 0) for c in corner]
        image_temp = np.pad(image, no_padding + padding, mode=str('constant'))
    else:
        image_temp = image

    no_crop = [slice(o + 1) for o in image.shape[:-ndim]]
    crop = [slice(c, c + s) for (c, s) in zip(corner, shape)]
    return image_temp[tuple(no_crop + crop)]  # tuple: indexing with a list is deprecated
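# Usage sketch: cropping a window that extends past the image border comes
# back zero-padded.
import numpy as np

image = np.arange(25, dtype=float).reshape(5, 5)
window = crop_pad(image, corner=(-1, -1), shape=(3, 3))
print(window.shape)  # (3, 3)
print(window[0])     # first row is padding: [0. 0. 0.]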
def slice_image(pos, image, radius):
    """ Slice a box around a group of features from an image.

    The box is the smallest box that contains all coordinates up to `radius`
    from any coordinate.

    Parameters
    ----------
    image : ndarray
        The image that will be sliced
    pos : iterable
        An iterable (e.g. list or ndarray) that contains the feature positions
    radius : number or tuple of numbers
        Defines the size of the slice. Every pixel that has a distance lower
        than or equal to `radius` to a feature position is included.

    Returns
    -------
    tuple of:
    - the sliced image
    - the coordinate of the slice origin (top-left pixel)
    """
    slices, origin = get_slice(pos, image.shape, radius)
    return image[slices], origin
def _logcdf(self, samples):
    lower = np.full(2, -np.inf)
    upper = norm.ppf(samples)
    limit_flags = np.zeros(2)
    if upper.shape[0] > 0:

        def func1d(upper1d):
            '''
            Calculates the multivariate normal cumulative distribution
            function of a single sample.
            '''
            return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]

        vals = np.apply_along_axis(func1d, -1, upper)
    else:
        vals = np.empty((0, ))

    old_settings = np.seterr(divide='ignore')
    vals = np.log(vals)
    np.seterr(**old_settings)

    vals[np.any(samples == 0.0, axis=1)] = -np.inf
    vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
    vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
    return vals
def eval(self, t):
    # given a time vector t, return the design matrix column vector(s)
    if self.type is None:
        return np.array([])

    hl = np.zeros((t.shape[0],))
    ht = np.zeros((t.shape[0],))

    if self.type in (0, 2):
        hl[t >= self.year] = np.log10(1 + (t[t >= self.year] - self.year) / self.T)
    if self.type in (1, 2):
        ht[t >= self.year] = 1

    return np.append(ht, hl) if np.any(hl) else ht
def validate_transitions_cpu_old(transitions, **kwargs):
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])
    base = setting['base']
    width = pre.shape[1] // base
    height = pre.shape[1] // base
    load(width, height)

    pre_validation = validate_states(pre, **kwargs)
    suc_validation = validate_states(suc, **kwargs)

    results = []
    # Loop variables are renamed so they don't shadow the arrays above.
    for p, s, p_valid, s_valid in zip(pre, suc, pre_validation, suc_validation):
        if p_valid and s_valid:
            c = to_configs(np.array([p, s]), verbose=False)
            succs = successors(c[0], width, height)
            # The transition is valid iff s is among the true successors of p.
            results.append(np.any(np.all(np.equal(succs, c[1]), axis=1)))
        else:
            results.append(False)
    return results
def validate_transitions(transitions, check_states=True, **kwargs):
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])

    if check_states:
        pre_validation = validate_states(pre, verbose=False, **kwargs)
        suc_validation = validate_states(suc, verbose=False, **kwargs)

    pre_configs = to_configs(pre, verbose=False, **kwargs)
    suc_configs = to_configs(suc, verbose=False, **kwargs)

    results = []
    if check_states:
        for pre_c, suc_c, pre_valid, suc_valid in zip(pre_configs, suc_configs, pre_validation, suc_validation):
            if pre_valid and suc_valid:
                succs = successors(pre_c)
                results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
            else:
                results.append(False)
    else:
        for pre_c, suc_c in zip(pre_configs, suc_configs):
            succs = successors(pre_c)
            results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
    return results
def transform(self, X):
    feature_range = self.feature_range
    X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
    if X.ndim == 1:
        warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)

    if np.any(X > feature_range[1]) or np.any(X < feature_range[0]):
        warnings.warn(
            "Input data is outside the feature range {}; clipping."
            .format(feature_range)
        )
        X[X > feature_range[1]] = feature_range[1]
        X[X < feature_range[0]] = feature_range[0]
    return X
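# The same range check and clipping, sketched with plain numpy; the
# feature_range values here are illustrative only.
import numpy as np

feature_range = (0.0, 1.0)
X = np.array([-0.2, 0.5, 1.3])

if np.any(X > feature_range[1]) or np.any(X < feature_range[0]):
    X = np.clip(X, feature_range[0], feature_range[1])

print(X)  # [0.  0.5 1. ]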
def vorEdges(vor, far):
    """
    Given a Voronoi tessellation, returns the set of Voronoi edges.
    far is the length of the "infinity" edges
    """
    edges = []
    # Finite edges: both ridge vertices exist.
    for simplex in vor.ridge_vertices:
        simplex = numpy.asarray(simplex)
        if numpy.all(simplex >= 0):
            edge = {}
            edge['p1'] = numpy.array([vor.vertices[simplex, 0][0], vor.vertices[simplex, 1][0]])
            edge['p2'] = numpy.array([vor.vertices[simplex, 0][1], vor.vertices[simplex, 1][1]])
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)

    # Infinite edges: extend each to a far point along the outward normal.
    ptp_bound = vor.points.ptp(axis=0)
    center = vor.points.mean(axis=0)
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = numpy.asarray(simplex)
        if numpy.any(simplex < 0):
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex
            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= numpy.linalg.norm(t)
            n = numpy.array([-t[1], t[0]])  # normal
            midpoint = vor.points[pointidx].mean(axis=0)
            direction = numpy.sign(numpy.dot(midpoint - center, n)) * n
            far_point = vor.vertices[i] + direction * ptp_bound.max() * far
            edge = {}
            edge['p1'], edge['p2'] = vor.vertices[i, :], far_point
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)
    return edges
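# Minimal usage sketch with SciPy's Voronoi; the random points are
# illustrative only.
import numpy
from scipy.spatial import Voronoi

points = numpy.random.rand(12, 2)
vor = Voronoi(points)
edges = vorEdges(vor, far=2.0)
print(len(edges), edges[0]['p1'], edges[0]['t'])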
def sometrue(a, axis=None, out=None, keepdims=False):
    """
    Check whether some values are true.

    Refer to `any` for full documentation.

    See Also
    --------
    any : equivalent function
    """
    arr = asanyarray(a)
    try:
        return arr.any(axis=axis, out=out, keepdims=keepdims)
    except TypeError:
        return arr.any(axis=axis, out=out)
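# sometrue simply forwards to ndarray.any, so np.any is the interchangeable
# modern spelling:
import numpy as np

a = np.array([[0, 0, 1], [0, 0, 0]])
print(np.any(a))          # True
print(np.any(a, axis=1))  # [ True False]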
def test_ddof_too_big(self):
    nanfuncs = [np.nanvar, np.nanstd]
    stdfuncs = [np.var, np.std]
    dsize = [len(d) for d in _rdat]
    for nf, rf in zip(nanfuncs, stdfuncs):
        for ddof in range(5):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                tgt = [ddof >= d for d in dsize]
                res = nf(_ndat, axis=1, ddof=ddof)
                assert_equal(np.isnan(res), tgt)
                if any(tgt):
                    assert_(len(w) == 1)
                    assert_(issubclass(w[0].category, RuntimeWarning))
                else:
                    assert_(len(w) == 0)
def __iadd__(self, other):
    """
    Add other to self in-place.
    """
    m = getmask(other)
    if self._mask is nomask:
        if m is not nomask and m.any():
            self._mask = make_mask_none(self.shape, self.dtype)
            self._mask += m
    else:
        if m is not nomask:
            self._mask += m
    self._data.__iadd__(np.where(self._mask, self.dtype.type(0),
                                 getdata(other)))
    return self
def __idiv__(self, other):
    """
    Divide self by other in-place.
    """
    other_data = getdata(other)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__idiv__(np.where(self._mask, self.dtype.type(1),
                                 other_data))
    return self
def __ifloordiv__(self, other):
    """
    Floor divide self by other in-place.
    """
    other_data = getdata(other)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.floor_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
                                      other_data))
    return self
def __ipow__(self, other):
    """
    Raise self to the power other, in place.
    """
    other_data = getdata(other)
    other_mask = getmask(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    new_mask = mask_or(other_mask, invalid)
    self._mask = mask_or(self._mask, new_mask)
    return self
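# The mask propagation these in-place operators implement can be observed
# directly with numpy.ma: the result mask is the OR of both operand masks.
import numpy as np

a = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
b = np.ma.array([10.0, 10.0, 10.0], mask=[False, False, True])
a += b
print(a)       # [11.0 -- --]
print(a.mask)  # [False  True  True]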
def test_comparisons(self):
    A = np.arange(100).reshape(10, 10)
    mA = matrix(A)
    mB = matrix(A) + 0.1

    assert_(np.all(mB == A + 0.1))
    assert_(np.all(mB == matrix(A + 0.1)))
    assert_(not np.any(mB == matrix(A - 0.1)))
    assert_(np.all(mA < mB))
    assert_(np.all(mA <= mB))
    assert_(np.all(mA <= mA))
    assert_(not np.any(mA < mA))

    assert_(not np.any(mB < mA))
    assert_(np.all(mB >= mA))
    assert_(np.all(mB >= mB))
    assert_(not np.any(mB > mB))

    assert_(np.all(mA == mA))
    assert_(not np.any(mA == mB))
    assert_(np.all(mB != mA))

    assert_(not np.all(abs(mA) > 0))  # mA contains a zero entry
    assert_(np.all(abs(mB) > 0))
def loadData(self, filename, verbose=True, replace_missing=True):
    ''' Get the data from a text file in one of 3 formats: matrix, sparse, binary_sparse '''
    if verbose:
        print("========= Reading " + filename)
    start = time.time()
    if self.use_pickle and os.path.exists(os.path.join(self.tmp_dir, os.path.basename(filename) + ".pickle")):
        # Pickle files must be opened in binary mode.
        with open(os.path.join(self.tmp_dir, os.path.basename(filename) + ".pickle"), "rb") as pickle_file:
            vprint(verbose, "Loading pickle file : " + os.path.join(self.tmp_dir, os.path.basename(filename) + ".pickle"))
            return pickle.load(pickle_file)
    if 'format' not in self.info.keys():
        self.getFormatData(filename)
    if 'feat_num' not in self.info.keys():
        self.getNbrFeatures(filename)
    data_func = {'dense': data_io.data, 'sparse': data_io.data_sparse, 'sparse_binary': data_io.data_binary_sparse}
    data = data_func[self.info['format']](filename, self.info['feat_num'])
    # IMPORTANT: when we replace missing values we double the number of variables
    # np.isnan over the whole array: np.any does not evaluate map() objects in Python 3.
    if self.info['format'] == 'dense' and replace_missing and np.any(np.isnan(data)):
        vprint(verbose, "Replace missing values by 0 (slow, sorry)")
        data = data_converter.replace_missing(data)
    if self.use_pickle:
        with open(os.path.join(self.tmp_dir, os.path.basename(filename) + ".pickle"), "wb") as pickle_file:
            vprint(verbose, "Saving pickle file : " + os.path.join(self.tmp_dir, os.path.basename(filename) + ".pickle"))
            p = pickle.Pickler(pickle_file)
            p.fast = True
            p.dump(data)
    end = time.time()
    if verbose:
        print("[+] Success in %5.2f sec" % (end - start))
    return data
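# The missing-value test above reduces to a single np.any over np.isnan;
# np.nan_to_num stands in here for data_converter.replace_missing.
import numpy as np

data = np.array([[1.0, np.nan], [3.0, 4.0]])
if np.any(np.isnan(data)):
    data = np.nan_to_num(data)
print(data)  # [[1. 0.] [3. 4.]]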