def iteratorFn(self, data):
## Return 1) a function that will provide an iterator for data and 2) a list of header strings
    if isinstance(data, (list, tuple)):
return lambda d: d.__iter__(), None
elif isinstance(data, dict):
return lambda d: iter(d.values()), list(map(asUnicode, data.keys()))
elif (hasattr(data, 'implements') and data.implements('MetaArray')):
if data.axisHasColumns(0):
header = [asUnicode(data.columnName(0, i)) for i in range(data.shape[0])]
elif data.axisHasValues(0):
header = list(map(asUnicode, data.xvals(0)))
else:
header = None
return self.iterFirstAxis, header
elif isinstance(data, np.ndarray):
return self.iterFirstAxis, None
elif isinstance(data, np.void):
return self.iterate, list(map(asUnicode, data.dtype.names))
elif data is None:
        return None, None
else:
msg = "Don't know how to iterate over data type: {!s}".format(type(data))
raise TypeError(msg)
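# Hedged usage sketch for iteratorFn (assumes a host instance `tw` with the
# method above, e.g. a pyqtgraph TableWidget; illustrative only):
#   fn, header = tw.iteratorFn({'a': [1, 2], 'b': [3, 4]})
#   header                                  # -> ['a', 'b']
#   list(fn({'a': [1, 2], 'b': [3, 4]}))    # -> [[1, 2], [3, 4]]
# For a 2D ndarray the same call returns (tw.iterFirstAxis, None), so rows
# come back one at a time with no header.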
def hoverOver(self, items):
#print "FlowchartWidget.hoverOver called."
term = None
for item in items:
if item is self.hoverItem:
return
self.hoverItem = item
if hasattr(item, 'term') and isinstance(item.term, Terminal):
term = item.term
break
if term is None:
self.hoverText.setPlainText("")
else:
val = term.value()
        if isinstance(val, np.ndarray):
val = "%s %s %s" % (type(val).__name__, str(val.shape), str(val.dtype))
else:
val = str(val)
if len(val) > 400:
val = val[:400] + "..."
self.hoverText.setPlainText("%s.%s = %s" % (term.node().name(), term.name(), val))
#self.hoverLabel.setCursorPosition(0)
def applyFilter(data, b, a, padding=100, bidir=True):
"""Apply a linear filter with coefficients a, b. Optionally pad the data before filtering
and/or run the filter in both directions."""
try:
import scipy.signal
except ImportError:
raise Exception("applyFilter() requires the package scipy.signal.")
d1 = data.view(np.ndarray)
if padding > 0:
d1 = np.hstack([d1[:padding], d1, d1[-padding:]])
if bidir:
d1 = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, d1)[::-1])[::-1]
else:
d1 = scipy.signal.lfilter(b, a, d1)
if padding > 0:
d1 = d1[padding:-padding]
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d1, info=data.infoCopy())
else:
return d1
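# A hedged usage sketch: applyFilter expects ready-made coefficients, so we
# design a low-pass Butterworth filter with scipy.signal.butter first. The
# test signal is made up for illustration.
import numpy as np
import scipy.signal

t = np.linspace(0, 1, 1000)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
b, a = scipy.signal.butter(3, 0.1)   # order 3, normalized cutoff 0.1
smooth = applyFilter(noisy, b, a, padding=100, bidir=True)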
def besselFilter(data, cutoff, order=1, dt=None, btype='low', bidir=True):
"""return data passed through bessel filter"""
try:
import scipy.signal
except ImportError:
raise Exception("besselFilter() requires the package scipy.signal.")
if dt is None:
try:
tvals = data.xvals('Time')
dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
        except Exception:
dt = 1.0
b,a = scipy.signal.bessel(order, cutoff * dt, btype=btype)
return applyFilter(data, b, a, bidir=bidir)
#base = data.mean()
#d1 = scipy.signal.lfilter(b, a, data.view(ndarray)-base) + base
#if (hasattr(data, 'implements') and data.implements('MetaArray')):
#return MetaArray(d1, info=data.infoCopy())
#return d1
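# Hedged usage sketch: a 50 Hz low-pass Bessel filter on a 10 kHz trace.
# dt is given explicitly because a plain ndarray has no 'Time' axis to
# read it from.
import numpy as np

fs = 10000.0
trace = np.random.randn(int(fs))     # 1 s of noise, purely illustrative
filtered = besselFilter(trace, cutoff=50.0, dt=1.0 / fs, btype='low')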
def modeFilter(data, window=500, step=None, bins=None):
"""Filter based on histogram-based mode function"""
d1 = data.view(np.ndarray)
vals = []
l2 = int(window/2.)
if step is None:
step = l2
i = 0
while True:
if i > len(data)-step:
break
vals.append(mode(d1[i:i+window], bins))
i += step
chunks = [np.linspace(vals[0], vals[0], l2)]
for i in range(len(vals)-1):
chunks.append(np.linspace(vals[i], vals[i+1], step))
remain = len(data) - step*(len(vals)-1) - l2
chunks.append(np.linspace(vals[-1], vals[-1], remain))
d2 = np.hstack(chunks)
if (hasattr(data, 'implements') and data.implements('MetaArray')):
return MetaArray(d2, info=data.infoCopy())
return d2
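# Hedged usage sketch for modeFilter. Note it relies on a mode() helper
# (histogram-based, defined elsewhere in the same module) that is not part
# of this listing.
import numpy as np

step_signal = np.concatenate([np.zeros(2000), np.ones(2000)])
noisy_steps = step_signal + 0.05 * np.random.randn(4000)
baseline = modeFilter(noisy_steps, window=500)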
def _axisSlice(self, i, cols):
#print "axisSlice", i, cols
if 'cols' in self._info[i] or 'values' in self._info[i]:
ax = self._axisCopy(i)
if 'cols' in ax:
#print " slicing columns..", array(ax['cols']), cols
sl = np.array(ax['cols'])[cols]
if isinstance(sl, np.ndarray):
sl = list(sl)
ax['cols'] = sl
#print " result:", ax['cols']
if 'values' in ax:
ax['values'] = np.array(ax['values'])[cols]
else:
ax = self._info[i]
#print " ", ax
return ax
def __getitem__(self, i):
'''
Get the item or slice :attr:`i`.
'''
obj = super(IrregularlySampledSignal, self).__getitem__(i)
if isinstance(i, int): # a single point in time across all channels
obj = pq.Quantity(obj.magnitude, units=obj.units)
elif isinstance(i, tuple):
j, k = i
if isinstance(j, int): # a single point in time across some channels
obj = pq.Quantity(obj.magnitude, units=obj.units)
else:
if isinstance(j, slice):
obj.times = self.times.__getitem__(j)
elif isinstance(j, np.ndarray):
raise NotImplementedError("Arrays not yet supported")
else:
raise TypeError("%s not supported" % type(j))
if isinstance(k, int):
obj = obj.reshape(-1, 1)
elif isinstance(i, slice):
obj.times = self.times.__getitem__(i)
else:
raise IndexError("index should be an integer, tuple or slice")
return obj
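# Hedged sketch, assuming the neo / quantities stack this method belongs to:
# integer indexing collapses to a bare Quantity, slicing keeps the times.
import quantities as pq
from neo.core import IrregularlySampledSignal

sig = IrregularlySampledSignal([0.0, 1.2, 3.1] * pq.s,
                               [[1.0], [2.0], [3.0]] * pq.mV)
point = sig[1]      # pq.Quantity: one sample across all channels
chunk = sig[0:2]    # IrregularlySampledSignal with times[0:2]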
def _check_annotations(value):
"""
Recursively check that value is either of a "simple" type (number, string,
date/time) or is a (possibly nested) dict, list or numpy array containing
only simple types.
"""
if isinstance(value, np.ndarray):
if not issubclass(value.dtype.type, ALLOWED_ANNOTATION_TYPES):
raise ValueError("Invalid annotation. NumPy arrays with dtype %s"
"are not allowed" % value.dtype.type)
elif isinstance(value, dict):
for element in value.values():
_check_annotations(element)
elif isinstance(value, (list, tuple)):
for element in value:
_check_annotations(element)
elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):
raise ValueError("Invalid annotation. Annotations of type %s are not"
"allowed" % type(value))
def _check_time_in_range(value, t_start, t_stop, view=False):
'''
Verify that all times in :attr:`value` are between :attr:`t_start`
    and :attr:`t_stop` (inclusive).
    If :attr:`view` is True, views are used for the test.
    Using views drastically increases the speed, but is only safe if you are
    certain that the dtype and units are the same.
'''
if not value.size:
return
if view:
value = value.view(np.ndarray)
t_start = t_start.view(np.ndarray)
t_stop = t_stop.view(np.ndarray)
if value.min() < t_start:
raise ValueError("The first spike (%s) is before t_start (%s)" %
(value, t_start))
if value.max() > t_stop:
raise ValueError("The last spike (%s) is after t_stop (%s)" %
(value, t_stop))
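# Hedged sketch: the view=True fast path is safe here because all three
# arguments share dtype and units.
import numpy as np
import quantities as pq

spikes = np.array([0.5, 1.0, 1.5]) * pq.s
_check_time_in_range(spikes, t_start=0.0 * pq.s, t_stop=2.0 * pq.s, view=True)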
def assert_arrays_almost_equal(a, b, threshold, dtype=False):
'''
    Check that two arrays have the same shape and that their contents differ
    by abs(a - b) < threshold for all elements.
    If threshold is None, fall back to an exact equality check
    (assert_arrays_equal) instead.
'''
if threshold is None:
return assert_arrays_equal(a, b, dtype=dtype)
assert isinstance(a, np.ndarray), "a is a %s" % type(a)
assert isinstance(b, np.ndarray), "b is a %s" % type(b)
    assert a.shape == b.shape, "shape mismatch: %s != %s" % (a.shape, b.shape)
#assert a.dtype == b.dtype, "%s and %b not same dtype %s %s" % (a, b,
# a.dtype,
# b.dtype)
if a.dtype.kind in ['f', 'c', 'i']:
assert (abs(a - b) < threshold).all(), \
"abs(%s - %s) max(|a - b|) = %s threshold:%s" % \
(a, b, (abs(a - b)).max(), threshold)
if dtype:
assert a.dtype == b.dtype, \
"%s and %s not same dtype %s and %s" % (a, b, a.dtype, b.dtype)
def write(filename, predictions):
    ''' Write prediction scores in the prescribed format '''
with open(filename, "w") as output_file:
for row in predictions:
            if not isinstance(row, (np.ndarray, list)):
                row = [row]
for val in row:
output_file.write('{:g} '.format(float(val)))
output_file.write('\n')
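# Hedged usage sketch: one whitespace-separated row of %g-formatted scores
# per prediction; bare scalars are wrapped into single-element rows. The
# filenames are illustrative.
import numpy as np

write('predictions.txt', np.array([[0.1, 0.9], [0.8, 0.2]]))
write('scalar_predictions.txt', [0.25, 0.75])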
def get_concatenated_sets(lang_codes, feature_set_str):
feature_set_parts = feature_set_str.split("+")
feature_names = []
    feature_values = np.empty((len(lang_codes), 0))
for feature_set_part in feature_set_parts:
more_feature_names, more_feature_values = get_union_sets(lang_codes, feature_set_part)
feature_names += more_feature_names
feature_values = np.concatenate([feature_values, more_feature_values], axis=1)
return feature_names, feature_values
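# Hedged sketch: '+'-separated feature set names are resolved one part at a
# time via get_union_sets (not shown in this listing) and concatenated
# column-wise. 'setA+setB' and the language codes are hypothetical.
names, values = get_concatenated_sets(['eng', 'fra'], 'setA+setB')
# values.shape == (2, n_features_of_setA + n_features_of_setB)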
def test_get_vector(self):
net = nt.Netlist(netlists_path + 'dc_ac_check.net')
net.setup_sim('dc', 'v1 0 1 .3')
net.run()
val = net.get_vector('V(1)', 'dc1')
assert isinstance(val, numpy.ndarray)
assert len(val) == 4
ng.reset()
def test_real(self):
val = ng.get_data('const.e')
assert type(val) == np.ndarray
assert len(val) == 1
assert val.dtype == 'float64'
assert val[0] == pytest.approx(np.e)
def test_cmplx(self):
val = ng.get_data('const.i')
assert type(val) == np.ndarray
assert len(val) == 1
assert val.dtype == 'complex128'
assert val[0] == pytest.approx(1j)
def apply(self, x):
s = x.shape
if isinstance(x, np.ndarray):
return np.dot(x.reshape((s[0],np.prod(s[1:]))) - self.mean.get_value(), self.ZCA_mat.get_value()).reshape(s)
elif isinstance(x, T.TensorVariable):
return T.dot(x.flatten(2) - self.mean.dimshuffle('x',0), self.ZCA_mat).reshape(s)
else:
raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
def invert(self, x):
s = x.shape
if isinstance(x, np.ndarray):
return (np.dot(x.reshape((s[0],np.prod(s[1:]))), self.inv_ZCA_mat.get_value()) + self.mean.get_value()).reshape(s)
elif isinstance(x, T.TensorVariable):
return (T.dot(x.flatten(2), self.inv_ZCA_mat) + self.mean.dimshuffle('x',0)).reshape(s)
else:
raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
def _clip(self, action):
maxs = self.env.action_space.high
mins = self.env.action_space.low
if isinstance(action, np.ndarray):
np.clip(action, mins, maxs, out=action)
    elif isinstance(action, list):
        for i in range(len(action)):
            action[i] = np.clip(action[i], mins[i], maxs[i])
    else:
        action = np.clip(action, mins[0], maxs[0])
return action
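# Hedged sketch of the ndarray branch: np.clip with out= clips the action
# in place against Box-style bounds (bounds made up for illustration).
import numpy as np

mins, maxs = np.array([-1.0, -1.0]), np.array([1.0, 1.0])
action = np.array([1.5, -0.2])
np.clip(action, mins, maxs, out=action)   # action becomes [1.0, -0.2]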
# general_utils.py, from project almond-nnparser (Stanford-Mobisocial-IoT-Lab)
def get_minibatches(data, minibatch_size, shuffle=True):
"""
    Iterates through the provided data one minibatch at a time. You can use this function to
iterate through data in minibatches as follows:
for inputs_minibatch in get_minibatches(inputs, minibatch_size):
...
Or with multiple data sources:
for inputs_minibatch, labels_minibatch in get_minibatches([inputs, labels], minibatch_size):
...
Args:
data: there are two possible values:
- a list or numpy array
- a list or tuple where each element is either a list or numpy array
minibatch_size: the maximum number of items in a minibatch
shuffle: whether to randomize the order of returned data
Returns:
minibatches: the return value depends on data:
- If data is a list/array it yields the next minibatch of data.
            - If data is a list of lists/arrays it yields the next minibatch of each element in the
list. This can be used to iterate through multiple data sources
(e.g., features and labels) at the same time.
"""
    list_data = isinstance(data, (list, tuple)) and isinstance(data[0], (list, np.ndarray))
data_size = len(data[0]) if list_data else len(data)
indices = np.arange(data_size)
if shuffle:
np.random.shuffle(indices)
for minibatch_start in np.arange(0, data_size, minibatch_size):
minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]
yield [minibatch(d, minibatch_indices) for d in data] if list_data \
else minibatch(data, minibatch_indices)
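# Hedged usage sketch: iterating features and labels together, as the
# docstring describes (shuffle=False keeps the order deterministic).
import numpy as np

inputs = np.arange(10).reshape(5, 2)
labels = np.arange(5)
for x_mb, y_mb in get_minibatches([inputs, labels], minibatch_size=2, shuffle=False):
    print(x_mb.shape, y_mb.shape)   # (2, 2) (2,) ... then (1, 2) (1,)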
def minibatch(data, minibatch_idx):
return data[minibatch_idx] if isinstance(data, np.ndarray) else [data[i] for i in minibatch_idx]