def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):
    self._open()
    t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
    if self.time_axis == 0:
        local_chunk = self.data[t_start:t_stop, :]
    elif self.time_axis == 1:
        local_chunk = self.data[:, t_start:t_stop].T
    self._close()
    if nodes is not None:
        if not numpy.all(nodes == numpy.arange(self.nb_channels)):
            local_chunk = numpy.take(local_chunk, nodes, axis=1)
    return self._scale_data_to_float32(local_chunk)
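# Quick illustration of the channel selection above: with the chunk laid out as
# (time, channels), numpy.take(..., axis=1) keeps only the requested channel
# columns. The shapes and node list below are assumptions for this sketch, not
# values from the original recording class.
import numpy

chunk = numpy.arange(12, dtype=numpy.float32).reshape(4, 3)   # 4 samples, 3 channels
nodes = numpy.array([2, 0])                                   # hypothetical channel subset
subset = numpy.take(chunk, nodes, axis=1)                     # equivalent to chunk[:, nodes]
assert subset.shape == (4, 2)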
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):
    t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
    local_shape = t_stop - t_start
    local_chunk = numpy.zeros((self.nb_channels, local_shape), dtype=self.data_dtype)
    data_slice = self._get_slice_(t_start, t_stop)
    self._open()
    count = 0
    for s in data_slice:
        t_slice = len(s) // self.nb_channels
        local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels, len(s) // self.nb_channels)
        count += t_slice
    local_chunk = local_chunk.T
    self._close()
    if nodes is not None:
        if not numpy.all(nodes == numpy.arange(self.nb_channels)):
            local_chunk = numpy.take(local_chunk, nodes, axis=1)
    return self._scale_data_to_float32(local_chunk)
def load(shape, vertex_array):
    destination = vertex_array[gx.VA_PTNMTXIDX.name]
    vertex_index = 0
    matrix_table = numpy.zeros(10, numpy.uint32)
    for batch in shape.batches:
        source = numpy.concatenate([primitive.vertices[gx.VA_PTNMTXIDX.name] for primitive in batch.primitives])
        source //= 3
        for i, index in enumerate(batch.matrix_table):
            if index == 0xFFFF: continue
            matrix_table[i] = index
        length = sum(len(primitive.vertices) for primitive in batch.primitives)
        numpy.take(matrix_table, source, 0, destination[vertex_index:vertex_index + length])
        vertex_index += length
    glEnableVertexAttribArray(MATRIX_INDEX_ATTRIBUTE_LOCATION)
    vertex_type = vertex_array.dtype
    stride = vertex_type.itemsize
    offset = vertex_type.fields[gx.VA_PTNMTXIDX.name][1]
    glVertexAttribIPointer(MATRIX_INDEX_ATTRIBUTE_LOCATION, 1, GL_UNSIGNED_INT, stride, GLvoidp(offset))
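# The call numpy.take(matrix_table, source, 0, destination[...]) above uses the
# positional `out` argument: each per-vertex slot is looked up in matrix_table
# and written straight into a slice of the vertex array. A minimal sketch of
# that pattern with made-up sizes and values:
import numpy

matrix_table = numpy.array([10, 20, 30], dtype=numpy.uint32)   # slot -> matrix index
source = numpy.array([0, 2, 1, 0])                             # per-vertex slot
destination = numpy.zeros(4, dtype=numpy.uint32)
numpy.take(matrix_table, source, 0, destination)               # axis=0, out=destination
assert destination.tolist() == [10, 30, 20, 10]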
def get_caption_batch(loaded_data, data_dir, dataset='flowers', batch_size=64):
    captions = np.zeros((batch_size, loaded_data['max_caps_len']))
    batch_idx = np.random.randint(0, loaded_data['data_length'],
                                  size=batch_size)
    image_ids = np.take(loaded_data['image_list'], batch_idx)
    image_files = []
    image_caps = []
    image_caps_ids = []
    for idx, image_id in enumerate(image_ids):
        image_file = join(data_dir, dataset, 'jpg' + image_id)
        random_caption = random.randint(0, 4)
        image_caps_ids.append(random_caption)
        captions[idx, :] = \
            loaded_data['captions'][image_id][random_caption][
                0:loaded_data['max_caps_len']]
        image_caps.append(loaded_data['captions']
                          [image_id][random_caption])
        image_files.append(image_file)
    return captions, image_files, image_caps, image_ids, image_caps_ids
def get_val_caps_batch(batch_size, loaded_data, data_set, data_dir):
    if data_set == 'flowers':
        captions = np.zeros((batch_size, loaded_data['max_caps_len']))
        batch_idx = np.random.randint(0, loaded_data['val_data_len'],
                                      size=batch_size)
        image_ids = np.take(loaded_data['val_img_list'], batch_idx)
        image_files = []
        image_caps = []
        for idx, image_id in enumerate(image_ids):
            image_file = join(data_dir,
                              'flowers/jpg/' + image_id)
            random_caption = random.randint(0, 4)
            captions[idx, :] = \
                loaded_data['val_captions'][image_id][random_caption][
                    0:loaded_data['max_caps_len']]
            image_caps.append(loaded_data['str_captions']
                              [image_id][random_caption])
            image_files.append(image_file)
        return captions, image_files, image_caps, image_ids
    else:
        raise Exception('Dataset not found')
def label_ranking_reciprocal_rank(label,   # [sent_num]
                                  preds):  # [sent_num]
    """Calculate the reciprocal rank according to its definition."""
    rank = np.argsort(preds)[::-1]
    #pos_rank = np.take(rank, np.where(label == 1)[0])
    #return np.mean(1.0 / pos_rank)
    if_find = False
    pos = 0
    for r in rank:
        pos += 1
        if label[r] == 1:
            first_pos_r = pos
            if_find = True
            break
    assert(if_find)
    return 1.0 / first_pos_r
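# The commented-out lines hint at a vectorised variant. A small sketch of
# computing the reciprocal rank of the first positive label with numpy only
# (the example arrays are made up; the result matches the loop above):
import numpy as np

label = np.array([0, 1, 0, 1])
preds = np.array([0.1, 0.4, 0.9, 0.2])
order = np.argsort(preds)[::-1]                 # indices by descending score
ranked_labels = np.take(label, order)           # labels in ranked order
first_pos = int(np.argmax(ranked_labels == 1)) + 1
assert abs(1.0 / first_pos - 0.5) < 1e-12       # first positive sits at rank 2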
def sample(self, n):
    """
    Sample n elements uniformly from the memory
    """
    indices = np.random.choice(self.cur_size, n, replace=False)
    s1 = np.take(self.S1, indices, axis=0)
    a = np.take(self.A, indices)
    r = np.take(self.R, indices)
    s2 = np.take(self.S2, indices, axis=0)
    t = np.take(self.T, indices)
    return s1, a, r, s2, t

    # sample_elements = []
    # for _ in range(n):
    #     sample_elements.append(self.memory[random.randint(0, len(self.memory)-1)])
    #
    # return sample_elements
def test_TakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
    assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
    assert_equal(np.inner(filled(x, 0), filled(y, 0)),
                 inner(x, y))
    assert_equal(np.outer(filled(x, 0), filled(y, 0)),
                 outer(x, y))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def test_generic_methods(self):
    # Tests some MaskedArray methods.
    a = array([1, 3, 2])
    assert_equal(a.any(), a._data.any())
    assert_equal(a.all(), a._data.all())
    assert_equal(a.argmax(), a._data.argmax())
    assert_equal(a.argmin(), a._data.argmin())
    assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
    assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
    assert_equal(a.conj(), a._data.conj())
    assert_equal(a.conjugate(), a._data.conjugate())
    m = array([[1, 2], [3, 4]])
    assert_equal(m.diagonal(), m._data.diagonal())
    assert_equal(a.sum(), a._data.sum())
    assert_equal(a.take([1, 2]), a._data.take([1, 2]))
    assert_equal(m.transpose(), m._data.transpose())
def test_testTakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
    assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
    assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
               inner(x, y)))
    assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
               outer(x, y)))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def test_testArrayMethods(self):
    a = array([1, 3, 2])
    self.assertTrue(eq(a.any(), a._data.any()))
    self.assertTrue(eq(a.all(), a._data.all()))
    self.assertTrue(eq(a.argmax(), a._data.argmax()))
    self.assertTrue(eq(a.argmin(), a._data.argmin()))
    self.assertTrue(eq(a.choose(0, 1, 2, 3, 4),
                       a._data.choose(0, 1, 2, 3, 4)))
    self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
    self.assertTrue(eq(a.conj(), a._data.conj()))
    self.assertTrue(eq(a.conjugate(), a._data.conjugate()))
    m = array([[1, 2], [3, 4]])
    self.assertTrue(eq(m.diagonal(), m._data.diagonal()))
    self.assertTrue(eq(a.sum(), a._data.sum()))
    self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2])))
    self.assertTrue(eq(m.transpose(), m._data.transpose()))
def test_4(self):
    """
    Test of take, transpose, inner, outer products.
    """
    x = self.arange(24)
    y = np.arange(24)
    x[5:6] = self.masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
    assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
    assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
                         self.inner(x, y))
    assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
                         self.outer(x, y))
    y = self.array(['abc', 1, 'def', 2, 3], object)
    y[2] = self.masked
    t = self.take(y, [0, 3, 4])
    assert t[0] == 'abc'
    assert t[1] == 2
    assert t[2] == 3
def take(self, indices, axis=None, out=None, mode='raise'):
    """
    """
    (_data, _mask) = (self._data, self._mask)
    cls = type(self)
    # Make sure the indices are not masked
    maskindices = getattr(indices, '_mask', nomask)
    if maskindices is not nomask:
        indices = indices.filled(0)
    # Get the data
    if out is None:
        out = _data.take(indices, axis=axis, mode=mode).view(cls)
    else:
        np.take(_data, indices, axis=axis, mode=mode, out=out)
    # Get the mask
    if isinstance(out, MaskedArray):
        if _mask is nomask:
            outmask = maskindices
        else:
            outmask = _mask.take(indices, axis=axis, mode=mode)
            outmask |= maskindices
        out.__setmask__(outmask)
    return out

# Array methods
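# What the MaskedArray.take method above produces, shown through the public
# numpy.ma API (a small sanity check, not part of the original file):
import numpy as np

a = np.ma.array([10, 20, 30, 40], mask=[False, True, False, False])
t = a.take([1, 3, 0])
# the mask travels with the data: element 0 of the result is masked
assert bool(t.mask[0]) and not bool(t.mask[1]) and t[1] == 40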
def recalculate_objects(pred_dict, image):
    proposals = pred_dict['rpn_prediction']['proposals']
    proposals_prob = pred_dict['classification_prediction']['rcnn']['cls_prob']
    proposals_target = proposals_prob.argmax(axis=1) - 1
    bbox_offsets = pred_dict[
        'classification_prediction']['rcnn']['bbox_offsets']
    bbox_offsets = bbox_offsets[proposals_target >= 0]
    proposals = proposals[proposals_target >= 0]
    proposals_target = proposals_target[proposals_target >= 0]
    bbox_offsets_idx_pairs = np.stack(
        np.array([
            proposals_target * 4, proposals_target * 4 + 1,
            proposals_target * 4 + 2, proposals_target * 4 + 3]), axis=1)
    bbox_offsets = np.take(bbox_offsets, bbox_offsets_idx_pairs.astype(np.int))
    bboxes = decode(proposals, bbox_offsets)
    return bboxes, proposals_target
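# Note that np.take without an `axis` argument, as used above, indexes into the
# flattened array and keeps the shape of the index array. A minimal standalone
# sketch of that behaviour (values unrelated to the detection model):
import numpy as np

offsets = np.arange(12.0)                  # a flat buffer of offsets
idx = np.array([[4, 5, 6, 7],
                [0, 1, 2, 3]])             # one row of four indices per proposal
picked = np.take(offsets, idx)             # gathered from offsets.ravel()
assert picked.shape == (2, 4) and picked[0, 0] == 4.0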
def recollect(self, w):
    if w is None:
        self.w = w
        return
    k = w['kernel']
    b = w['biases']
    k = np.take(k, self.inp, 2)
    k = np.take(k, self.out, 3)
    b = np.take(b, self.out)
    assert1 = k.shape == tuple(self.wshape['kernel'])
    assert2 = b.shape == tuple(self.wshape['biases'])
    assert assert1 and assert2, \
        'Dimension not matching in {} recollect'.format(
            self._signature)
    self.w['kernel'] = k
    self.w['biases'] = b
def recollect(self, w):
    if w is None:
        self.w = w
        return
    idx = self.keep_idx
    k = w['kernel']
    b = w['biases']
    self.w['kernel'] = np.take(k, idx, 3)
    self.w['biases'] = np.take(b, idx)
    if self.batch_norm:
        m = w['moving_mean']
        v = w['moving_variance']
        g = w['gamma']
        self.w['moving_mean'] = np.take(m, idx)
        self.w['moving_variance'] = np.take(v, idx)
        self.w['gamma'] = np.take(g, idx)
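# The two recollect variants above prune convolution weights with np.take:
# keep selected output filters along the last kernel axis plus the matching
# bias entries. A small sketch with assumed shapes and a made-up keep list:
import numpy as np

kernel = np.random.randn(3, 3, 16, 32)     # (height, width, in, out)
biases = np.random.randn(32)
keep_idx = [0, 5, 9]                       # hypothetical filters to keep
pruned_kernel = np.take(kernel, keep_idx, 3)
pruned_biases = np.take(biases, keep_idx)
assert pruned_kernel.shape == (3, 3, 16, 3) and pruned_biases.shape == (3,)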
def sample_from_histogram(p, n_samples=1):
    """
    Return the indices of the bins sampled according to the histogram p.

    @param p: histogram
    @type p: numpy.array
    @param n_samples: number of samples to generate
    @type n_samples: integer
    """
    from numpy import add, less, argsort, take, arange
    from numpy.random import random

    indices = argsort(p)
    indices = take(indices, arange(len(p) - 1, -1, -1))
    c = add.accumulate(take(p, indices)) / add.reduce(p)
    return indices[add.reduce(less.outer(c, random(n_samples)), 0)]
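# Possible usage of sample_from_histogram: draw bin indices in proportion to
# the (unnormalised) weights in p. The draw is random, so only the shape is
# checked here.
import numpy as np

p = np.array([0.1, 0.7, 0.2])
samples = sample_from_histogram(p, n_samples=1000)
assert samples.shape == (1000,)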
def load_dataset():
    if not os.path.exists("./dataset/training.csv"):
        print("dataset does not exist")
        raise Exception
    # load dataset
    labeled_image = pd.read_csv("./dataset/training.csv")
    # preprocessing dataframe
    image = np.array(labeled_image["Image"].values).reshape(-1, 1)
    image = np.apply_along_axis(lambda img: (img[0].split()), 1, image)
    image = image.astype(np.int32)  # because train_img elements are string before preprocessing
    image = image.reshape(-1, 96 * 96)  # data 96 * 96 size image
    label = labeled_image.values[:, :-1]
    label = label.astype(np.float32)
    # nan value to mean value
    col_mean = np.nanmean(label, axis=0)
    indices = np.where(np.isnan(label))
    label[indices] = np.take(col_mean, indices[1])
    return image, label
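# The NaN-imputation trick above in isolation: np.take(col_mean, indices[1])
# looks up the column mean for every NaN position. A tiny illustrative example:
import numpy as np

label = np.array([[1.0, np.nan],
                  [3.0, 4.0]])
col_mean = np.nanmean(label, axis=0)       # [2.0, 4.0]
nan_pos = np.where(np.isnan(label))        # row and column indices of NaNs
label[nan_pos] = np.take(col_mean, nan_pos[1])
assert label[0, 1] == 4.0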
def process_data(coords, nbr_idx, elements):
    num_atoms = len(nbr_idx)
    # truncates off zero padding at the end and maps atomic numbers to atom types
    coords = coords[:num_atoms, :]
    elements = np.array([atom_dictionary[elements[i]] for i in range(num_atoms)], dtype=np.int32)
    # pad the neighbor indices with zeros if not enough neighbors
    elements = np.append(elements, 0)
    for i in range(num_atoms):
        if len(nbr_idx[i]) < 12:
            nbr_idx[i].extend(np.ones([12 - len(nbr_idx[i])], dtype=np.int32) * num_atoms)
    nbr_idx = np.array([nbr_idx[i] for i in range(num_atoms)], dtype=np.int32)
    # creates neighboring atom type matrix - 0 = nonexistent atom
    nbr_atoms = np.take(elements, nbr_idx)
    np.place(nbr_idx, nbr_idx >= num_atoms, 0)
    elements = elements[:-1]
    return (coords.astype(np.float32), nbr_idx.astype(np.int32),
            elements.astype(np.int32), nbr_atoms.astype(np.int32))
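# In process_data, np.take(elements, nbr_idx) maps every neighbour index to its
# atom type; with a 2-D index array the result keeps the index array's shape.
# Sketch with made-up types (the last slot is the "no atom" padding entry):
import numpy as np

elements = np.array([6, 1, 8, 0], dtype=np.int32)
nbr_idx = np.array([[1, 2, 3],
                    [0, 3, 3]], dtype=np.int32)
nbr_atoms = np.take(elements, nbr_idx)
assert nbr_atoms.tolist() == [[1, 8, 0], [6, 0, 0]]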
# test_internals.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def test_take(self):
    def assert_take_ok(mgr, axis, indexer):
        mat = mgr.as_matrix()
        taken = mgr.take(indexer, axis)
        assert_almost_equal(np.take(mat, indexer, axis), taken.as_matrix())
        assert_almost_equal(mgr.axes[axis].take(indexer), taken.axes[axis])

    for mgr in self.MANAGERS:
        for ax in range(mgr.ndim):
            # take/fancy indexer
            yield assert_take_ok, mgr, ax, []
            yield assert_take_ok, mgr, ax, [0, 0, 0]
            yield assert_take_ok, mgr, ax, lrange(mgr.shape[ax])
            if mgr.shape[ax] >= 3:
                yield assert_take_ok, mgr, ax, [0, 1, 2]
                yield assert_take_ok, mgr, ax, [-1, -2, -3]
# test_nanops.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def check_bool(self, func, value, correct, *args, **kwargs):
    while getattr(value, 'ndim', True):
        try:
            res0 = func(value, *args, **kwargs)
            if correct:
                self.assertTrue(res0)
            else:
                self.assertFalse(res0)
        except BaseException as exc:
            exc.args += ('dim: %s' % getattr(value, 'ndim', value), )
            raise
        if not hasattr(value, 'ndim'):
            break
        try:
            value = np.take(value, 0, axis=-1)
        except ValueError:
            break
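# In check_bool, np.take(value, 0, axis=-1) peels off the last axis each pass,
# so the loop re-runs the reduction on progressively lower-dimensional input.
# Standalone sketch of that dimensionality reduction:
import numpy as np

value = np.ones((2, 3, 4))
value = np.take(value, 0, axis=-1)   # shape (2, 3)
value = np.take(value, 0, axis=-1)   # shape (2,)
assert value.shape == (2,)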
# merge.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
    if len(join_keys) > 1:
        if not ((isinstance(right_ax, MultiIndex) and
                 len(join_keys) == right_ax.nlevels)):
            raise AssertionError("If more than one join key is given then "
                                 "'right_ax' must be a MultiIndex and the "
                                 "number of join keys must be the number of "
                                 "levels in right_ax")

        left_indexer, right_indexer = \
            _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        jkey = join_keys[0]

        left_indexer, right_indexer = \
            _get_single_indexer(jkey, right_ax, sort=sort)

    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer

    # left frame preserves order & length of its index
    return left_ax, None, right_indexer
# merge.py (same project as above)
def _sort_labels(uniques, left, right):
    if not isinstance(uniques, np.ndarray):
        # tuplesafe
        uniques = Index(uniques).values

    sorter = uniques.argsort()

    reverse_indexer = np.empty(len(sorter), dtype=np.int64)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    new_left = reverse_indexer.take(com._ensure_platform_int(left))
    np.putmask(new_left, left == -1, -1)

    new_right = reverse_indexer.take(com._ensure_platform_int(right))
    np.putmask(new_right, right == -1, -1)

    return new_left, new_right
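# _sort_labels builds an inverse permutation with `put` and then relabels the
# factorised codes with `take`. A minimal sketch of that inverse-permutation
# pattern (arrays are illustrative):
import numpy as np

sorter = np.array([2, 0, 1])                      # e.g. uniques.argsort()
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
codes = np.array([0, 1, 2, 1])
assert reverse_indexer.take(codes).tolist() == [1, 2, 0, 2]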