import numpy as np
import tensorflow as tf


def sparse_tuple_from(sequences, dtype=np.int32):
    r"""Creates a sparse representation of ``sequences``.
    Args:
        * sequences: a list of lists of type dtype where each element is a sequence
    Returns a ``tf.SparseTensor`` built from the (indices, values, dense_shape) triple.
    """
    indices = []
    values = []
    for n, seq in enumerate(sequences):
        indices.extend(zip([n] * len(seq), range(len(seq))))
        values.extend(seq)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1] + 1], dtype=np.int64)
    # tf.SparseTensor takes dense_shape; the bare 'shape' keyword is pre-TF-1.0
    return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
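# A minimal usage sketch (assuming the TF 1.x-style API above): two
# variable-length label sequences become one sparse tensor without padding.
# The input below is hypothetical.
sequences = [[1, 2, 3], [4, 5]]
st = sparse_tuple_from(sequences)
# st.indices     -> [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]]
# st.values      -> [1, 2, 3, 4, 5]
# st.dense_shape -> [2, 3]   (2 sequences, longest has 3 elements)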
def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
"""
color_type: {age, unique}
"""
N = 20
# inds = self.confident_tracks(min_length=min_track_length)
# if not len(inds):
# return
# ids, pts = self.latest_ids[inds], self.latest_pts[inds]
# lengths = self.tm_.lengths[inds]
ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths
if color_type == 'unique':
cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[tid % N] for tid in ids])
elif color_type == 'age':
cols = colormap(lengths)
else:
raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))
if not colored:
cols = np.tile([0,240,0], [len(self.tm_.tracks), 1])
    for col, pts in zip(cols.astype(np.int64), self.tm_.tracks.values()):
cv2.polylines(out, [np.vstack(pts.items).astype(np.int32)[-max_track_length:]], False,
tuple(col), thickness=1)
tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
def _get_slice_(self, t_start, t_stop):
x_beg = numpy.int64(t_start // self.SAMPLES_PER_RECORD)
r_beg = numpy.mod(t_start, self.SAMPLES_PER_RECORD)
x_end = numpy.int64(t_stop // self.SAMPLES_PER_RECORD)
r_end = numpy.mod(t_stop, self.SAMPLES_PER_RECORD)
if x_beg == x_end:
g_offset = x_beg * self.bytes_per_block_div + self.block_offset_div
data_slice = numpy.arange(g_offset + r_beg * self.nb_channels, g_offset + r_end * self.nb_channels, dtype=numpy.int64)
yield data_slice
else:
for count, nb_blocks in enumerate(numpy.arange(x_beg, x_end + 1, dtype=numpy.int64)):
g_offset = nb_blocks * self.bytes_per_block_div + self.block_offset_div
if count == 0:
data_slice = numpy.arange(g_offset + r_beg * self.nb_channels, g_offset + self.block_size_div, dtype=numpy.int64)
elif (count == (x_end - x_beg)):
data_slice = numpy.arange(g_offset, g_offset + r_end * self.nb_channels, dtype=numpy.int64)
else:
data_slice = numpy.arange(g_offset, g_offset + self.block_size_div, dtype=numpy.int64)
yield data_slice
def _get_slice_(self, t_start, t_stop):
x_beg = numpy.int64(t_start // self.SAMPLES_PER_RECORD)
r_beg = numpy.mod(t_start, self.SAMPLES_PER_RECORD)
x_end = numpy.int64(t_stop // self.SAMPLES_PER_RECORD)
r_end = numpy.mod(t_stop, self.SAMPLES_PER_RECORD)
data_slice = []
if x_beg == x_end:
g_offset = x_beg * self.SAMPLES_PER_RECORD + self.OFFSET_PER_BLOCK[0]*(x_beg + 1) + self.OFFSET_PER_BLOCK[1]*x_beg
data_slice = numpy.arange(g_offset + r_beg, g_offset + r_end, dtype=numpy.int64)
else:
for count, nb_blocks in enumerate(numpy.arange(x_beg, x_end + 1, dtype=numpy.int64)):
g_offset = nb_blocks * self.SAMPLES_PER_RECORD + self.OFFSET_PER_BLOCK[0]*(nb_blocks + 1) + self.OFFSET_PER_BLOCK[1]*nb_blocks
if count == 0:
data_slice += numpy.arange(g_offset + r_beg, g_offset + self.SAMPLES_PER_RECORD, dtype=numpy.int64).tolist()
elif (count == (x_end - x_beg)):
data_slice += numpy.arange(g_offset, g_offset + r_end, dtype=numpy.int64).tolist()
else:
data_slice += numpy.arange(g_offset, g_offset + self.SAMPLES_PER_RECORD, dtype=numpy.int64).tolist()
return data_slice
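# A worked example of the offset arithmetic above, under assumed constants
# (SAMPLES_PER_RECORD and OFFSET_PER_BLOCK are illustrative, not from the source):
# with SAMPLES_PER_RECORD = 4 and OFFSET_PER_BLOCK = (1, 0), block k starts at
# k*4 + (k+1)*1, i.e. one header word precedes each record.
SAMPLES_PER_RECORD = 4     # assumed
OFFSET_PER_BLOCK = (1, 0)  # assumed: one header word per block, no footer

def block_start(k):
    # mirrors g_offset in _get_slice_
    return k * SAMPLES_PER_RECORD + OFFSET_PER_BLOCK[0] * (k + 1) + OFFSET_PER_BLOCK[1] * k

print([block_start(k) for k in range(3)])  # [1, 6, 11]
# _get_slice_(2, 9) would then cover indices [3, 4] from block 0,
# [6, 7, 8, 9] from block 1, and [11] from block 2.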
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
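# A quick round-trip through the two maps returned above; keys in the first
# map are numpy dtype *names* (strings), not the types themselves.
np_to_itk, itk_to_np = _get_dtype_maps()
print(np_to_itk['int64'])     # 'MET_LONG'
print(itk_to_np['MET_LONG'])  # 'int64'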
def test_dataframe_data_types():
s1 = XSeries([pd.Series([1, 2, 3], index=['a', 'b', 'c']),
pd.Series([4, 5, 6], index=['d', 'e', 'g'])])
s2 = XSeries([1, 2, 3])
s3 = XSeries([{"k1": "v1"}, {"k2": 'v2'}])
s4 = XSeries(['f', 's', 't'])
df = XDataFrame({
'first_col': s1,
'second_col': s2,
'third_col': s3,
'fourth_col': s4
})
assert df['first_col'].data_type == pd.Series
assert df['second_col'].data_type == np.int64
assert df['third_col'].data_type == dict
assert df['fourth_col'].data_type == str
assert type(df[['first_col']]) == XDataFrame
assert type(df[['first_col', 'second_col']]) == XDataFrame
def test_dataframe_sub_frame_data_types():
s1 = XSeries([pd.Series([1, 2, 3], index=['a', 'b', 'c']),
pd.Series([4, 5, 6], index=['d', 'e', 'g'])])
s2 = XSeries([1, 2, 3])
s3 = XSeries([{"k1": "v1"}, {"k2": 'v2'}])
s4 = XSeries(['f', 's', 't'])
df = XDataFrame({
'first_col': s1,
'second_col': s2,
'third_col': s3,
'fourth_col': s4
})
sub_df = df.loc[:2]
assert type(sub_df) == XDataFrame
assert sub_df['first_col'].data_type == pd.Series
assert sub_df['second_col'].data_type == np.int64
assert sub_df['third_col'].data_type == dict
assert sub_df['fourth_col'].data_type == str
assert type(sub_df[['first_col']]) == XDataFrame
assert type(sub_df[['first_col', 'second_col']]) == XDataFrame
def save_h5(f, group, key, namedtuple):
""" Save a namedtuple to an h5 file under a group and subgroup """
if VERSION_KEY in f.root:
version = int(getattr(f.root, VERSION_KEY))
if version != VERSION:
raise ValueError("Attempted to write analysis HDF5 version %d data to a version %d file" % (VERSION, version))
else:
ds = f.create_array(f.root, VERSION_KEY, np.int64(VERSION))
subgroup = f.create_group(group, '_'+key)
for field in namedtuple._fields:
arr = getattr(namedtuple, field)
if not hasattr(arr, 'dtype'):
raise ValueError('%s/%s must be a numpy array or scalar' % (group,key))
atom = tables.Atom.from_dtype(arr.dtype)
if len(arr.shape) > 0:
if arr.size > 0:
ds = f.create_carray(subgroup, field, atom, arr.shape)
else:
ds = f.create_earray(subgroup, field, atom, arr.shape)
ds[:] = arr
else:
ds = f.create_array(subgroup, field, arr)
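# A hedged usage sketch for save_h5. VERSION_KEY and VERSION are module
# constants not shown in this snippet, so the values below are assumptions
# made only to keep the example self-contained.
import collections
import numpy as np
import tables

VERSION_KEY = 'version'  # assumed
VERSION = 1              # assumed

Metrics = collections.namedtuple('Metrics', ['counts', 'total'])
m = Metrics(counts=np.array([1, 2, 3]), total=np.int64(6))

with tables.open_file('analysis.h5', 'w') as f:
    group = f.create_group(f.root, 'analysis')
    save_h5(f, group, 'metrics', m)  # writes /analysis/_metrics/{counts,total}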
def _validate(self, machine, n=10):
N = n * n
# same row same z
z = tf.random_normal(shape=[n, self.arch['z_dim']])
z = tf.tile(z, [1, n])
z = tf.reshape(z, [N, -1])
z = tf.Variable(z, trainable=False, dtype=tf.float32)
# same column same y
y = tf.range(0, 10, 1, dtype=tf.int64)
y = tf.reshape(y, [-1, 1])
y = tf.tile(y, [n, 1])
Xh = machine.generate(z, y) # 100, 64, 64, 3
# Xh = gray2jet(Xh)
# Xh = make_png_thumbnail(Xh, n)
Xh = make_png_jet_thumbnail(Xh, n)
return Xh
def _validate(self, machine, n=10):
N = n * n
z = np.random.normal(0., 1., size=[n, self.arch['z_dim']])
z = np.concatenate([z] * n, axis=1)
z = np.reshape(z, [N, -1]).astype(np.float32) # consecutive rows
y = np.asarray(
[[5, 0, 0 ],
[9, 0, 0 ],
[12, 0, 0 ],
[17, 0, 0 ],
[19, 0, 0 ],
[161, 0, 0 ],
[170, 0, 0 ],
[170, 16, 0 ],
[161, 9, 4 ],
[19, 24, 50]],
dtype=np.int64)
y = np.concatenate([y] * n, axis=0)
Z = tf.constant(z)
Y = tf.constant(y)
Xh = machine.generate(Z, Y) # 100, 64, 64, 3
Xh = make_png_thumbnail(Xh, n)
return Xh
def _validate(self, machine, n=10):
N = n * n
# same row same z
z = tf.random_normal(shape=[n, self.arch['z_dim']])
z = tf.tile(z, [1, n])
z = tf.reshape(z, [N, -1])
z = tf.Variable(z, trainable=False, dtype=tf.float32)
# same column same y
y = tf.range(0, 10, 1, dtype=tf.int64)
y = tf.reshape(y, [-1,])
y = tf.tile(y, [n,])
Xh = machine.generate(z, y) # 100, 64, 64, 3
Xh = make_png_thumbnail(Xh, n)
return Xh
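# The tile/reshape pattern used by these _validate helpers lays out an n-by-n
# grid in which every row shares one z and every column shares one y. A small
# numpy sketch of the same index arithmetic (numpy stands in for the TF ops):
import numpy as np

n, z_dim = 3, 2
z = np.arange(n * z_dim).reshape(n, z_dim)       # 3 distinct latent codes
z_grid = np.tile(z, [1, n]).reshape(n * n, -1)   # grid rows 0..2 all get z[0]

y = np.arange(n)                                 # 3 distinct labels
y_grid = np.tile(y.reshape(-1, 1), [n, 1]).ravel()
# y_grid = [0, 1, 2, 0, 1, 2, 0, 1, 2]: position (i, j) of the grid gets
# z[i] and y[j], so thumbnails in one row share z and in one column share y.
print(z_grid.shape, y_grid.shape)                # (9, 2) (9,)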
def _process_items(self, index, rgb_im, depth_im, bbox, pose):
def _process_bbox(bbox):
return AttrDict(category=bbox['category'],
target=UWRGBDDataset.target_hash[str(bbox['category'])],
coords=np.int64([bbox['left'], bbox['top'], bbox['right'], bbox['bottom']]))
# Compute bbox from pose and map (v2 support)
if self.version == 'v1':
if bbox is not None:
bbox = [_process_bbox(bb) for bb in bbox]
            bbox = [bb for bb in bbox if bb.target in UWRGBDDataset.train_ids_set]  # list, not a lazy filter, for Python 3
if self.version == 'v2':
if bbox is None and hasattr(self, 'map_info'):
bbox = self.get_bboxes(pose)
# print 'Processing pose', pose, bbox
return AttrDict(index=index, img=rgb_im, depth=depth_im,
bbox=bbox if bbox is not None else [], pose=pose)
def Saliency_map(image,model,preprocess,ground_truth,use_gpu=False,method=util.GradType.GUIDED):
vis_param_dict['method'] = method
img_tensor = preprocess(image)
img_tensor.unsqueeze_(0)
if use_gpu:
img_tensor=img_tensor.cuda()
input = Variable(img_tensor,requires_grad=True)
if input.grad is not None:
input.grad.data.zero_()
model.zero_grad()
output = model(input)
ind=torch.LongTensor(1)
    if isinstance(ground_truth, np.int64):
        ground_truth = ground_truth.item()  # np.asscalar is deprecated in numpy
ind[0]=ground_truth
ind=Variable(ind)
energy=output[0,ground_truth]
energy.backward()
grad=input.grad
if use_gpu:
return np.abs(grad.data.cpu().numpy()[0]).max(axis=0)
return np.abs(grad.data.numpy()[0]).max(axis=0)
def connectToDB(dbName=None, userName=None, dbPassword=None, dbHost=None,
dbPort=None, dbCursor=psycopg2.extras.DictCursor):
'''
Connect to a specified PostgreSQL DB and return connection and cursor objects.
'''
# Start DB connection
try:
connectionString = "dbname='" + dbName + "'"
        if userName is not None and userName != '':
            connectionString += " user='" + userName + "'"
        if dbHost is not None and dbHost != '':
            connectionString += " host='" + dbHost + "'"
        if dbPassword is not None and dbPassword != '':
            connectionString += " password='" + dbPassword + "'"
        if dbPort is not None:
            connectionString += " port='" + str(dbPort) + "'"
connection = psycopg2.connect(connectionString)
register_adapter(numpy.float64, addapt_numpy_float64)
register_adapter(numpy.int64, addapt_numpy_int64)
except:
raise
# if the connection succeeded get a cursor
cursor = connection.cursor(cursor_factory=dbCursor)
return connection, cursor
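# addapt_numpy_float64 and addapt_numpy_int64 are referenced above but not
# defined in this snippet. A common implementation (an assumption, not taken
# from this source) wraps the numpy scalar in psycopg2's AsIs so it is
# rendered as a plain SQL literal:
from psycopg2.extensions import AsIs

def addapt_numpy_float64(numpy_float64):
    return AsIs(numpy_float64)

def addapt_numpy_int64(numpy_int64):
    return AsIs(numpy_int64)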
def _check_valid_data(self, data):
"""Checks that the incoming data is a 2 x #elements ndarray of ints.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to verify.
Raises
------
ValueError
If the data is not of the correct shape or type.
"""
if data.dtype.type != np.int8 and data.dtype.type != np.int16 \
and data.dtype.type != np.int32 and data.dtype.type != np.int64 \
and data.dtype.type != np.uint8 and data.dtype.type != np.uint16 \
and data.dtype.type != np.uint32 and data.dtype.type != np.uint64:
raise ValueError('Must initialize image coords with a numpy int ndarray')
if data.shape[0] != 2:
raise ValueError('Illegal data array passed to image coords. Must have 2 coordinates')
if len(data.shape) > 2:
raise ValueError('Illegal data array passed to point cloud. Must have 1 or 2 dimensions')
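# The chained dtype comparison above can be collapsed with np.issubdtype;
# a sketch of an equivalent check (a refactoring suggestion, not this
# library's code). Note np.bool_ is not an np.integer subtype, so booleans
# are still rejected, matching the original test.
import numpy as np

def _is_int_dtype(data):
    return np.issubdtype(data.dtype, np.integer)

assert _is_int_dtype(np.zeros((2, 5), dtype=np.uint16))
assert not _is_int_dtype(np.zeros((2, 5), dtype=np.float32))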
def level_up(self, skill_level=None, slot_num=None, level=None, bond=None):
is_valid = lambda x, min_val, max_val: x is None or (type(x) in [int, np.int64] and x <= max_val and x >= min_val)
check = [is_valid(level, 1, self.max_level),
is_valid(bond, 0, self.max_bond),
is_valid(skill_level, 1, 8),
is_valid(slot_num, self.min_slot_num, self.max_slot_num)]
is_none = [x is None for x in [level, bond, slot_num, skill_level]]
if not all(check):
attr_name = np.array(['Level', 'Bond', 'Skill Level', 'Slot Number'])
print(self)
print('{0} must be integer within valid range!'.format(', '.join(attr_name[[not x for x in check]])))
        raise ValueError('invalid attribute value(s) for level_up')
not_none = [not x for x in is_none]
new_attr = np.array([self.level, self.bond, self.slot_num, 0 if self.skill is None else self.skill.level], dtype=int)
new_attr[not_none] = np.array([level, bond, slot_num, 0 if skill_level is None else skill_level])[not_none]
self.level, self.bond, self.slot_num, skill_level = new_attr
if self.skill is not None and skill_level in list(range(1,9)):
self.skill.set_level(skill_level)
self.smile, self.pure, self.cool, self.hp = [self.stats_list[self.level-1][i] for i in [0,1,2,5]]
def drop_inconsistent_keys(self, columns, obj):
"""Drop inconsistent keys
Drop inconsistent keys from a ValueCounts or Histogram object.
:param list columns: columns key to retrieve desired datatypes
:param object obj: ValueCounts or Histogram object to drop inconsistent keys from
"""
# has array been converted first? if so, set correct comparison
# datatype
comp_dtype = []
for col in columns:
        dt = np.dtype(self.var_dtype[col]).type()
        is_converted = isinstance(dt, (np.number, np.datetime64))
if is_converted:
comp_dtype.append(np.int64)
else:
comp_dtype.append(self.var_dtype[col])
# keep only keys of types in comp_dtype
obj.remove_keys_of_inconsistent_type(prefered_key_type=comp_dtype)
return obj
def get_bin_center(self, bin_label):
"""Return bin center for a given bin index
:param bin_label: bin label for which to find the bin center
:returns: bin center, can be float, int, timestamp
"""
if not self.bin_specs:
return None
bin_idx = np.int64(bin_label)
if 'bin_edges' in self.bin_specs:
bin_edges = self.bin_specs['bin_edges']
        if bin_idx < 0 or bin_idx >= len(bin_edges) - 1:  # bin_edges[bin_idx + 1] is needed below
raise RuntimeError('bin_label "%s" does not fit in bin edges' % bin_label)
# NOTE: computation below also works with timestamps! Order is
# important.
bin_width = bin_edges[bin_idx + 1] - bin_edges[bin_idx]
bin_width_half = bin_width / 2.
bin_center = bin_edges[bin_idx] + bin_width_half
else:
width = self.bin_specs['bin_width']
offset = self.bin_specs.get('bin_offset', 0.)
# NOTE: this notation also works with timestamps!
bin_center = offset + (bin_idx + 0.5) * width
return bin_center
def get_left_bin_edge(self, bin_label):
"""Return left bin edge for a given bin index
:param bin_label: bin label for which to find the left bin edge
:returns: bin edge, can be float, int, timestamp
"""
# check bin specifications and specified value
if not self.bin_specs:
return None
bin_idx = np.int64(bin_label)
if 'bin_edges' in self.bin_specs:
bin_edges = self.bin_specs['bin_edges']
if bin_idx < 0 or bin_idx >= len(bin_edges):
raise RuntimeError('bin label "{}" does not fit in bin edges'.format(bin_label))
bin_edge_left = bin_edges[bin_idx]
else:
width = self.bin_specs['bin_width']
offset = self.bin_specs.get('bin_offset', 0.)
# NOTE: this notation also works with timestamps!
bin_edge_left = offset + (bin_idx * width)
return bin_edge_left
def get_right_bin_edge(self, bin_label):
"""Return right bin edge for a given bin index
:param bin_label: bin label for which to find the right bin edge.
:returns: bin edge, can be float, int, timestamp
"""
# check bin specifications and specified value
if not self.bin_specs:
return None
bin_idx = np.int64(bin_label)
if 'bin_edges' in self.bin_specs:
bin_edges = self.bin_specs['bin_edges']
if bin_idx < 0 or bin_idx >= len(bin_edges) - 1:
raise RuntimeError('bin label "{}" does not fit in bin_edges'.format(bin_label))
bin_edge_right = bin_edges[bin_idx + 1]
else:
width = self.bin_specs['bin_width']
offset = self.bin_specs.get('bin_offset', 0.)
# NOTE: this notation also works with timestamps!
bin_edge_right = offset + (bin_idx + 1) * width
return bin_edge_right
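# A worked example of the equidistant-bin branch shared by the three methods
# above, with assumed specs bin_width = 2.5 and bin_offset = 10:
width, offset = 2.5, 10.0
bin_idx = 3
left = offset + bin_idx * width            # 17.5
right = offset + (bin_idx + 1) * width     # 20.0
center = offset + (bin_idx + 0.5) * width  # 18.75
assert left < center < right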
def to_int(val, **kwargs):
"""Convert input to int
:param val: value to be evaluated
:returns: evaluated value
:rtype: np.int64
"""
try:
if pd.isnull(val):
return kwargs['nan']
except BaseException:
pass
if isinstance(val, np.int64) or isinstance(val, int):
return np.int64(val)
if kwargs.get('convert_inconsistent_dtypes', True):
try:
return np.int64(val)
except BaseException:
pass
return kwargs['nan']
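# A usage sketch for to_int. The 'nan' fallback must be supplied by the
# caller, since the function indexes kwargs['nan'] directly.
import numpy as np

print(to_int('42', nan=-1))    # 42: strings convert while
                               # convert_inconsistent_dtypes is on (the default)
print(to_int(np.nan, nan=-1))  # -1: null input returns the sentinel
print(to_int('abc', nan=-1))   # -1: unconvertible input also returns it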
def bool_to_int(val, **kwargs):
"""Convert input boolean to int
:param val: value to be evaluated
:returns: evaluated value
:rtype: np.int64
"""
try:
if pd.isnull(val):
return kwargs['nan']
except BaseException:
pass
if isinstance(val, np.bool_) or isinstance(val, bool):
return np.int64(val)
if kwargs.get('convert_inconsistent_dtypes', False):
try:
return np.int64(val)
except BaseException:
pass
return kwargs['nan']
def guarantee_array(variable):
    ''' Guarantees that a variable is a numpy ndarray and supports -, *, +, and other operators
    Args:
        variable (`number` or `numpy.ndarray`): variable to coalesce
    Returns:
        the input, coerced to a type that supports arithmetic operations with arrays
    '''
if type(variable) in [float, np.ndarray, np.int32, np.int64, np.float32, np.float64, np.complex64, np.complex128]:
return variable
elif type(variable) is int:
return float(variable)
elif type(variable) is list:
return np.asarray(variable)
else:
raise ValueError(f'variable is of invalid type {type(variable)}')
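# Behaviour of guarantee_array at a glance; the int-to-float promotion is
# the subtle case.
import numpy as np

print(guarantee_array(3))            # 3.0 (plain int promoted to float)
print(guarantee_array([1, 2, 3]))    # array([1, 2, 3])
print(guarantee_array(np.int64(7)))  # 7 (numpy scalar types pass through)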
def validate_gibbs_parameters(alpha1, alpha2, beta, restarts,
draws_per_restart, burnin, delay):
'''Return `True` if params numerically acceptable. See `gibbs` for docs.'''
real_vals = [alpha1, alpha2, beta]
int_vals = [restarts, draws_per_restart, burnin, delay]
# Check everything is real.
if all(np.isreal(val) for val in real_vals + int_vals):
# Check that integer values are some type of int.
int_check = all(isinstance(val, (int, np.int32, np.int64)) for val in
int_vals)
# All integer values must be > 0.
pos_int = all(val > 0 for val in int_vals)
# All real values must be non-negative.
non_neg = all(val >= 0 for val in real_vals)
        return int_check and pos_int and non_neg
    else:  # Failed to be all numeric values.
        return False
def initalize(ob, key):
'''Set up the indexing for viewing each edge per vert per face loop'''
obm = get_bmesh(ob)
ed_pairs_per_v = []
for f in obm.faces:
        for v in f.verts:
            pair = []  # edge indices at this vert in this face loop; don't shadow builtin set
            for e in f.edges:
                if v in e.verts:
                    pair.append(e.index)
            ed_pairs_per_v.append(pair)
data[ob.name]['ed_pairs_per_v'] = np.array(ed_pairs_per_v)
data[ob.name]['zeros'] = np.zeros(len(data[ob.name]['ed_pairs_per_v']) * 3).reshape(len(data[ob.name]['ed_pairs_per_v']), 3)
key_coords = get_key_coords(ob, key)
ed1 = get_edge_idx(ob)
#linked = np.array([len(i.link_faces) for i in obm.edges]) > 0
data[ob.name]['edges'] = get_edge_idx(ob)#[linked]
dif = key_coords[data[ob.name]['edges'][:,0]] - key_coords[data[ob.name]['edges'][:,1]]
data[ob.name]['mags'] = np.sqrt(np.einsum('ij,ij->i', dif, dif))
mat_idx = np.zeros(len(ob.data.polygons), dtype=np.int64)
ob.data.polygons.foreach_get('material_index', mat_idx)
data[ob.name]['mat_index'] = mat_idx
if 'material' not in data[ob.name]:
print('ran this')
material_setup(ob)
def triangulate(ob='empty', proxy=False):
'''Requires a mesh. Returns an index array for viewing
the coordinates as triangles. Store this!!! rather than recalculating
every time. !!!Could use for_each_get with the mesh and polygons if
all the faces have 3 points!!! Could also write bmesh to mesh and use
foreach_get'''
if ob == 'empty':
ob = bpy.context.object
    mods = bool(proxy)  # apply modifiers only when baking a proxy mesh
proxy = ob.to_mesh(bpy.context.scene, mods, 'PREVIEW')
obm = get_bmesh(proxy)
bmesh.ops.triangulate(obm, faces=obm.faces)
obm.to_mesh(proxy)
count = len(proxy.polygons)
tri_idx = np.zeros(count * 3, dtype=np.int64)
proxy.polygons.foreach_get('vertices', tri_idx)
bpy.data.meshes.remove(proxy)
obm.free()
return tri_idx.reshape(count, 3)
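# The returned index array is meant for fancy-indexing a coordinate array.
# A pure-numpy illustration (the Blender calls above are assumed to have
# produced tri_idx; the arrays here are stand-ins):
import numpy as np

coords = np.random.rand(8, 3)               # mesh vertex coordinates
tri_idx = np.array([[0, 1, 2], [2, 3, 0]])  # triangulate()-style output
tris = coords[tri_idx]                      # shape: (n_tris, 3 verts, xyz)
print(tris.shape)                           # (2, 3, 3)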
def _validate_X_predict(
self, X: np.ndarray, check_input: bool) -> np.ndarray:
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError(
"No support for np.int64 index based sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError(
"Number of features of the model must match the input."
" Model n_features is %s and input n_features is %s "
% (self.n_features_, n_features))
return X
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
def test_basic(self):
    dts = [np.bool_, np.int16, np.int32, np.int64, np.double, np.complex128,
           np.longdouble, np.clongdouble]
    for dt in dts:
        c = np.ones(53, dtype=np.bool_)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])