def execute(self, query, params=()):
if len(params) > 0 and len(query.split(';')) > 1:
raise Exception("Multiple queries with parameters is unsupported")
# Expand lists in paramters
prev = -1
new_params = []
for p in params:
prev = query.find('?', prev+1)
if type(p) in [np.uint16, np.uint32, np.uint64]:
new_params.append(np.int64(p)) # sqlite is really fussy about this
elif type(p) in [list, tuple]:
rep = "(" + ",".join("?"*len(p)) + ")"
query = query[:prev] + rep + query[prev+1:]
            prev += len(rep) - 1  # leave prev on the last char of the group
new_params.extend(p)
else:
new_params.append(p)
    for q in query.split(';'):
        if q.strip():  # skip empty segments left by trailing semicolons
            self.cur.execute(q, tuple(new_params))
return self.cur
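For context, a hedged usage sketch of the placeholder expansion above (the `db` wrapper object and the `items` table are invented for illustration): passing a tuple for a single `?` rewrites it into a parenthesised placeholder group before the query reaches sqlite.

rows = db.execute(
    "SELECT * FROM items WHERE id IN ? AND kind = ?",
    [(1, 2, 3), "widget"],  # the tuple expands to "IN (?,?,?)"
).fetchall()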
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
def ctz(val, _type):
    """Count trailing zeros of val, treated as a 32- or 64-bit unsigned int."""
    if _type == 'uint32':
        width = 32
        val = int(np.uint32(val))  # mask to the declared width
    elif _type == 'uint64':
        width = 64
        val = int(np.uint64(val))
    else:
        raise ValueError(Colors.red + "unsupported type passed to ctz." + Colors.ENDC)
    cnt = 0
    for power in range(width):
        if val & (1 << power) == 0:
            cnt += 1
        else:
            break
    return cnt
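A quick illustrative sanity check (my addition, not from the source):

assert ctz(40, 'uint32') == 3   # 40 == 0b101000, three trailing zeros
assert ctz(0, 'uint64') == 64   # an all-zero input counts every bit position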
def pop_cnt(val, _type):
    """Count set bits (population count) of val as a 32- or 64-bit unsigned int."""
    if _type == 'uint32':
        width = 32
        val = int(np.uint32(val))  # mask to the declared width
    elif _type == 'uint64':
        width = 64
        val = int(np.uint64(val))
    else:
        raise ValueError(Colors.red + "unsupported type passed to pop_cnt." + Colors.ENDC)
    cnt = 0
    for power in range(width):
        if val & (1 << power) != 0:
            cnt += 1
    return cnt
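For reference, pop_cnt can be cross-checked against Python's string-based popcount, which works for any non-negative int:

for v in (0, 1, 0xdeadbeef):
    assert pop_cnt(v, 'uint32') == bin(v).count('1')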
Source: test_numpy_mt19937.py (project: scipy-2017-cython-tutorial, author: kwmsmith)
def test_int64_uint64_corner_case(self):
    # When stored in NumPy arrays, `lbnd` is cast
    # to np.int64 and `ubnd` is cast to np.uint64.
    # Checking whether `lbnd` >= `ubnd` used to be
    # done solely via direct comparison, which is incorrect
    # because when NumPy tries to compare both numbers,
    # it casts both to np.float64, since there is
    # no integer superset of np.int64 and np.uint64. However,
    # `ubnd` is too large to be represented exactly in np.float64,
    # causing it to be rounded down to np.iinfo(np.int64).max,
    # leading to a ValueError because `lbnd` now equals
    # the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = mt19937.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
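A minimal sketch (not part of the original test) of the float64 collapse the comment describes: with no common integer supertype, both bounds land on the same float64 value.

import numpy as np

lbnd = np.int64(np.iinfo(np.int64).max)        # 2**63 - 1
ubnd = np.uint64(np.iinfo(np.int64).max + 1)   # 2**63
# In float64 the two bounds become indistinguishable.
print(np.float64(lbnd) == np.float64(ubnd))    # True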
def _call_nms_kernel(bbox, thresh):
n_bbox = bbox.shape[0]
threads_per_block = 64
col_blocks = np.ceil(n_bbox / threads_per_block).astype(np.int32)
blocks = (col_blocks, col_blocks, 1)
threads = (threads_per_block, 1, 1)
mask_dev = cp.zeros((n_bbox * col_blocks,), dtype=np.uint64)
bbox = cp.ascontiguousarray(bbox, dtype=np.float32)
kern = _load_kernel('nms_kernel', _nms_gpu_code)
kern(blocks, threads, args=(cp.int32(n_bbox), cp.float32(thresh),
bbox, mask_dev))
mask_host = mask_dev.get()
selection, n_selec = _nms_gpu_post(
mask_host, n_bbox, threads_per_block, col_blocks)
return selection, n_selec
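For orientation, a hedged note on the mask layout (an assumption based on the usual GPU NMS kernel design, not something this excerpt confirms): bit j of word mask_host[i * col_blocks + b] appears to mark that box b * 64 + j overlaps box i above thresh. A small sketch that unpacks the mask for inspection:

import numpy as np

def expand_nms_mask(mask_host, n_bbox, threads_per_block, col_blocks):
    # Hypothetical helper, not in the original: unpack the packed uint64
    # bitmask into a boolean (n_bbox, n_bbox) suppression matrix.
    sup = np.zeros((n_bbox, col_blocks * threads_per_block), dtype=bool)
    for i in range(n_bbox):
        for b in range(col_blocks):
            word = int(mask_host[i * col_blocks + b])
            for j in range(threads_per_block):
                sup[i, b * threads_per_block + j] = bool((word >> j) & 1)
    return sup[:, :n_bbox]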
def dump(result, fname, no_prefix=False):
"""Save result to file."""
result = result.eval() if hasattr(result, "eval") else result
result = np.asarray(result)
if result.shape == (): # savetxt has problems with scalars
result = np.expand_dims(result, 0)
if no_prefix:
location = os.getcwd()+"/"+fname
else:
location = os.getcwd()+"/data/"+fname
# special handling for integer datatypes
if (
result.dtype == np.uint8 or result.dtype == np.int8 or
result.dtype == np.uint16 or result.dtype == np.int16 or
result.dtype == np.uint32 or result.dtype == np.int32 or
result.dtype == np.uint64 or result.dtype == np.int64
):
np.savetxt(location, result, fmt="%d", delimiter=',')
else:
np.savetxt(location, result, delimiter=',')
print(location)
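The chained dtype comparisons could be collapsed using NumPy's dtype hierarchy; a sketch of an equivalent predicate (behaviour should match for every dtype listed above):

import numpy as np

def _is_integer_dtype(arr):
    # Covers all signed and unsigned integer dtypes in one check.
    return np.issubdtype(arr.dtype, np.integer)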
def time_seconds(tc_array, year):
"""Return the time object from the timecodes
"""
tc_array = np.array(tc_array, copy=True)
word = tc_array[:, 0]
day = word >> 1
word = tc_array[:, 1].astype(np.uint64)
msecs = ((127) & word) * 1024
word = tc_array[:, 2]
msecs += word & 1023
msecs *= 1024
word = tc_array[:, 3]
msecs += word & 1023
return (np.datetime64(
str(year) + '-01-01T00:00:00Z', 's') +
msecs[:].astype('timedelta64[ms]') +
(day - 1)[:].astype('timedelta64[D]'))
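From the arithmetic above, the timecode appears to pack the day count in word 0 (shifted left one bit) and a millisecond counter split 7/10/10 bits across words 1-3, most significant first; this reading is inferred, not documented in the excerpt. A small worked check:

import numpy as np

# Day 2, msecs = (1 << 20) + (2 << 10) + 3 = 1050627 -> 00:17:30.627 on Jan 2.
tc = np.array([[2 << 1, 1, 2, 3]], dtype=np.uint16)
print(time_seconds(tc, 2000))  # ['2000-01-02T00:17:30.627']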
def test_channel_uint64_wrong_dimensions(self):
""" Test posting with the wrong xyz dims"""
test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
test_mat = test_mat.astype(np.uint64)
h = test_mat.tobytes()
    bb = blosc.compress(h, typesize=8)  # uint64 elements are 8 bytes wide
# Create request
factory = APIRequestFactory()
request = factory.post('/' + version + '/cutout/col1/exp1/layer1/0/0:100/0:128/0:16/', bb,
content_type='application/blosc')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='layer1',
resolution='0', x_range='0:100', y_range='0:128', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_channel_uint64_wrong_dimensions_numpy(self):
""" Test posting with the wrong xyz dims using the numpy interface"""
test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
test_mat = test_mat.astype(np.uint64)
bb = blosc.pack_array(test_mat)
# Create request
factory = APIRequestFactory()
request = factory.post('/' + version + '/cutout/col1/exp1/layer1/0/0:100/0:128/0:16/', bb,
content_type='application/blosc-python')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='layer1',
resolution='0', x_range='0:100', y_range='0:128', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def _is_class_a_primitive(cls):
'''
    Check whether a class is a numeric or string type, including NumPy scalar types
:param cls: any class
:return: True if class is a primitive class, else False
'''
primitives = [
np.float16, np.float32, np.float64, np.float128,
np.int8, np.int16, np.int32, np.int64,
bool, str, np.uint8, np.uint16, np.uint32, np.uint64,
int, float
]
return cls in primitives
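A quick illustrative check of the predicate (my addition):

assert _is_class_a_primitive(np.uint64)
assert _is_class_a_primitive(str)
assert not _is_class_a_primitive(np.ndarray)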
def decompress_seq(x, length, bits=64):
x = np.uint64(x)
assert length <= (bits/2 - 1)
if x & (1L << (bits-1)):
return 'N' * length
result = bytearray(length)
for i in xrange(length):
result[(length-1)-i] = tk_seq.NUCS[x & np.uint64(0b11)]
x = x >> np.uint64(2)
return str(result)
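The matching encoder is not shown in this excerpt; a hedged sketch of what it might look like, assuming tk_seq.NUCS is an indexable sequence such as 'ACGT' (the high bit flags an all-N sequence, mirroring the decoder above):

def compress_seq_sketch(seq, bits=64):
    # Hypothetical counterpart to decompress_seq; the layout is assumed from
    # the decoder: first base in the most significant 2-bit slot, high bit = N.
    if 'N' in seq:
        return np.uint64(1) << np.uint64(bits - 1)
    x = np.uint64(0)
    for base in seq:
        x = (x << np.uint64(2)) | np.uint64(tk_seq.NUCS.index(base))
    return x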
def __init__(self, buf, offset = 0, idmap = None, idmap_size = 1024):
if idmap is None:
idmap = Cache(idmap_size)
self.offset = offset
if offset != 0:
self.buf = buf = buffer(buf, offset)
else:
self.buf = buf
self.total_size, self.index_offset, self.index_elements = self._Header.unpack_from(buf, 0)
self.index = numpy.frombuffer(buf,
offset = self.index_offset,
dtype = numpy.uint64,
count = self.index_elements)
self.idmap = idmap
if self.index_elements > 0 and self.index[0] >= (self._Header.size + self._NewHeader.size):
# New version, most likely
self.version, min_reader_version, self.schema_offset, self.schema_size = self._NewHeader.unpack_from(
buf, self._Header.size)
if self._CURRENT_VERSION < min_reader_version:
raise ValueError((
"Incompatible buffer, this buffer needs a reader with support for version %d at least, "
"this reader supports up to version %d") % (
min_reader_version,
self._CURRENT_VERSION
))
if self.schema_offset and self.schema_size:
if self.schema_offset > len(buf) or (self.schema_size + self.schema_offset) > len(buf):
raise ValueError("Corrupted input - bad schema location")
stored_schema = cPickle.loads(bytes(buffer(buf, self.schema_offset, self.schema_size)))
if not isinstance(stored_schema, Schema):
raise ValueError("Corrupted input - unrecognizable schema")
if self.schema is None or not self.schema.compatible(stored_schema):
self.schema = stored_schema
elif self.schema is None:
raise ValueError("Cannot map schema-less buffer without specifying schema")
elif self.index_elements > 0:
raise ValueError("Cannot reliably map version-0 buffers")
def normalise_data(self, timestamp, data):
    """ Convert the data if needed """
    if self._passthrough:
        return
    for i, datum in enumerate(data):
        if self.needsfixup[i] is None:
            continue
        if len(datum) == 0:
            # Ignore entries with no data - this typically occurs when the
            # plugin requests multiple metrics and the metrics do not all appear
            # at every timestep
            continue
        if self.accumulator[i] is None:
            self.accumulator[i] = numpy.array(datum)
            self.last[i] = numpy.array(datum)
        else:
            # Accumulate deltas modulo the counter width so that counter
            # wraparound does not corrupt the running total.
            self.accumulator[i] += (datum - self.last[i]) % numpy.uint64(1 << self.needsfixup[i]['range'])
            numpy.copyto(self.last[i], datum)
            numpy.copyto(datum, self.accumulator[i])
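The modulo is what makes the accumulation robust to counter wraparound: for a counter 'range' bits wide, (new - old) % 2**range recovers the true increment even after the raw value rolls over. An illustrative check with an 8-bit width:

import numpy as np

old, new = np.uint64(250), np.uint64(3)  # the counter wrapped past 255
print((new - old) % np.uint64(1 << 8))   # 9, the true increment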
def batch(self):
"""Return a batch of samples sampled uniformly from the database.
Returns
-------
(numpy.ndarray, ...)
The sample values are returned in a tuple in the order of the
`keys` specified by the user.
"""
# Count the number of keys (i.e. data objects)
nb_keys = len(self.keys)
data = []
for key in self.keys:
data.append(np.zeros((self.batch_size,) + self.spec[key]['shape'],
dtype=self.spec[key]['dtype']))
while True:
# Sample indices uniformly
batch_idxs = self.rng.randint(self.db.nb_samples,
size=self.batch_size,
dtype=np.uint64)
for i, v in enumerate(batch_idxs):
sample = self.db.get_sample(v)
for k in range(nb_keys):
data[k][i] = sample[self.keys[k]]
        # Account for batches with only one key
        if len(data) == 1:
            yield data[0]
else:
yield tuple(data)
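Hypothetical usage (the `sampler` instance and its construction are assumed): the method is an infinite generator, so callers pull batches with next.

gen = sampler.batch()  # `sampler` is an instance of the class above
first = next(gen)      # a tuple of arrays, one per key (or a lone array)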