def __init__(self, buf, offset = 0):
# Accelerate class attributes
self._encode = self.encode
self._dtype = self.dtype
self._xxh = self.xxh
# Initialize buffer
if offset:
self._buf = self._likebuf = buffer(buf, offset)
else:
self._buf = buf
self._likebuf = _likebuffer(buf)
# Parse header and map index
self.index_elements, self.index_offset = self._Header.unpack_from(self._buf, 0)
self.index = numpy.ndarray(buffer = self._buf,
offset = self.index_offset,
dtype = self.dtype,
shape = (self.index_elements, 3))
def find_sqltype(val):
"""
Find sqlite data type which matches the type of `val`.
Parameters
----------
val : any python type
Returns
-------
sqltype : str
        String with the sql type which can be used to set up a sqlite table
"""
    mapping = {
types.NoneType: 'NULL',
types.IntType: 'INTEGER',
types.LongType: 'INTEGER',
types.FloatType: 'REAL', # 'FLOAT' also works
types.StringTypes: 'TEXT', # StringType + UnicodeType
types.BufferType: 'BLOB'}
for typ in mapping.keys():
if isinstance(val, typ):
return mapping[typ]
raise StandardError("type '%s' unknown, cannot find mapping "
"to sqlite3 type" %str(type(val)))
def _likebuffer(buf):
    if type(buf) is buffer or type(buf) is bytearray or isinstance(buf, bytes):
return buf
else:
return buffer(buf)
def unpack_from(cls, buf, offs, idmap = None):
if cython.compiled:
buf = _likebuffer(buf)
PyObject_GetBuffer(buf, cython.address(pybuf), PyBUF_SIMPLE) # lint:ok
pbuf = cython.cast(cython.p_uchar, pybuf.buf) # lint:ok
if offs >= pybuf.len:
PyBuffer_Release(cython.address(pybuf)) # lint
raise IndexError("Offset out of range")
else:
pbuf = buf
try:
if pbuf[offs] == 'm':
# inline bitmap
if cython.compiled and offs+7 >= pybuf.len:
raise IndexError("Object spans beyond buffer end")
rv = []
for i in xrange(7):
b = ord(pbuf[offs+1+i])
if b:
for j in xrange(8):
if b & (1<<j):
rv.append(i*8+j)
return frozenset(rv)
else:
# unpack a list, build a set from it
return frozenset(mapped_list.unpack_from(buf, offs, idmap))
finally:
if cython.compiled:
if type(buf) is buffer:
PyBuffer_Release(cython.address(pybuf)) # lint:ok
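For reference, a sketch of the inline-bitmap layout that the 'm' branch above decodes: the marker byte is followed by 7 bitmap bytes, where bit j of byte i stands for element i*8 + j. This packer is my own illustration of the format, not the library's actual encoder:
def _pack_inline_bitmap_sketch(values):
    # Hypothetical helper: only sets of integers in [0, 56) fit the 7-byte inline bitmap.
    assert all(0 <= v < 56 for v in values)
    bitmap = [0] * 7
    for v in values:
        bitmap[v // 8] |= 1 << (v % 8)
    return 'm' + ''.join(chr(b) for b in bitmap)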
def _unpack_bytes_from_pybuffer(buf, offs, idmap):
if idmap is not None and offs in idmap:
return idmap[offs]
if cython.compiled:
try:
buf = _likebuffer(buf)
PyObject_GetBuffer(buf, cython.address(pybuf), PyBUF_SIMPLE) # lint:ok
rv = _unpack_bytes_from_cbuffer(cython.cast(cython.p_char, pybuf.buf), offs, pybuf.len, None) # lint:ok
finally:
PyBuffer_Release(cython.address(pybuf)) # lint:ok
else:
hpacker = struct.Struct('=H')
objlen = hpacker.unpack_from(buf, offs)[0]
offs = int(offs)
dataoffs = offs + hpacker.size
compressed = (objlen & 0x8000) != 0
if (objlen & 0x7FFF) == 0x7FFF:
qpacker = struct.Struct('=HQ')
objlen = qpacker.unpack_from(buf, offs)[1]
dataoffs = offs + qpacker.size
else:
objlen = objlen & 0x7FFF
rv = buffer(buf, dataoffs, objlen)
if compressed:
rv = lz4_decompress(rv)
else:
rv = bytes(rv)
if idmap is not None:
idmap[offs] = rv
return rv
def unpack(self, buf, idmap = None, factory_class_new = None, proxy_into = None):
return self.unpack_from(buffer(buf), 0, idmap, factory_class_new, proxy_into)
def __init__(self, buf, offset = 0, idmap = None, idmap_size = 1024):
if idmap is None:
idmap = Cache(idmap_size)
self.offset = offset
if offset != 0:
self.buf = buf = buffer(buf, offset)
else:
self.buf = buf
self.total_size, self.index_offset, self.index_elements = self._Header.unpack_from(buf, 0)
self.index = numpy.frombuffer(buf,
offset = self.index_offset,
dtype = numpy.uint64,
count = self.index_elements)
self.idmap = idmap
if self.index_elements > 0 and self.index[0] >= (self._Header.size + self._NewHeader.size):
# New version, most likely
self.version, min_reader_version, self.schema_offset, self.schema_size = self._NewHeader.unpack_from(
buf, self._Header.size)
if self._CURRENT_VERSION < min_reader_version:
raise ValueError((
"Incompatible buffer, this buffer needs a reader with support for version %d at least, "
"this reader supports up to version %d") % (
min_reader_version,
self._CURRENT_VERSION
))
if self.schema_offset and self.schema_size:
if self.schema_offset > len(buf) or (self.schema_size + self.schema_offset) > len(buf):
raise ValueError("Corrupted input - bad schema location")
stored_schema = cPickle.loads(bytes(buffer(buf, self.schema_offset, self.schema_size)))
if not isinstance(stored_schema, Schema):
raise ValueError("Corrupted input - unrecognizable schema")
if self.schema is None or not self.schema.compatible(stored_schema):
self.schema = stored_schema
elif self.schema is None:
raise ValueError("Cannot map schema-less buffer without specifying schema")
elif self.index_elements > 0:
raise ValueError("Cannot reliably map version-0 buffers")
def map_file(cls, fileobj, offset = 0, size = None):
fileobj.seek(offset)
total_size = cls._Header.unpack(fileobj.read(cls._Header.size))[0]
map_start = offset - offset % mmap.ALLOCATIONGRANULARITY
buf = mmap.mmap(fileobj.fileno(), total_size + offset - map_start,
access = mmap.ACCESS_READ, offset = map_start)
rv = cls(buffer(buf, offset - map_start))
rv._file = fileobj
rv._mmap = buf
return rv
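A brief worked example of the alignment arithmetic in map_file (the numbers are invented for illustration):
# With mmap.ALLOCATIONGRANULARITY == 4096 and offset == 10000:
#   map_start = 10000 - 10000 % 4096 = 8192
# so the mapping begins at the allocation boundary below `offset`, the mapped
# length is padded by (offset - map_start) == 1808 bytes, and the reader is
# handed buffer(buf, 1808) so it still sees the header at position 0.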
def merge(cls, parts, destfile = None, tempdir = None,
discard_duplicates = False, discard_duplicate_keys = False):
if destfile is None:
destfile = tempfile.NamedTemporaryFile(dir = tempdir)
dtype = cls.dtype
basepos = destfile.tell()
# Reserve space for the header
write = destfile.write
write(cls._Header.pack(0, 0))
indexpos = basepos + cls._Header.size
# Merge the indexes
index = _merge_all([mapper.index for mapper in parts], dtype)
write(buffer(index))
nitems = len(index)
finalpos = destfile.tell()
if finalpos & 31:
write("\x00" * (32 - (finalpos & 31)))
finalpos = destfile.tell()
destfile.seek(basepos)
write(cls._Header.pack(nitems, indexpos - basepos))
destfile.seek(finalpos)
destfile.flush()
rv = cls.map_file(destfile, basepos, size = finalpos - basepos)
destfile.seek(finalpos)
return rv
def _search_hkey(self, hkey):
hi = self.index_elements
lo = 0
if cython.compiled:
dtype = self._dtype
if dtype is npuint64 or dtype is npuint32:
#lint:disable
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.len < hi * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
pindex = cython.cast(cython.p_char, indexbuf.buf)
stride0 = indexbuf.strides[0]
if dtype is npuint64:
# A quick guess assuming uniform distribution of keys over the 64-bit value range
hint = (((hkey >> 32) * (hi-lo)) >> 32) + lo
return _c_search_hkey_ui64(hkey, pindex, stride0, hi, hint)
elif dtype is npuint32:
                    # A quick guess assuming uniform distribution of keys over the 32-bit value range
hint = ((hkey * (hi-lo)) >> 32) + lo
return _c_search_hkey_ui32(hkey, pindex, stride0, hi, hint)
else:
raise AssertionError("Internal error")
finally:
PyBuffer_Release(cython.address(indexbuf))
#lint:enable
else:
dtype = self.dtype
struct_dt = numpy.dtype([
('key_hash', dtype),
('key_offset', dtype),
('value', dtype),
])
return self.index.view(struct_dt).reshape(self.index.shape[0]).searchsorted(
numpy.array([(hkey,0,0)],dtype=struct_dt))[0]
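The non-compiled fallback above views the (N, 3) index as a one-dimensional record array so that searchsorted compares rows lexicographically by key_hash first; a self-contained sketch with made-up data:
import numpy
index = numpy.array([[10, 0, 100],
                     [20, 8, 200],
                     [30, 16, 300]], dtype = numpy.uint64)
struct_dt = numpy.dtype([
    ('key_hash', numpy.uint64),
    ('key_offset', numpy.uint64),
    ('value', numpy.uint64),
])
pos = index.view(struct_dt).reshape(index.shape[0]).searchsorted(
    numpy.array([(20, 0, 0)], dtype = struct_dt))[0]
# pos == 1: the first row whose key_hash is >= 20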
def build(cls, initializer, destfile = None, tempdir = None, idmap = None,
value_array_kwargs = {},
id_mapper_kwargs = {}):
if destfile is None:
destfile = tempfile.NamedTemporaryFile(dir = tempdir)
# Must dump values and keys to temporary files because
# we don't know the size of the idmap before building it,
# and it has to be at the beginning of the file
with tempfile.NamedTemporaryFile(dir = tempdir, suffix = '.v',) as values_file:
with tempfile.NamedTemporaryFile(dir = tempdir, suffix = '.k') as keys_file:
initial_pos = destfile.tell()
value_array = cls.ValueArray.build(
_iter_values_dump_keys(initializer, keys_file), values_file,
tempdir = tempdir, idmap = idmap, **value_array_kwargs)
id_mapper = cls.IdMapper.build(
_iter_key_dump(keys_file), destfile,
tempdir = tempdir, **id_mapper_kwargs)
# pad to multiple of 32 for better cache alignment
pos = destfile.tell()
if pos & 31:
destfile.write("\x00" * (32 - (pos & 31)))
values_pos = destfile.tell()
blocklen = 1 << 20
for start in xrange(0, len(value_array.buf), blocklen):
destfile.write(buffer(value_array.buf, start, blocklen))
destfile.write(cls._Footer.pack(values_pos - initial_pos))
destfile.flush()
return cls(value_array, id_mapper)
def write(self, s):
t = type(s)
if t is types.StringType:
s = buffer(s)
    elif t is not types.BufferType:
s = buffer(str(s))
self.response.BinaryWrite(s)
def pack_into(cls, obj, buf, offs, idmap = None, implicit_offs = 0):
if idmap is not None:
objid = id(obj)
idmap[objid] = offs + implicit_offs
objlen = len(obj)
if objlen > MIN_COMPRESS_THRESHOLD:
objcomp = lz4_compress(obj)
objcomplen = len(objcomp)
if objcomplen < (objlen - objlen/3):
# Must get substantial compression to pay the price
obj = objcomp
objlen = objcomplen
compressed = 0x8000
else:
compressed = 0
del objcomp
else:
compressed = 0
if (offs + 16 + len(obj)) > len(buf):
raise struct.error('buffer too small')
if cython.compiled:
try:
buf = _likebuffer(buf)
PyObject_GetBuffer(buf, cython.address(pybuf), PyBUF_WRITABLE) # lint:ok
pbuf = cython.cast(cython.p_char, pybuf.buf) + offs # lint:ok
if objlen < 0x7FFF:
cython.cast('_varstr_header *', pbuf).shortlen = objlen | compressed
offs += cython.sizeof(cython.ushort)
pbuf += cython.sizeof(cython.ushort)
else:
cython.cast('_varstr_header *', pbuf).shortlen = 0x7FFF | compressed
cython.cast('_varstr_header *', pbuf).biglen = objlen
offs += cython.sizeof('_varstr_header')
pbuf += cython.sizeof('_varstr_header')
memcpy(pbuf, cython.cast(cython.p_char, obj), objlen) # lint:ok
finally:
PyBuffer_Release(cython.address(pybuf)) # lint:ok
else:
if objlen < 0x7FFF:
hpacker = struct.Struct('=H')
hpacker.pack_into(buf, offs, objlen | compressed)
offs += hpacker.size
else:
qpacker = struct.Struct('=HQ')
qpacker.pack_into(buf, offs, 0x7FFF | compressed, objlen)
offs += qpacker.size
buf[offs:offs+objlen] = obj
offs += objlen
return offs
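A hedged sketch of the length header written by pack_into above: the '=H' / '=HQ' formats and the 0x8000 compression flag come from this listing, while the helper name and its return convention are my own:
import struct
def _read_varstr_header_sketch(buf, offs):
    # Short form: 2-byte header, low 15 bits hold the payload length.
    shortlen, = struct.unpack_from('=H', buf, offs)
    compressed = bool(shortlen & 0x8000)
    if (shortlen & 0x7FFF) == 0x7FFF:
        # Long form: the 15-bit field is saturated and a 64-bit length follows.
        _, objlen = struct.unpack_from('=HQ', buf, offs)
        return objlen, compressed, offs + struct.calcsize('=HQ')
    return shortlen & 0x7FFF, compressed, offs + struct.calcsize('=H')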
def get_packable(self, packer, padding, obj, offs = 0, buf = None, idmap = None, implicit_offs = 0):
if idmap is None:
idmap = {}
baseoffs = offs
if buf is None:
buf = self._pack_buffer
has_bitmap, none_bitmap, present_bitmap = self._get_bitmaps(obj)
fixed_present = present_bitmap & self._fixed_bitmap
size = packer.size
offs += size + padding
if offs > len(buf):
raise struct.error('buffer too small')
packable = [
has_bitmap,
none_bitmap,
]
packable_append = packable.append
idmap_get = idmap.get
slot_types = self.slot_types
alignment = self.alignment
for i,slot in enumerate(self.slot_keys):
mask = cython.cast(cython.ulonglong, 1) << i
if present_bitmap & mask:
val = getattr(obj, slot)
if fixed_present & mask:
packable_append(val)
else:
val_id = id(val)
val_offs = idmap_get(val_id)
if val_offs is None:
idmap[val_id] = ival_offs = offs + implicit_offs
offs = slot_types[slot].pack_into(val, buf, offs, idmap, implicit_offs)
padding = (offs + alignment - 1) / alignment * alignment - offs
offs += padding
else:
ival_offs = val_offs
packable_append(ival_offs - baseoffs - implicit_offs)
padding = (offs + alignment - 1) / alignment * alignment - offs
offs = offs + padding
if offs > len(buf):
raise struct.error('buffer too small')
return packable, offs
def build(cls, initializer, destfile = None, tempdir = None, idmap = None):
if destfile is None:
destfile = tempfile.NamedTemporaryFile(dir = tempdir)
initial_pos = destfile.tell()
write = destfile.write
write(cls._Header.pack(0, 0, 0))
write(cls._NewHeader.pack(cls._CURRENT_VERSION, cls._CURRENT_MINIMUM_READER_VERSION, 0, 0))
destfile.flush()
data_pos = destfile.tell()
schema = cls.schema
_index = []
index_parts = []
for item in initializer:
current_pos = data_pos - initial_pos
_index.append(current_pos)
buf = schema.pack(item, idmap, None, None, current_pos)
write(buf)
data_pos += len(buf)
if len(_index) >= 100000:
index_parts.append(numpy.array(_index, dtype = numpy.uint64))
del _index[:]
destfile.flush()
index_pos = destfile.tell()
if _index:
index_parts.append(numpy.array(_index, dtype = numpy.uint64))
del _index
if len(index_parts) > 1:
index = numpy.concatenate(index_parts)
elif index_parts:
index = index_parts[0]
else:
index = numpy.array([], dtype = numpy.uint64)
del index_parts
write(buffer(index))
destfile.flush()
schema_pos = destfile.tell()
cPickle.dump(schema, destfile, 2)
destfile.flush()
final_pos = destfile.tell()
destfile.seek(initial_pos)
write(cls._Header.pack(final_pos - initial_pos, index_pos - initial_pos, len(index)))
write(cls._NewHeader.pack(
cls._CURRENT_VERSION, cls._CURRENT_MINIMUM_READER_VERSION,
schema_pos - initial_pos, final_pos - schema_pos))
destfile.flush()
destfile.seek(final_pos)
return cls.map_file(destfile, initial_pos)
def hinted_bsearch(a, hkey, hint):
hi = len(a)
lo = 0
if hi <= lo:
return lo
elif hkey < a[0]:
return lo
elif hkey > a[hi-1]:
return hi
#lint:disable
PyObject_GetBuffer(a, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.len < hi * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
pindex = cython.cast(cython.p_char, indexbuf.buf)
stride0 = indexbuf.strides[0]
#lint:enable
dtype = cython.cast('char*', a.dtype.char)[0]
if dtype == 'L' or dtype == 'Q':
# TO-DO: better hints?
return _c_search_hkey_ui64(hkey, pindex, stride0, hi, hint)
elif dtype == 'I':
# TO-DO: better hints?
return _c_search_hkey_ui32(hkey, pindex, stride0, hi, hint)
elif dtype == 'l' or dtype == 'q':
# TO-DO: better hints?
return _c_search_hkey_i64(hkey, pindex, stride0, hi, hint)
elif dtype == 'i':
# TO-DO: better hints?
return _c_search_hkey_i32(hkey, pindex, stride0, hi, hint)
elif dtype == 'd':
# TO-DO: better hints?
return _c_search_hkey_f64(hkey, pindex, stride0, hi, hint)
elif dtype == 'f':
# TO-DO: better hints?
return _c_search_hkey_f32(hkey, pindex, stride0, hi, hint)
else:
raise NotImplementedError("Unsupported array type %s" % (chr(dtype),))
finally:
PyBuffer_Release(cython.address(indexbuf)) #lint:ok
def iteritems(self):
buf = self._buf
dtype = self.dtype
index = self.index
if cython.compiled:
#lint:disable
buf = self._likebuf
PyObject_GetBuffer(buf, cython.address(pybuf), PyBUF_SIMPLE)
try:
if dtype is npuint64:
PyObject_GetBuffer(index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < self.index_elements * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
stride1 = indexbuf.strides[1]
pindex = cython.cast(cython.p_char, indexbuf.buf)
for i in xrange(self.index_elements):
yield (
cython.cast(cython.p_ulonglong, pindex)[0],
cython.cast(cython.p_ulonglong, pindex + stride1)[0]
)
pindex += stride0
finally:
PyBuffer_Release(cython.address(indexbuf))
elif dtype is npuint32:
PyObject_GetBuffer(index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < self.index_elements * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
stride1 = indexbuf.strides[1]
pindex = cython.cast(cython.p_char, indexbuf.buf)
for i in xrange(self.index_elements):
yield (
cython.cast(cython.p_uint, pindex)[0],
cython.cast(cython.p_uint, pindex + stride1)[0]
)
pindex += stride0
finally:
PyBuffer_Release(cython.address(indexbuf))
else:
for i in xrange(self.index_elements):
yield (
index[i,0],
index[i,1]
)
finally:
PyBuffer_Release(cython.address(pybuf))
#lint:enable
else:
for i in xrange(self.index_elements):
yield (index[i,0], index[i,1])
def _search_hkey(self, hkey):
hi = self.index_elements
lo = 0
hikey = self._index_max
lokey = self._index_min
if hkey < lokey:
return lo
elif hkey > hikey:
return hi
if cython.compiled:
dtype = self._dtype
if dtype is npuint64 or dtype is npuint32:
#lint:disable
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.len < hi * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
pindex = cython.cast(cython.p_char, indexbuf.buf)
stride0 = indexbuf.strides[0]
if dtype is npuint64:
# TO-DO: better hints?
hint = (lo+hi)//2
return _c_search_hkey_ui64(hkey, pindex, stride0, hi, hint)
elif dtype is npuint32:
# TO-DO: better hints?
hint = (lo+hi)//2
return _c_search_hkey_ui32(hkey, pindex, stride0, hi, hint)
else:
raise AssertionError("Internal error")
finally:
PyBuffer_Release(cython.address(indexbuf))
#lint:enable
else:
raise AssertionError("Internal error")
else:
dtype = self.dtype
struct_dt = numpy.dtype([
('key', dtype),
('value', dtype),
])
return self.index.view(struct_dt).reshape(self.index.shape[0]).searchsorted(
numpy.array([(hkey,0)],dtype=struct_dt))[0]
def get(self, key, default = None):
if not isinstance(key, (int, long)):
return default
if key < 0 or key > self.dtypemax:
return default
hkey = key
startpos = self._search_hkey(hkey)
nitems = self.index_elements
if 0 <= startpos < nitems:
buf = self._buf
dtype = self._dtype
if cython.compiled:
#lint:disable
buf = self._likebuf
PyObject_GetBuffer(buf, cython.address(pybuf), PyBUF_SIMPLE)
try:
if dtype is npuint64:
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < nitems * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
stride1 = indexbuf.strides[1]
pindex = cython.cast(cython.p_char, indexbuf.buf) + startpos * stride0
pindexend = cython.cast(cython.p_char, indexbuf.buf) + indexbuf.len - stride0 + 1
if pindex < pindexend and cython.cast(cython.p_ulonglong, pindex)[0] == hkey:
return cython.cast(cython.p_ulonglong, pindex + stride1)[0]
finally:
PyBuffer_Release(cython.address(indexbuf))
elif dtype is npuint32:
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < nitems * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
stride1 = indexbuf.strides[1]
pindex = cython.cast(cython.p_char, indexbuf.buf) + startpos * stride0
pindexend = cython.cast(cython.p_char, indexbuf.buf) + indexbuf.len - stride0 + 1
if pindex < pindexend and cython.cast(cython.p_uint, pindex)[0] == hkey:
return cython.cast(cython.p_uint, pindex + stride1)[0]
finally:
PyBuffer_Release(cython.address(indexbuf))
else:
index = self.index
if startpos < nitems and index[startpos,0] == hkey:
return index[startpos,1]
finally:
PyBuffer_Release(cython.address(pybuf))
#lint:enable
else:
index = self.index
if startpos < nitems and index[startpos,0] == hkey:
return index[startpos,1]
return default
def __contains__(self, key):
if not isinstance(key, (int, long)):
return False
if key < 0 or key > self.dtypemax:
return False
hkey = key
startpos = self._search_hkey(hkey)
nitems = self.index_elements
if 0 <= startpos < nitems:
buf = self._buf
dtype = self._dtype
if cython.compiled:
#lint:disable
buf = self._likebuf
PyObject_GetBuffer(buf, cython.address(pybuf), PyBUF_SIMPLE)
try:
if dtype is npuint64:
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < nitems * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
pindex = cython.cast(cython.p_char, indexbuf.buf) + startpos * stride0
pindexend = cython.cast(cython.p_char, indexbuf.buf) + indexbuf.len - stride0 + 1
if pindex < pindexend and cython.cast(cython.p_ulonglong, pindex)[0] == hkey:
return True
finally:
PyBuffer_Release(cython.address(indexbuf))
elif dtype is npuint32:
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < nitems * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
pindex = cython.cast(cython.p_char, indexbuf.buf) + startpos * stride0
pindexend = cython.cast(cython.p_char, indexbuf.buf) + indexbuf.len - stride0 + 1
if pindex < pindexend and cython.cast(cython.p_uint, pindex)[0] == hkey:
return True
finally:
PyBuffer_Release(cython.address(indexbuf))
else:
index = self.index
if startpos < nitems and index[startpos,0] == hkey:
return True
finally:
PyBuffer_Release(cython.address(pybuf))
#lint:enable
else:
index = self.index
if startpos < nitems and index[startpos,0] == hkey:
return True
return False
def get_iter(self, key):
if not isinstance(key, (int, long)):
return
if key < 0 or key > self.dtypemax:
return
hkey = key
startpos = self._search_hkey(hkey)
nitems = self.index_elements
if 0 <= startpos < nitems:
buf = self._buf
dtype = self._dtype
if cython.compiled:
#lint:disable
buf = self._likebuf
PyObject_GetBuffer(buf, cython.address(pybuf), PyBUF_SIMPLE)
try:
if dtype is npuint64:
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < nitems * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
stride1 = indexbuf.strides[1]
pindex = cython.cast(cython.p_char, indexbuf.buf) + startpos * stride0
pindexend = cython.cast(cython.p_char, indexbuf.buf) + indexbuf.len - stride0 + 1
while pindex < pindexend and cython.cast(cython.p_ulonglong, pindex)[0] == hkey:
yield cython.cast(cython.p_ulonglong, pindex + stride1)[0]
pindex += stride0
finally:
PyBuffer_Release(cython.address(indexbuf))
elif dtype is npuint32:
PyObject_GetBuffer(self.index, cython.address(indexbuf), PyBUF_STRIDED_RO)
try:
if ( indexbuf.strides == cython.NULL
or indexbuf.ndim < 2
or indexbuf.len < nitems * indexbuf.strides[0] ):
raise ValueError("Invalid buffer state")
stride0 = indexbuf.strides[0]
stride1 = indexbuf.strides[1]
pindex = cython.cast(cython.p_char, indexbuf.buf) + startpos * stride0
pindexend = cython.cast(cython.p_char, indexbuf.buf) + indexbuf.len - stride0 + 1
while pindex < pindexend and cython.cast(cython.p_uint, pindex)[0] == hkey:
yield cython.cast(cython.p_uint, pindex + stride1)[0]
pindex += stride0
finally:
PyBuffer_Release(cython.address(indexbuf))
else:
index = self.index
while startpos < nitems and index[startpos,0] == hkey:
yield index[startpos,1]
startpos += 1
finally:
PyBuffer_Release(cython.address(pybuf))
#lint:enable
else:
index = self.index
while startpos < nitems and index[startpos,0] == hkey:
yield index[startpos,1]
startpos += 1