def _write_binary_i_16(
        task_handle, write_array, num_samps_per_chan, auto_start, timeout,
        data_layout=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_written = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxWriteBinaryI16
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, c_bool32,
                    ctypes.c_double, ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int16, flags=('C', 'W')),
                    ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, auto_start, timeout,
        data_layout.value, write_array,
        ctypes.byref(samps_per_chan_written), None)
    check_for_error(error_code)
    return samps_per_chan_written.value
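The wrapper above (and the read wrapper that follows) populates cfunc.argtypes lazily with double-checked locking: a lock-free check on the hot path, then a second check under the lock so only one thread ever writes argtypes. A minimal sketch of the idiom, with hypothetical func/lock/argtypes standing in for cfunc, cfunc.arglock, and the argtypes list:

def configure_argtypes_once(func, lock, argtypes):
    # First check avoids taking the lock on every call; the second check
    # guards against another thread having configured the function while
    # we were waiting for the lock.
    if func.argtypes is None:
        with lock:
            if func.argtypes is None:
                func.argtypes = argtypes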
def _read_binary_i_16(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadBinaryI16
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int16, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)
    return samps_per_chan_read.value
def jit_remove_edge(grid, e2k, neighbors, components, e):
    """Remove an edge from a spanning tree."""
    k = e2k[e]
    v1, v2 = grid[1:3, k]
    jit_set_remove(neighbors[v1], v2)
    jit_set_remove(neighbors[v2], v1)
    stack = np.zeros(neighbors.shape[0], np.int16)
    jit_set_add(stack, v1)
    while stack[0]:
        v1 = jit_set_pop(stack)
        components[v1] = True
        for i in range(neighbors[v1, 0]):
            v2 = neighbors[v1, i + 1]
            if not components[v2]:
                jit_set_add(stack, v2)
    return k
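jit_remove_edge relies on array-backed "set" helpers that are not shown: each row stores its element count in slot 0 and its members in slots 1..count. The following is a hedged reconstruction consistent with the calls above, not the original implementations:

def jit_set_add(s, v):
    # append v; s[0] holds the current element count
    s[0] += 1
    s[s[0]] = v

def jit_set_pop(s):
    # remove and return the most recently added element
    v = s[s[0]]
    s[0] -= 1
    return v

def jit_set_remove(s, v):
    # remove v by swapping the last element into its slot
    n = s[0]
    for i in range(1, n + 1):
        if s[i] == v:
            s[i] = s[n]
            s[0] = n - 1
            return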
def get_3d_data_slices(slices):  # get data in Hounsfield units
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))  # from v9
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0  # correct cylindrical bound entries to 0
    # Convert to Hounsfield units (HU); the intercept is usually -1024
    for slice_number in range(len(slices)):  # from v8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
def get_pixels_hu(slices):
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)
    # Set outside-of-scan pixels to 0.
    # The intercept is usually -1024, so air is approximately 0.
    image[image == -2000] = 0
    # Convert to Hounsfield units (HU). The slope can differ per slice, so do
    # it individually (case in point: black_tset, slices 95 vs 96).
    # Changes/correction - 31.01.2017
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
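A minimal, illustrative call site for get_pixels_hu, assuming a directory of single-frame CT DICOM files and the pydicom package (imported as dicom in the older snippets of this listing); the path is hypothetical:

import os
import pydicom

scan_dir = '/path/to/scan'
slices = [pydicom.dcmread(os.path.join(scan_dir, f)) for f in os.listdir(scan_dir)]
slices.sort(key=lambda s: float(s.ImagePositionPatient[2]))
volume = get_pixels_hu(slices)  # int16 volume in Hounsfield units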
def get_3d_data_hu(path):  # get data in Hounsfield units
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    # slices.sort(key=lambda x: int(x.InstanceNumber))           # was x.InstanceNumber
    # slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))  # from v8 - BUGGY
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))  # from 22.02
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0  # correct cylindrical bound entries to 0
    # Convert to Hounsfield units (HU); the intercept is usually -1024
    for slice_number in range(len(slices)):  # from v8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
def _get_dtype_maps():
    """Get dictionaries to map numpy data types to ITK types and
    the other way around.
    """
    # Define pairs
    tmp = [(np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
           (np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
           (np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
           (np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
           (np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG')]
    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__
    # Done
    return map1, map2
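An example round trip through the two maps returned above; note that the keys on the numpy side are dtype names, not dtype objects:

np_to_itk, itk_to_np = _get_dtype_maps()
assert np_to_itk['int16'] == 'MET_SHORT'
assert itk_to_np['MET_SHORT'] == 'int16'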
def generateCountMaps(self, coords):
    '''Generates a count map for the provided list of coordinates.'''
    s = self.config['projective_field_size']
    unpadded_size = self.config['output_size']
    target_size = 3 + unpadded_size + 2 * s
    countMaps = np.zeros((self.config['cls_nb'], target_size, target_size), dtype=np.int16)
    for coord in coords:
        y = coord[1] - self.config['contextual_pad']
        x = coord[2] - self.config['contextual_pad']
        if 0 <= y < self.config['tile_size'] and 0 <= x < self.config['tile_size']:
            self.inc_region(countMaps[coord[0]], *self.target_sizes[y, x])
    return np.moveaxis(countMaps, 0, -1).astype(np.float32)
def inc_region(self, dst, y, x, h, w):
    '''Increments dst in the specified region. Runs fastest on np.int8, but is
    not much slower on np.int16.'''
    dh, dw = dst.shape
    h2 = h // 2
    w2 = w // 2
    py = y - h2
    px = x - w2
    y_min = max(0, py)
    y_max = min(dh, y + h2)
    x_min = max(0, px)
    x_max = min(dw, x + w2)
    if y_max - y_min <= 0 or x_max - x_min <= 0:
        return
    dst[y_min:y_max, x_min:x_max] += 1
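A quick sanity check for inc_region: since its self parameter is unused, it can be exercised as a plain function for illustration. Note how the window is clipped at the array border:

import numpy as np

counts = np.zeros((5, 5), dtype=np.int16)
inc_region(None, counts, 0, 0, 4, 4)  # 4x4 window centred at (0, 0)
# counts[0:2, 0:2] is now 1; the part of the window falling outside
# the array is silently ignored.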
def generateCountMaps(self, coords):
    '''Generates a count map for the provided list of coordinates. It can
    count at most 256 objects within the receptive field; beyond that it
    overflows.
    '''
    s = self.config['receptive_field_size']
    pad = s // 2
    unpadded_size = self.config['tile_size']
    target_size = 1 + unpadded_size + 2 * pad
    countMaps = np.zeros((self.config['cls_nb'], target_size, target_size), dtype=np.int16)
    y_min = 0
    y_max = unpadded_size
    x_min = 0
    x_max = unpadded_size
    for coord in coords:
        if y_min <= coord[1] < y_max and x_min <= coord[2] < x_max:
            self.inc_region(countMaps[coord[0]], coord[1] + pad, coord[2] + pad, s, s)
    return np.moveaxis(countMaps, 0, -1).astype(np.float32)
def generateCountMaps(self, coords):
    '''Generates a count map for the provided list of coordinates.'''
    s = self.config['projective_field_size']
    target_size = 3 + self.config['output_size'] + 2 * s
    count_maps = np.zeros((self.config['cls_nb'], target_size, target_size), dtype=np.int16)
    shift = -self.config['contextual_pad']
    size = self.config['tile_size']
    for coord in coords:
        y = coord[1] + shift
        x = coord[2] + shift
        if 0 <= y < size and 0 <= x < size:
            self.inc_region(count_maps[coord[0]], *self.target_sizes[y, x])
    return np.moveaxis(count_maps, 0, -1).astype(np.float32)
def __read_annotations_old(self):
    """
    Read the stimulus grid properties.

    Returns a dictionary containing the parameter names as keys and the
    parameter values as values.

    ------------------------------------------------
    The returned objects must be added to the Block.

    This reads an old version of the format that does not store parameter
    names, so placeholder names are created instead.

    ID: 29099
    """
    # int16 * 14 -- an array of parameter values
    values = np.fromfile(self._fsrc, dtype=np.int16, count=14)

    # create dummy names and combine them with the values in a dict;
    # the dict will be added to the annotations
    params = ['param%s' % i for i in range(len(values))]
    annotations = dict(zip(params, values))
    return annotations
def read_input_features(l, inp=sys.stdin):
    if isinstance(inp, str):
        with open(inp, 'r') as f:
            return read_input_features(l, f)  # pass `l` through on the recursive call
    print("%d samples" % l, file=sys.stderr)
    xs = np.zeros((l, flen), np.int16)
    ys = np.zeros((l, n * n * classes), np.int16)
    i = 0
    for line in inp:
        xs[i, :], ys[i, :] = parse_csv_row_xy(line)
        i += 1
        if i % 10000 == 0:
            print("%d read from disk" % i, file=sys.stderr)
    return xs, ys
def slow_down_sound(sound, rate):
    """Returns a sound which is a slowed-down version of the original.
    rate - the rate at which the sound should be slowed down, e.g. 0.5 would be half speed.
    """
    raise NotImplementedError()
    # unreachable work-in-progress code below
    grow_rate = 1 / rate
    # make it 1/rate times longer.
    a1 = sndarray.array(sound)
    surf = pygame.surfarray.make_surface(a1)
    print(a1.shape[0] * grow_rate)
    scaled_surf = pygame.transform.scale(surf, (int(a1.shape[0] * grow_rate), a1.shape[1]))
    print(scaled_surf)
    print(surf)
    a2 = a1 * rate
    print(a1.shape)
    print(a2.shape)
    print(a2)
    sound2 = sndarray.make_sound(a2.astype(int16))
    return sound2
def _check_valid_data(self, data):
    """Checks that the incoming data is a 2 x #elements ndarray of ints.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to verify.

    Raises
    ------
    ValueError
        If the data is not of the correct shape or type.
    """
    if data.dtype.type != np.int8 and data.dtype.type != np.int16 \
            and data.dtype.type != np.int32 and data.dtype.type != np.int64 \
            and data.dtype.type != np.uint8 and data.dtype.type != np.uint16 \
            and data.dtype.type != np.uint32 and data.dtype.type != np.uint64:
        raise ValueError('Must initialize image coords with a numpy int ndarray')
    if data.shape[0] != 2:
        raise ValueError('Illegal data array passed to image coords. Must have 2 coordinates')
    if len(data.shape) > 2:
        raise ValueError('Illegal data array passed to point cloud. Must have 1 or 2 dimensions')
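The chain of dtype comparisons above can be collapsed using numpy's abstract type hierarchy; a sketch of an equivalent predicate (an alternative formulation, not the library's own code):

import numpy as np

def _is_int_dtype(data):
    # np.integer is the common parent of all signed and unsigned numpy ints,
    # and excludes bool and the floating types, matching the explicit chain
    return np.issubdtype(data.dtype, np.integer)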
def update(self, x):
    """Update the buffer.

    Args:
        x (numpy.ndarray): array of shape
            (n_new_samples, n_channels(, n_points))
    """
    if x.ndim != self.buffer.ndim:
        raise ValueError('x does not have the same number of dimensions '
                         'as the buffer.')
    nw = x.shape[0]

    # Determine the indices at which the new values should be put into the array
    ind = np.arange(self.ind, self.ind + nw, dtype=np.int16) % self.n
    self.buffer[ind, :] = x

    # Set self.ind to the index just past the new rows. Defined separately
    # here to allow the new data to be an array rather than just one row
    self.ind = (ind[-1] + 1) % self.n
    self.pts += nw
def mark_noise(self, noise, nw=None):
    """Mark noisy samples in the buffer.

    Mark the last `nw` samples in the buffer as noisy (noisy -> True;
    clean -> False).

    Args:
        noise (bool): if True, mark the last nw samples as noise

    Keyword Args:
        nw (int): number of samples to mark as noise. If None, use n
            points.
    """
    if not nw:
        nw = self.n
    ind = np.arange(self.ind - nw, self.ind, dtype=np.int16) % self.n
    self.noise[ind, :] = noise
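Both methods above map a run of logical sample indices onto physical buffer rows with a modulo; a small illustration of the wraparound, assuming a buffer of n = 4 rows with the write pointer at 3:

import numpy as np

ind = np.arange(3, 3 + 2, dtype=np.int16) % 4
# ind == array([3, 0], dtype=int16): the second new row wraps to the start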
def test_no_data_deserialization(self):
    arr = np.int16([[[-32768, -32768, -32768, -32768],
                     [-32768, -32768, -32768, -32768],
                     [-32768, -32768, -32768, -32768],
                     [-32768, -32768, -32768, -32768]]])
    epsg_code = 3857
    extent = Extent(0.0, 0.0, 10.0, 10.0)
    projected_extent = ProjectedExtent(extent, epsg_code)

    tile = Tile(arr, 'SHORT', -32768)
    rdd = BaseTestClass.pysc.parallelize([(projected_extent, tile)])
    raster_layer = RasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd)

    actual_tile = raster_layer.to_numpy_rdd().first()[1]

    self.assertEqual(actual_tile.cell_type, tile.cell_type)
    self.assertEqual(actual_tile.no_data_value, tile.no_data_value)
    self.assertTrue((actual_tile.cells == tile.cells).all())
def wavWrite(y, fs, nbits, audioFile):
    """Write samples to a WAV file.

    Args:
        y: (ndarray / 2D ndarray) (floating point) sample vector
            mono: DIM: nSamples
            stereo: DIM: nSamples x nChannels
        fs: (int) sample rate in Hz
        nbits: (int) number of bits
        audioFile: (string) WAV file name to write
    """
    if nbits == 8:
        intsamples = (y + 1.0) * AudioIO.normFact['int' + str(nbits)]
        fX = np.int8(intsamples)
    elif nbits == 16:
        intsamples = y * AudioIO.normFact['int' + str(nbits)]
        fX = np.int16(intsamples)
    elif nbits > 16:
        fX = y

    write(audioFile, fs, fX)
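An example call, under two assumptions drawn from context: write is scipy.io.wavfile.write, and AudioIO.normFact['int16'] is the int16 full-scale factor:

import numpy as np

fs = 44100
t = np.arange(fs) / fs
tone = 0.5 * np.sin(2 * np.pi * 440.0 * t)  # one second of 440 Hz in [-1, 1]
wavWrite(tone, fs, 16, 'tone.wav')          # scaled to np.int16 and written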
def batch_works(k):
    if k == n_processes - 1:
        paths = all_paths[k * int(len(all_paths) / n_processes):]
    else:
        paths = all_paths[k * int(len(all_paths) / n_processes):
                          (k + 1) * int(len(all_paths) / n_processes)]
    for path in paths:
        probs = np.load(os.path.join(input_path, path))
        pred = np.argmax(probs, axis=3)
        fg_prob = 1 - probs[..., 0]
        pred = clean_contour(fg_prob, pred)
        seg = np.zeros(pred.shape, dtype=np.int16)
        seg[pred == 1] = 1
        seg[pred == 2] = 2
        seg[pred == 3] = 4
        img = nib.Nifti1Image(seg, np.eye(4))
        nib.save(img, os.path.join(output_path, path.replace('_probs.npy', '.nii.gz')))
def test_unaligned(self):
    v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
    d = v.view(np.dtype("S8"))
    # unaligned source
    x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
    x = x.view(np.dtype("S8"))
    x[...] = np.array("b" * 8, dtype="S")
    b = np.arange(d.size)
    # trivial
    assert_equal(d[b], d)
    d[b] = x
    # nontrivial
    # unaligned index array
    b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
    b = b.view(np.intp)[:d.size]
    b[...] = np.arange(d.size)
    assert_equal(d[b.astype(np.int16)], d)
    d[b.astype(np.int16)] = x
    # boolean
    d[b % 2 == 0]
    d[b % 2 == 0] = x[::2]
def test_int(self):
    for st, ut, s in [(np.int8, np.uint8, 8),
                      (np.int16, np.uint16, 16),
                      (np.int32, np.uint32, 32),
                      (np.int64, np.uint64, 64)]:
        for i in range(1, s):
            assert_equal(hash(st(-2**i)), hash(-2**i),
                         err_msg="%r: -2**%d" % (st, i))
            assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (st, i - 1))
            assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (st, i))

            i = max(i - 1, 1)
            assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (ut, i - 1))
            assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (ut, i))
def test_prod(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                  np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        if ctype in ['1', 'b']:
            self.assertRaises(ArithmeticError, a.prod)
            self.assertRaises(ArithmeticError, a2.prod, axis=1)
        else:
            assert_equal(a.prod(axis=0), 26400)
            assert_array_equal(a2.prod(axis=0),
                               np.array([50, 36, 84, 180], ctype))
            assert_array_equal(a2.prod(axis=-1),
                               np.array([24, 1890, 600], ctype))
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
                  np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)

        tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
        assert_array_equal(np.cumsum(a, axis=0), tgt)

        tgt = np.array(
            [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
        assert_array_equal(np.cumsum(a2, axis=0), tgt)

        tgt = np.array(
            [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
        assert_array_equal(np.cumsum(a2, axis=1), tgt)
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                  np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        if ctype in ['1', 'b']:
            self.assertRaises(ArithmeticError, np.prod, a)
            self.assertRaises(ArithmeticError, np.prod, a2, 1)
        else:
            assert_equal(a.prod(axis=0), 26400)
            assert_array_equal(a2.prod(axis=0),
                               np.array([50, 36, 84, 180], ctype))
            assert_array_equal(a2.prod(axis=-1),
                               np.array([24, 1890, 600], ctype))
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                  np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        if ctype in ['1', 'b']:
            self.assertRaises(ArithmeticError, np.cumprod, a)
            self.assertRaises(ArithmeticError, np.cumprod, a2, 1)
            self.assertRaises(ArithmeticError, np.cumprod, a)
        else:
            assert_array_equal(np.cumprod(a, axis=-1),
                               np.array([1, 2, 20, 220,
                                         1320, 6600, 26400], ctype))
            assert_array_equal(np.cumprod(a2, axis=0),
                               np.array([[1, 2, 3, 4],
                                         [5, 12, 21, 36],
                                         [50, 36, 84, 180]], ctype))
            assert_array_equal(np.cumprod(a2, axis=-1),
                               np.array([[1, 2, 6, 24],
                                         [5, 30, 210, 1890],
                                         [10, 30, 120, 600]], ctype))
# dataset.py, from the project Video-Classification-Action-Recognition (author: qijiezhao)
def get_crop_ix(self, training_size):
    rescale_sizes = self.rescale_size
    crop_inds = []
    for size_pair in rescale_sizes:
        mother_w, mother_h = size_pair
        crop_ix = np.zeros([5, 4], dtype=np.int16)
        w_indices = (0, mother_w - training_size)
        h_indices = (0, mother_h - training_size)
        w_center = (mother_w - training_size) // 2  # integer division keeps the indices integral
        h_center = (mother_h - training_size) // 2
        crop_ix[4, :] = [w_center, h_center,
                         training_size + w_center, training_size + h_center]
        cnt = 0
        for i in w_indices:
            for j in h_indices:
                crop_ix[cnt, :] = [i, j, i + training_size, j + training_size]
                cnt += 1
        crop_inds.append(crop_ix)
    return crop_inds
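As a concrete check, assuming rescale_size contains the pair (256, 256) and training_size = 224, the method yields the four corner crops followed by the centre crop (each row is [x1, y1, x2, y2]):

import numpy as np

expected = np.array([[  0,   0, 224, 224],
                     [  0,  32, 224, 256],
                     [ 32,   0, 256, 224],
                     [ 32,  32, 256, 256],
                     [ 16,  16, 240, 240]], dtype=np.int16)  # last row: centre crop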
def __init__(self, data, comments=list()):
    """
    Data structure for storing a sequence of amino acids.
    The latter is represented by a contiguous array of integers.
    The mapping between the amino acids and their numeric values
    is done by using the ASCII table.

    Attributes
    ----------
    comments [list] : list of information about the sequence parsed from the FASTA file.
        The list is constructed by splitting the comments using the ' ' delimiter.
    N [int] : length of the sequence
    data [np.ndarray] : contiguous array containing the ASCII values of the amino acids
    """
    self.comments = comments
    self.N = len(data)
    if isinstance(data, np.ndarray):
        self.data = data
    else:
        # If a string is passed, it is converted to a numpy array
        self.data = np.empty(self.N, dtype=np.int16)
        for i in range(self.N):
            self.data[i] = Sequence.charToInt(data[i])
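If charToInt is a plain ASCII lookup (an assumption; its definition is not shown here), the per-character loop could be vectorized for long sequences along these lines:

import numpy as np

seq = 'MKTAYIAKQR'  # hypothetical amino-acid string
data = np.frombuffer(seq.encode('ascii'), dtype=np.uint8).astype(np.int16)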
def test_load_table_columnar_arrow_all(self, con, all_types_table):
    pa = pytest.importorskip("pyarrow")
    skip_if_no_arrow_loader(con)

    names = ['boolean_', 'smallint_', 'int_', 'bigint_',
             'float_', 'double_', 'varchar_', 'text_',
             'time_', 'timestamp_', 'date_']
    columns = [pa.array([True, False, None], type=pa.bool_()),
               pa.array([1, 0, None]).cast(pa.int16()),
               pa.array([1, 0, None]).cast(pa.int32()),
               pa.array([1, 0, None]),
               pa.array([1.0, 1.1, None]).cast(pa.float32()),
               pa.array([1.0, 1.1, None]),
               # no fixed-width string
               pa.array(['a', 'b', None]),
               pa.array(['a', 'b', None]),
               (pa.array([1, 2, None]).cast(pa.int32())
                  .cast(pa.time32('s'))),
               pa.array([datetime.datetime(2016, 1, 1, 12, 12, 12),
                         datetime.datetime(2017, 1, 1), None]),
               pa.array([datetime.date(2016, 1, 1),
                         datetime.date(2017, 1, 1), None])]
    table = pa.Table.from_arrays(columns, names=names)
    con.load_table_arrow(all_types_table, table)