Example source code for Python's s_() (numpy.s_)
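np.s_ is numpy's index-expression helper: indexing it returns exactly the slice object (or tuple of slices) that would appear inside square brackets, so an index can be built once, stored in a variable, and applied to several arrays later. A minimal self-contained sketch:

import numpy as np

a = np.arange(10)
idx = np.s_[2:8:2]          # equivalent to slice(2, 8, 2)
print(a[idx])               # [2 4 6]

grid = np.arange(16).reshape(4, 4)
window = np.s_[1:3, 1:3]    # tuple of slices selecting a 2-D block
print(grid[window])         # rows 1-2, cols 1-2

The snippets below are collected from open-source projects and show the same idea at scale: slices computed once and reused for padding, cropping, partial HDF5 reads, and buffer management.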

preprocessing.py (project: segmentation_DLMI, author: imatge-upc)
def resize_image(image,target_shape, pad_value = 0):
    assert isinstance(target_shape, list) or isinstance(target_shape, tuple)
    add_shape, subs_shape = [], []

    image_shape = image.shape
    shape_difference = np.asarray(target_shape, dtype=int) - np.asarray(image_shape,dtype=int)
    for diff in shape_difference:
        if diff < 0:
            subs_shape.append(np.s_[int(np.abs(np.ceil(diff/2))):int(np.floor(diff/2))])
            add_shape.append((0, 0))
        else:
            subs_shape.append(np.s_[:])
            add_shape.append((int(np.ceil(1.0*diff/2)),int(np.floor(1.0*diff/2))))
    output = np.pad(image, tuple(add_shape), 'constant', constant_values=(pad_value, pad_value))
    output = output[tuple(subs_shape)]  # index with a tuple; list-of-slices indexing is deprecated
    return output
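A hypothetical usage sketch (shapes invented for the demo): dimensions smaller than the target are zero-padded symmetrically, larger ones are center-cropped.

import numpy as np

image = np.ones((4, 6))
resized = resize_image(image, (6, 4))   # axis 0 padded 4 -> 6, axis 1 cropped 6 -> 4
assert resized.shape == (6, 4)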
test_indexing.py (project: radar, author: amoose136)
def test_prepend_not_one(self):
        assign = self.assign
        s_ = np.s_

        a = np.zeros(5)

        # Too large and not only ones.
        assert_raises(ValueError, assign, a, s_[...],  np.ones((2, 1)))

        with warnings.catch_warnings():
            # Will be a ValueError as well.
            warnings.simplefilter("error", DeprecationWarning)
            assert_raises(DeprecationWarning, assign, a, s_[[1, 2, 3],],
                          np.ones((2, 1)))
            assert_raises(DeprecationWarning, assign, a, s_[[[1], [2]],],
                          np.ones((2,2,1)))
csl.py (project: srep, author: Answeror)
def _get_data(path, preprocess):
    data = sio.loadmat(path)['gestures']
    data = [np.transpose(np.delete(segment.astype(np.float32), np.s_[7:192:8], 0))
            for segment in data.flat]
    if preprocess:
        data = list(Context.parallel(jb.delayed(preprocess)(segment, **PREPROCESS_KARGS)
                                     for segment in data))
    return data


#  @cached
#  def _get_data(path, bandstop, cut, downsample):
    #  data = sio.loadmat(path)['gestures']
    #  data = [np.transpose(np.delete(segment.astype(np.float32), np.s_[7:192:8], 0))
            #  for segment in data.flat]
    #  if bandstop:
        #  data = list(Context.parallel(jb.delayed(get_bandstop)(segment) for segment in data))
    #  if cut is not None:
        #  data = list(Context.parallel(jb.delayed(cut)(segment, framerate=FRAMERATE) for segment in data))
    #  if downsample > 1:
        #  data = [segment[::downsample].copy() for segment in data]
    #  return data
_spiketrain.py (project: nelpy, author: nelpy)
def __getitem__(self, *args):
        """epochs, units"""
        # by default, keep all units
        unitslice = slice(None, None, None)
        if isinstance(*args, int):
            epochslice = args[0]
        elif isinstance(*args, EpochArray):
            epochslice = args[0]
        else:
            try:
                slices = np.s_[args]; slices = slices[0]
                if len(slices) > 2:
                    raise IndexError("only [epochs, units] slicing is supported at this time!")
                elif len(slices) == 2:
                    epochslice, unitslice = slices
                else:
                    epochslice = slices[0]
            except TypeError:
                # only epoch to slice:
                epochslice = slices

        return epochslice, unitslice
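The np.s_[args] trick above normalizes whatever __getitem__ received into a plain tuple that can be unpacked; a stand-alone illustration of the mechanics (independent of the nelpy classes):

import numpy as np

args = (np.s_[0:5, 2],)        # what __getitem__(*args) receives for obj[0:5, 2]
slices = np.s_[args][0]        # -> (slice(0, 5, None), 2)
epochslice, unitslice = slices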
_analogsignalarray.py (project: nelpy, author: nelpy)
def __getitem__(self, *args):
        """epochs, signals"""
        # by default, keep all signals
        signalslice = slice(None, None, None)
        if isinstance(*args, int):
            epochslice = args[0]
        elif isinstance(*args, core.EpochArray):
            epochslice = args[0]
        else:
            try:
                slices = np.s_[args]; slices = slices[0]
                if len(slices) > 2:
                    raise IndexError("only [epochs, signal] slicing is supported at this time!")
                elif len(slices) == 2:
                    epochslice, signalslice = slices
                else:
                    epochslice = slices[0]
            except TypeError:
                # only epoch to slice:
                epochslice = slices

        return epochslice, signalslice
base.py (project: voropy, author: nschloe)
def get_edge_mask(self, subdomain=None):
        '''Get edges which are fully in subdomain.
        '''
        if subdomain is None:
            # http://stackoverflow.com/a/42392791/353337
            return numpy.s_[:]

        if subdomain not in self.subdomains:
            self._mark_vertices(subdomain)

        # An edge is inside if all its nodes are in.
        is_in = self.subdomains[subdomain]['vertices'][self.idx_hierarchy]
        # Take `all()` over the first index
        is_inside = numpy.all(is_in, axis=tuple(range(1)))

        if subdomain.is_boundary_only:
            # Filter for boundary
            is_inside = numpy.logical_and(is_inside, self.is_boundary_edge)

        return is_inside
base.py (project: voropy, author: nschloe)
def get_face_mask(self, subdomain=None):
        '''Get faces which are fully in subdomain.
        '''
        if subdomain is None:
            # http://stackoverflow.com/a/42392791/353337
            return numpy.s_[:]

        if subdomain not in self.subdomains:
            self._mark_vertices(subdomain)

        # A face is inside if all its edges are in.
        # An edge is inside if all its nodes are in.
        is_in = self.subdomains[subdomain]['vertices'][self.idx_hierarchy]
        # Take `all()` over all axes except the last two (face_ids, cell_ids).
        n = len(is_in.shape)
        is_inside = numpy.all(is_in, axis=tuple(range(n-2)))

        if subdomain.is_boundary_only:
            # Filter for boundary
            is_inside = numpy.logical_and(is_inside, self.is_boundary_face)

        return is_inside
base.py (project: voropy, author: nschloe)
def get_cell_mask(self, subdomain=None):
        if subdomain is None:
            # http://stackoverflow.com/a/42392791/353337
            return numpy.s_[:]

        if subdomain.is_boundary_only:
            # There are no boundary cells
            return numpy.array([])

        if subdomain not in self.subdomains:
            self._mark_vertices(subdomain)

        is_in = self.subdomains[subdomain]['vertices'][self.idx_hierarchy]
        # Take `all()` over all axes except the last one (cell_ids).
        n = len(is_in.shape)
        return numpy.all(is_in, axis=tuple(range(n-1)))
features.py (project: vqa.pytorch, author: Cadene)
def get_features_old(self, index):
        try:
            self.features_array
        except AttributeError:
            if self.opt['mode'] == 'att':
                self.features_array = np.zeros((2048,14,14), dtype='f')
            elif self.opt['mode'] == 'noatt':
                self.features_array = np.zeros((2048), dtype='f')

        if self.opt['mode'] == 'att':
            self.dataset_features.read_direct(self.features_array,
                                              np.s_[index,:2048,:14,:14],
                                              np.s_[:2048,:14,:14])
        elif self.opt['mode'] == 'noatt':
            self.dataset_features.read_direct(self.features_array,
                                              np.s_[index,:2048],
                                              np.s_[:2048])
        return self.features_array
decompose.py (project: yt, author: yt-project)
def split_array(gle, gre, shape, psize):
    """ Split array into px*py*pz subarrays. """
    n_d = np.array(shape, dtype=np.int64)
    dds = (gre-gle)/shape
    left_edges = []
    right_edges = []
    shapes = []
    slices = []
    for i in range(psize[0]):
        for j in range(psize[1]):
            for k in range(psize[2]):
                piece = np.array((i, j, k), dtype=np.int64)
                lei = n_d * piece // psize
                rei = n_d * (piece + np.ones(3, dtype=np.int64)) // psize
                lle = gle + lei*dds
                lre = gle + rei*dds
                left_edges.append(lle)
                right_edges.append(lre)
                shapes.append(rei-lei)
                slices.append(np.s_[lei[0]:rei[0],
                                    lei[1]:rei[1],
                                    lei[2]:rei[2]])

    return left_edges, right_edges, shapes, slices
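A small illustrative run (values invented for the demo): splitting an 8x8x8 index space into two pieces along the first axis yields two slice tuples covering the halves.

import numpy as np

gle = np.zeros(3)    # global left edge
gre = np.ones(3)     # global right edge
left, right, shapes, slices = split_array(gle, gre, (8, 8, 8), (2, 1, 1))
# slices[0] selects [0:4, 0:8, 0:8]; slices[1] selects [4:8, 0:8, 0:8]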
datasets.py (project: self-supervision, author: gustavla)
def voc2007_classification_generator2(which, batch_size, input_size,
                            outer_input_size,
                            shuffle=True,# seed=0,
                            color_transform=None, random_mirror=False):
    path = os.path.expandvars('$VOC2007_DIR/ImageSets/Main')

    assert which in ['test', 'val']
    imgs, C = dd.io.load('{}.h5'.format(which), ['/data', '/labels'])

    if shuffle:
        rs = np.random.RandomState()
        while True:
            II = rs.randint(len(imgs), size=batch_size)

            ii, cc = imgs[II], C[II]
            if random_mirror and rs.randint(2) == 1:
                ii = ii[:, :, ::-1]
            yield ii, cc
    else:
        for i in range(len(imgs)//batch_size):
            ss = np.s_[i*batch_size:(i+1)*batch_size]
            yield imgs[ss], C[ss]
array.py (project: ivport-v2, author: ivmech)
def flush(self):
        super(PiBayerArray, self).flush()
        self._demo = None
        data = self.getvalue()[-6404096:]
        if data[:4] != b'BRCM':
            raise PiCameraValueError('Unable to locate Bayer data at end of buffer')
        # Strip header
        data = data[32768:]
        # Reshape into 2D pixel values
        data = np.frombuffer(data, dtype=np.uint8).\
                reshape((1952, 3264))[:1944, :3240]
        # Unpack 10-bit values; every 5 bytes contains the high 8-bits of 4
        # values followed by the low 2-bits of 4 values packed into the fifth
        # byte
        data = data.astype(np.uint16) << 2
        for byte in range(4):
            data[:, byte::5] |= ((data[:, 4::5] >> ((4 - byte) * 2)) & 3)
        data = np.delete(data, np.s_[4::5], 1)
        # XXX Should test camera's vflip and hflip settings here and adjust
        self.array = np.zeros(data.shape + (3,), dtype=data.dtype)
        self.array[1::2, 0::2, 0] = data[1::2, 0::2] # Red
        self.array[0::2, 0::2, 1] = data[0::2, 0::2] # Green
        self.array[1::2, 1::2, 1] = data[1::2, 1::2] # Green
        self.array[0::2, 1::2, 2] = data[0::2, 1::2] # Blue
apd_confocal_npz.py (project: FoundryDataBrowser, author: ScopeFoundry)
def update_display(self):
        ii = self.settings['index']
        plane = self.settings['plane']

        if plane == 'xy':        
            arr_slice = np.s_[ii,:,:]
            index_max = self.dat['count_rate_map'].shape[0]
        elif plane == 'yz':
            arr_slice = np.s_[:,:,ii]
            index_max = self.dat['count_rate_map'].shape[2]
        elif plane == 'xz':
            arr_slice = np.s_[:,ii,:]
            index_max = self.dat['count_rate_map'].shape[1] 

        self.settings.index.change_min_max(0, index_max)


        self.imview.setImage(self.dat['count_rate_map'][arr_slice], autoLevels=self.settings['auto_level'], )

        other_ax = dict(xy='z', yz='x', xz='y' )[plane]

        self.info_label.setText("{} plane {}={} um (index={})".format(
            plane, other_ax, self.dat[other_ax+'_array'][ii], ii))
subs.py (project: TurbPlasma, author: tulasinandan)
def compute1didx(extar,slc):
   x1=np.argmin(np.abs(extar[0]-slc[0]))
   x2=np.argmin(np.abs(extar[0]-slc[1]))
   if len(extar) == 2:
      y1=np.argmin(np.abs(extar[1]-slc[2]))
      y2=np.argmin(np.abs(extar[1]-slc[3]))
      if x1==x2: 
         IDX=np.s_[x1,y1:y2]
      elif y1==y2: 
         IDX=np.s_[x1:x2,y1]
   elif len(extar) == 3:
      z1=np.argmin(np.abs(extar[2]-slc[4]))
      z2=np.argmin(np.abs(extar[2]-slc[5]))
      if (x1==x2 and y1==y2): IDX=np.s_[x1,y1,z1:z2]
      if (y1==y2 and z1==z2): IDX=np.s_[x1:x2,y1,z1]
      if (x1==x2 and z1==z2): IDX=np.s_[x1,y1:y2,z1]
   else:
      IDX=np.s_[x1:x2]
   return IDX
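For a 1-D coordinate array the function falls through to the final branch; an illustrative call (coordinate values invented for the demo):

import numpy as np

x = np.linspace(0, 10, 101)
IDX = compute1didx([x], [2.0, 5.0])    # -> np.s_[20:50]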
pyHDF_DSS.py (project: HaD-to-Py, author: latomkovic)
def get_HDF_cell_WSE(hdf_filename, cell_number, flow_area):

    with h5py.File(hdf_filename,'r') as hf:

        flow_areas = hf['Results']['Unsteady']['Output']['Output Blocks']\
        ['Base Output']['Unsteady Time Series']['2D Flow Areas']

        dataset = flow_areas[flow_area]['Water Surface']
        timesteps = dataset.shape[0]

        data_list = np.zeros((timesteps,), dtype='float64')
        dataset.read_direct(data_list, np.s_[0:timesteps,cell_number], np.s_[0:timesteps])
        data_list = np.array(data_list).tolist()

    return data_list                            


# This will go through all of the 1D and 2D observed points listed in the two_dim_coords and one_dim_comp_paths txt files
# Without those two files, the program will not run. This function returns data dictionaries for each gage
RingBuffer.py (project: urh, author: jopohl)
def push(self, values: np.ndarray):
        """
        Push values to buffer. If buffer can't store all values a ValueError is raised
        """
        n = len(values)
        if len(self) + n > self.size:
            raise ValueError("Too much data to push to RingBuffer")

        slide_1 = np.s_[self.right_index:min(self.right_index + n, self.size)]
        slide_2 = np.s_[:max(self.right_index + n - self.size, 0)]
        with self.__data.get_lock():
            data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
            data[slide_1] = values[:slide_1.stop - slide_1.start]
            data[slide_2] = values[slide_1.stop - slide_1.start:]
            self.right_index += n

        self.__length.value += n
yields.py (project: flexCE, author: bretthandrews)
def load_sym(self):
        """Load isotopic and elemental symbols and masses."""
        el_sym = pd.read_csv(join(self.path_yldgen, 'sym_atomicnum.txt'),
                             delim_whitespace=True, usecols=[0, 1],
                             names=['num', 'el'])
        self.atomic_num = np.array(el_sym['num'])
        self.element_all = np.array(el_sym['el'])
        snii_sym = pd.read_csv(join(self.path_yldgen, 'species.txt'),
                               delim_whitespace=True, skiprows=1,
                               usecols=[1, 2], names=['name', 'mass'])
        self.snii_sym = np.array(snii_sym['name'])
        self.snii_sym_mass = np.array(snii_sym['mass'])
        self.n_snii_sym = len(self.snii_sym)
        u, indices = np.unique([item.rstrip('0123456789')
                                for item in self.snii_sym], return_index=True)
        indices_s = np.argsort(indices)
        self.element = np.delete(u[indices_s], np.s_[13, 14])
        self.n_elements = len(self.element)
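Note that np.s_[13, 14] above is simply the tuple (13, 14), so np.delete drops exactly those two positions; a tiny stand-alone check (element names invented for the demo):

import numpy as np

u = np.array(['H', 'He', 'C', 'N', 'O'])
print(np.delete(u, np.s_[1, 3]))    # ['H' 'C' 'O']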
numpy_helper.py (project: latenttrees, author: kaltwang)
def downsample_idx(N, N_max=100, axis=0, method='equidist'):
    if N > N_max:
        if method == 'equidist':
            step = (N - 1) / N_max
            idx_cont = (np.arange(N_max) + 0.5) * step

            # add small slope to idx-cont, to avoid rounding neighbouring values to the same integer.
            # max absolute value added/subtracted is 1/10 of the step size
            adjust = ((idx_cont * 2 / (N - 1)) - 1) * step / 10
            idx_cont += adjust

            idx = np.array(np.round(idx_cont), dtype=int)

        elif method == 'random':
            idx = np.random.choice(N, size=N_max, replace=False)
            idx = np.sort(idx)
    else:
        idx = np.s_[:]
    return idx
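Note the return-type asymmetry: for large N an integer index array comes back, while for small N the function returns np.s_[:], the slice selecting everything; both work directly as indices. A short usage sketch (sizes invented for the demo):

import numpy as np

x = np.arange(1000)
print(x[downsample_idx(1000, N_max=100)].shape)   # (100,)
y = np.arange(50)
print(y[downsample_idx(50, N_max=100)].shape)     # (50,) via np.s_[:]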
utils.py (project: cellranger, author: 10XGenomics)
def format_barcode_summary_h5_key(genome, region, read_type):
    return '%s_%s_%s_barcode_reads' % (genome, region, read_type)
utils.py (project: cellranger, author: 10XGenomics)
def get_full_alignment_base_quality_scores(read):
    """
    Returns base quality scores for the full read alignment, inserting zeroes for deletions and removing
    inserted and soft-clipped bases. Therefore, only returns quality for truly aligned sequenced bases.

    Args:
        read (pysam.AlignedSegment): read to get quality scores for

    Returns:
        (int, np.array): aligned length consumed, and numpy array of quality scores

    """

    quality_scores = np.fromstring(read.qual, dtype=np.byte) - tk_constants.ILLUMINA_QUAL_OFFSET

    start_pos = 0

    for operation,length in read.cigar:
        operation = cr_constants.cigar_numeric_to_category_map[operation]

        if operation == 'D':
            quality_scores = np.insert(quality_scores, start_pos, [0] * length)
        elif operation == 'I' or operation == 'S':
            quality_scores = np.delete(quality_scores, np.s_[start_pos:start_pos + length])

        if not operation == 'I' and not operation == 'S':
            start_pos += length

    return start_pos, quality_scores
test_cloudvolume.py (project: cloud-volume, author: seung-lab)
def test_exists():

    # Bbox version
    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(0,0,0))

    defexists = Bbox( (0,0,0), (128,64,64) )
    results = cv.exists(defexists)
    assert len(results) == 2
    assert results['1_1_1/0-64_0-64_0-64'] == True
    assert results['1_1_1/64-128_0-64_0-64'] == True

    fpath = os.path.join(cv.layer_cloudpath, cv.key, '64-128_0-64_0-64')
    fpath = fpath.replace('file://', '') + '.gz'
    os.remove(fpath)

    results = cv.exists(defexists)
    assert len(results) == 2
    assert results['1_1_1/0-64_0-64_0-64'] == True
    assert results['1_1_1/64-128_0-64_0-64'] == False

    # Slice version
    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(0,0,0))

    defexists = np.s_[ 0:128, :, : ]

    results = cv.exists(defexists)
    assert len(results) == 2
    assert results['1_1_1/0-64_0-64_0-64'] == True
    assert results['1_1_1/64-128_0-64_0-64'] == True

    fpath = os.path.join(cv.layer_cloudpath, cv.key, '64-128_0-64_0-64')
    fpath = fpath.replace('file://', '') + '.gz'
    os.remove(fpath)

    results = cv.exists(defexists)
    assert len(results) == 2
    assert results['1_1_1/0-64_0-64_0-64'] == True
    assert results['1_1_1/64-128_0-64_0-64'] == False
preprocessing.py (project: segmentation_DLMI, author: imatge-upc)
def padding3D(input, width_mode, pad_factor):

    if width_mode == 'multiple':
        assert isinstance(pad_factor, int)
        shape = input.shape[-3:]
        added_shape = [(0,0)]*len(input.shape[:-3])
        for dim in shape:
            added_shape.append((0, (-dim) % pad_factor))  # pad up to the next multiple of pad_factor
        output = np.pad(input, tuple(added_shape), 'constant', constant_values=(0, 0))

    elif width_mode == 'fixed':
        assert isinstance(pad_factor,list) or isinstance(pad_factor,tuple)
        output = np.pad(input, tuple(pad_factor), 'constant',constant_values=(0, 0))

    elif width_mode == 'match':
        assert isinstance(pad_factor, list) or isinstance(pad_factor, tuple)
        shape = input.shape[-3:]
        shape_difference = np.asarray(pad_factor) - np.asarray(shape)
        added_shape = [(0, 0)] * len(input.shape[:-3])
        subs_shape = [np.s_[:]]* len(input.shape[:-3])
        for diff in shape_difference:
            if diff < 0:
                subs_shape.append(np.s_[:diff])
                added_shape.append((0, 0))
            else:
                subs_shape.append(np.s_[:])
                added_shape.append((0, diff))

        output = np.pad(input, tuple(added_shape), 'constant', constant_values=(0, 0))
        output = output[tuple(subs_shape)]  # index with a tuple; list-of-slices indexing is deprecated
    else:
        raise ValueError("Padding3D error (src.helpers.preprocessing_utils): No existen padding method " + str(width_mode))
    return output
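A usage sketch of the 'match' mode (shapes invented for the demo): the three trailing dimensions are padded up or cropped down to the requested size, and leading axes are untouched.

import numpy as np

vol = np.random.rand(2, 5, 7, 5)
out = padding3D(vol, 'match', (6, 6, 6))
assert out.shape == (2, 6, 6, 6)    # 5 padded to 6, 7 cropped to 6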
_database.py (project: ananke, author: beiko-lab)
def get_sparse_matrix(self, chunk_size = 1000):
        """Fetches the time-series data matrix in compressed sparse row (csr)
        format. Does this in chunks to prevent memory usage issues.

        Parameters
        ----------
        chunk_size: int
            the number of items to fetch at one time. Default is 1000.

        Returns
        -------
        scipy.sparse.csr_matrix
            csr matrix object containing sequences/time-series as rows, samples
            /time-points as columns
        """
        data = np.empty(self.h5_table["timeseries/data"].shape)
        indices = np.empty(self.h5_table["timeseries/indices"].shape)
        indptr = np.empty(self.h5_table["timeseries/indptr"].shape)       
        chunks = list(range(0, data.shape[0], chunk_size))
        if chunks[-1] != data.shape[0]:
            chunks = chunks + [data.shape[0]]
        for i,j in zip(chunks[0:-1], chunks[1:]):
            self.h5_table["timeseries/data"].read_direct(data, np.s_[i:j],
                                                               np.s_[i:j])       
        chunks = list(range(0, indices.shape[0], chunk_size))
        if chunks[-1] != indices.shape[0]:
            chunks = chunks + [indices.shape[0]]
        for i,j in zip(chunks[0:-1], chunks[1:]):
            self.h5_table["timeseries/indices"].read_direct(indices,
                                                            np.s_[i:j],
                                                            np.s_[i:j])       
        chunks = list(range(0, indptr.shape[0], chunk_size))
        if chunks[-1] != indptr.shape[0]:
            chunks = chunks + [indptr.shape[0]]
        for i,j in zip(chunks[0:-1], chunks[1:]):
            self.h5_table["timeseries/indptr"].read_direct(indptr,
                                                           np.s_[i:j],
                                                           np.s_[i:j])
        return csr_matrix((data, indices, indptr))
bci_workshop_tools.py (project: Wall-EEG, author: neurotechuoft)
def updatebuffer(data_buffer, new_data):
    """
    Concatenates "new_data" into "buffer_array", and returns an array with 
    the same size than "buffer_array" 
    """    

    new_samples = new_data.shape[0]
    new_buffer = np.concatenate((data_buffer, new_data), axis =0)
    new_buffer = np.delete(new_buffer, np.s_[0:new_samples], 0)

    return new_buffer
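A quick sketch of the FIFO behaviour (shapes invented for the demo): the oldest rows are dropped so the buffer length stays constant.

import numpy as np

buf = np.zeros((5, 2))              # buffer of 5 samples, 2 channels
new = np.ones((2, 2))               # 2 incoming samples
buf = updatebuffer(buf, new)
print(buf.shape)                    # (5, 2): 2 oldest rows out, 2 new rows in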

