Python: example usages of numpy.s_()
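
The snippets below are collected real-world usages of numpy.s_, an index-expression helper: np.s_[...] returns exactly the slice object (or tuple of slices) written inside the brackets, so an index can be built once, stored, and later passed to functions such as np.delete or applied to several arrays. A minimal, standalone illustration (not taken from any of the projects below):

import numpy as np

# np.s_ simply returns the index expression written inside the brackets,
# so a slice can be built once, stored, and reused later.
s = np.s_[2::2]
print(s)                      # slice(2, None, 2)

a = np.arange(10)
print(a[s])                   # [2 4 6 8]
print(np.delete(a, s))        # [0 1 3 5 7 9] -- the same slice passed to np.delete

# Multi-axis expressions come back as a tuple of slices.
print(np.s_[1:3, ::-1])       # (slice(1, 3, None), slice(None, None, -1))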

mules.py — project: Wall-EEG, author: neurotechuoft
def getdata(self, seconds, flush=True):
        """
            Flush all the data currently in the MuLES buffer, then
            request and retrieve the amount of data indicated by `seconds`.
            The data returned has shape [seconds * sampling_frequency, channels].

            Arguments:
            seconds: used to calculate the number of samples requested,
                     n_samples = seconds * sampling_frequency
            flush:   Boolean; if True, send the Flush command before getting data.
                     Default = True
        """
        if flush:        
            self.flushdata()

        # Size of data requested
        n_samples = int(round(seconds * self.params['sampling frequency']))
        n_columns = len(self.params['data format'])
        data_buffer = -1 * np.ones((n_samples, n_columns)) 

        while data_buffer[0, n_columns - 1] < 0:  # while the first row has not been overwritten
            new_data = self.getalldata()
            new_samples = new_data.shape[0]
            data_buffer = np.concatenate((data_buffer, new_data), axis=0)
            data_buffer = np.delete(data_buffer, np.s_[0:new_samples], 0)

        return data_buffer
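
The buffer logic above relies on np.s_[0:new_samples] to name the rows to discard after new samples are appended. A stripped-down sketch of the same append-then-trim pattern, standalone and with made-up sizes:

import numpy as np

buffer = -1 * np.ones((5, 3))          # placeholder rows, marked with -1
new_data = np.arange(6).reshape(2, 3)  # pretend two fresh samples arrived

buffer = np.concatenate((buffer, new_data), axis=0)
buffer = np.delete(buffer, np.s_[0:len(new_data)], 0)  # drop the 2 oldest rows

print(buffer.shape)   # (5, 3) -- the buffer size is preserved
print(buffer[-2:])    # the newly appended samples sit at the end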
mnist.py — project: latplan, author: guicho271828
def mnist (labels = range(10)):
    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (x_train.astype('float32') / 255.).round()
    x_test = (x_test.astype('float32') / 255.).round()
    x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
    x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
    def conc (x,y):
        return np.concatenate((y.reshape([len(y),1]),x),axis=1)
    def select (x,y):
        selected = np.array([elem for elem in conc(x, y) if elem[0] in labels])
        return np.delete(selected,0,1), np.delete(selected,np.s_[1::],1).flatten()
    x_train, y_train = select(x_train, y_train)
    x_test, y_test = select(x_test, y_test)
    return x_train, y_train, x_test, y_test
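
In select above, the label is prepended as column 0; np.delete(selected, 0, 1) then recovers the features, while np.delete(selected, np.s_[1::], 1) (drop all columns from index 1 on) recovers the label column. The same split on a toy array:

import numpy as np

# column 0 holds the label, the rest are features
data = np.array([[7, 0.1, 0.2],
                 [3, 0.3, 0.4]])

features = np.delete(data, 0, 1)                    # drop column 0
labels = np.delete(data, np.s_[1::], 1).flatten()   # drop columns 1..end

print(features)   # [[0.1 0.2]
                  #  [0.3 0.4]]
print(labels)     # [7. 3.]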
test_indexing.py — project: radar, author: amoose136
def test_simple_broadcasting_errors(self):
        assign = self.assign
        s_ = np.s_

        a = np.zeros((5, 1))
        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))

        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))

        assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
cma_es_lib.py — project: third_person_im, author: bstadie
def typical_x(self, dim):
            off = self.rotate(np.floor(np.arange(0, 3, 3. / dim)) /
                          np.logspace(0, 1, dim), inverse=True)
            off[np.s_[3:]] += 0.005
            off[-1] *= 1e2
            off[0] /= 2.0e3 if off[0] > 0 else 1e3
            off[2] /= 3.01e4 if off[2] < 0 else 2e4
            return self._x_opt(dim) + off
GDSII.py — project: PyGDSII, author: UUhy
def recordClip(self):
        '''
        recordClip()

        Clip trailing zeros from the record parameter
        '''
        self._record = np.delete(self._record,np.s_[self._recordIndex::],0)
searchlight.py — project: brainiak, author: brainiak
def _singlenode_searchlight(l, msk, mysl_rad, bcast_var, extra_params):
    """Run searchlight function on block data in parallel.

    `extra_params` contains:

    - Searchlight function.
    - `Shape` mask.
    - Minimum active voxels proportion required to run the searchlight
      function.
    """

    voxel_fn = extra_params[0]
    shape_mask = extra_params[1]
    min_active_voxels_proportion = extra_params[2]
    outmat = np.empty(msk.shape, dtype=np.object)[mysl_rad:-mysl_rad,
                                                  mysl_rad:-mysl_rad,
                                                  mysl_rad:-mysl_rad]
    for i in range(0, outmat.shape[0]):
        for j in range(0, outmat.shape[1]):
            for k in range(0, outmat.shape[2]):
                if msk[i+mysl_rad, j+mysl_rad, k+mysl_rad]:
                    searchlight_slice = np.s_[
                        i:i+2*mysl_rad+1,
                        j:j+2*mysl_rad+1,
                        k:k+2*mysl_rad+1]
                    voxel_fn_mask = msk[searchlight_slice] * shape_mask
                    if (min_active_voxels_proportion == 0
                        or np.count_nonzero(voxel_fn_mask) / voxel_fn_mask.size
                            > min_active_voxels_proportion):
                        outmat[i, j, k] = voxel_fn(
                            [ll[searchlight_slice] for ll in l],
                            msk[searchlight_slice] * shape_mask,
                            mysl_rad,
                            bcast_var)
    return outmat
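
The useful part of the np.s_ call above is that it packages three slices into a single tuple, so the same cubic window can be applied both to the mask and to every data array. A reduced sketch with a fake 3D volume and radius:

import numpy as np

vol = np.random.rand(10, 10, 10)
rad = 1
i, j, k = 4, 5, 6

# one reusable index expression for the (2*rad+1)^3 window starting at (i, j, k)
window = np.s_[i:i + 2 * rad + 1,
               j:j + 2 * rad + 1,
               k:k + 2 * rad + 1]

print(vol[window].shape)   # (3, 3, 3)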
test_technical.py — project: catalyst, author: enigmampc
def expected_bbands(self, window_length, k, closes):
        """Compute the expected data (without adjustments) for the given
        window, k, and closes array.

        This uses talib.BBANDS to generate the expected data.
        """
        lower_cols = []
        middle_cols = []
        upper_cols = []

        ndates, nassets = closes.shape

        for n in range(nassets):
            close_col = closes[:, n]
            if np.isnan(close_col).all():
                # ta-lib doesn't deal well with all nans.
                upper, middle, lower = [np.full(ndates, np.nan)] * 3
            else:
                upper, middle, lower = talib.BBANDS(
                    close_col,
                    window_length,
                    k,
                    k,
                )

            upper_cols.append(upper)
            middle_cols.append(middle)
            lower_cols.append(lower)

        # Stack all of our uppers, middles, lowers into three 2d arrays
        # whose columns are the sids. After that, slice off only the
        # rows we care about.
        where = np.s_[window_length - 1:]
        uppers = np.column_stack(upper_cols)[where]
        middles = np.column_stack(middle_cols)[where]
        lowers = np.column_stack(lower_cols)[where]
        return uppers, middles, lowers
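
Here np.s_ just names the row slice once (where = np.s_[window_length - 1:]) so that the identical trim is applied to all three stacked arrays. The effect in isolation, on a small array with warm-up rows:

import numpy as np

window_length = 3
a = np.arange(12).reshape(6, 2).astype(float)
a[:window_length - 1] = np.nan      # pretend these are warm-up rows

where = np.s_[window_length - 1:]   # rows from index 2 onward
print(a[where])
# [[ 4.  5.]
#  [ 6.  7.]
#  [ 8.  9.]
#  [10. 11.]]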
cma_es_lib.py — project: rllabplusplus, author: shaneshixiang
def typical_x(self, dim):
            off = self.rotate(np.floor(np.arange(0, 3, 3. / dim)) /
                          np.logspace(0, 1, dim), inverse=True)
            off[np.s_[3:]] += 0.005
            off[-1] *= 1e2
            off[0] /= 2.0e3 if off[0] > 0 else 1e3
            off[2] /= 3.01e4 if off[2] < 0 else 2e4
            return self._x_opt(dim) + off
cbpdntv.py — project: sporco, author: bwohlberg
def var_yx_idx(self):
        r"""Get index expression for component block of :math:`\mathbf{y}`
        that is constrained to be equal to :math:`\mathbf{x}`.
        """

        return np.s_[...,-1]
cbpdntv.py — project: sporco, author: bwohlberg
def var_yx_idx(self):
        r"""Get index expression for component block of :math:`\mathbf{y}`
        that is constrained to be equal to :math:`\mathbf{x}`.
        """

        return np.s_[..., 0:self.cri.M]
cbpdn.py — project: sporco, author: bwohlberg
def index_primary(self):
        """Return an index expression appropriate for extracting the primary
        (inner) component of the main variables X, Y, etc.
        """

        return np.s_[..., 0:-self.cri.Cd]
cbpdn.py — project: sporco, author: bwohlberg
def index_addmsk(self):
        """Return an index expression appropriate for extracting the
        additive mask (outer) component of the main variables X, Y, etc."""

        return np.s_[..., -self.cri.Cd:]
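
index_primary and index_addmsk are complementary: with Cd channels appended on the last axis, np.s_[..., 0:-Cd] and np.s_[..., -Cd:] split any array of matching shape into its inner and additive-mask components. A sketch with a made-up value of Cd:

import numpy as np

Cd = 2                      # hypothetical number of appended channels
X = np.arange(2 * 3 * 5).reshape(2, 3, 5)

primary = X[np.s_[..., 0:-Cd]]   # everything except the last Cd entries
addmsk  = X[np.s_[..., -Cd:]]    # just the last Cd entries

print(primary.shape, addmsk.shape)   # (2, 3, 3) (2, 3, 2)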
base.py — project: voropy, author: nschloe
def get_vertex_mask(self, subdomain=None):
        if subdomain is None:
            # http://stackoverflow.com/a/42392791/353337
            return numpy.s_[:]
        if subdomain not in self.subdomains:
            self._mark_vertices(subdomain)
        return self.subdomains[subdomain]['vertices']
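
Returning numpy.s_[:] gives callers a mask-like object that selects everything, so the same indexing code works whether a real boolean mask or the full slice comes back. Roughly (get_mask is a made-up stand-in for the method above):

import numpy as np

def get_mask(use_subset):
    if not use_subset:
        return np.s_[:]                 # selects all entries
    return np.array([True, False, True])

values = np.array([10.0, 20.0, 30.0])
print(values[get_mask(False)])   # [10. 20. 30.]
print(values[get_mask(True)])    # [10. 30.]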
cma.py — project: cma, author: hardmaru
def typical_x(self, dim):
            off = self.rotate(np.floor(np.arange(0, 3, 3. / dim)) /
                          np.logspace(0, 1, dim), inverse=True)
            off[np.s_[3:]] += 0.005
            off[-1] *= 1e2
            off[0] /= 2.0e3 if off[0] > 0 else 1e3
            off[2] /= 3.01e4 if off[2] < 0 else 2e4
            return self._x_opt(dim) + off
test_indexing.py — project: krpcScripts, author: jwvanderbeck
def test_simple_broadcasting_errors(self):
        assign = self.assign
        s_ = np.s_

        a = np.zeros((5, 1))
        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))

        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))

        assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
test_ekerns.py — project: GPflow, author: GPflow
def index_block(y, x, D):
    return np.s_[y * D:(y + 1) * D, x * D:(x + 1) * D]
transforms.py — project: GPflow, author: GPflow
def forward(self, x):
        # create diagonal matrices
        m = np.zeros((x.size * self.dim)).reshape(-1, self.dim, self.dim)
        x = x.reshape(-1, self.dim)
        m[(np.s_[:],) + np.diag_indices(x.shape[1])] = x
        return m
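
The indexing trick above is that (np.s_[:],) + np.diag_indices(D) addresses the diagonal of every matrix in a stacked (N, D, D) array at once, so each row of x lands on the diagonal of its own matrix. A standalone version:

import numpy as np

x = np.array([[1., 2.],
              [3., 4.]])          # two rows -> two diagonal matrices
D = x.shape[1]

m = np.zeros((len(x), D, D))
m[(np.s_[:],) + np.diag_indices(D)] = x

print(m[0])   # [[1. 0.]
              #  [0. 2.]]
print(m[1])   # [[3. 0.]
              #  [0. 4.]]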
ImageStacker.py — project: VerySharp, author: wilecoyote2015
def processImage(self, index, data):
        # get the image
        raw_image = CommonFunctions.preprocessImage(data["image"], 
                                                   self.scale_factor,
                                                    interpolation=cv2.INTER_CUBIC)
        image_dimension = raw_image.shape

        # create output image as numpy array with upscaled image size
        processed_image = np.zeros(image_dimension, np.float32)

        # align all tiles
        for tile, transform_matrix in zip(self.tiles, data["transform_matrix"]):

            tile_slice_raw_image = np.s_[tile["y"][0]:tile["y"][1],
                                         tile["x"][0]:tile["x"][1]]
            raw_image_tile = raw_image[tile_slice_raw_image]
            tile_aligned = cv2.warpAffine(raw_image_tile,
                                          transform_matrix,
                                          (raw_image_tile.shape[1],raw_image_tile.shape[0]),
                                          flags=cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP);      

            # Insert the inner area of tile_aligned (so without margins) into
            # the appropriate area in the processed image
            min_x = tile["x"][0] + tile["margin_x"][0]
            min_y = tile["y"][0] + tile["margin_y"][0]
            max_x = tile["x"][1] - tile["margin_x"][1]
            max_y = tile["y"][1] - tile["margin_y"][1]
            tile_slice_processed_image = np.s_[min_y:max_y,
                                               min_x:max_x]

            max_y_aligned = tile_aligned.shape[0] - tile["margin_y"][1]
            max_x_aligned = tile_aligned.shape[1] - tile["margin_x"][1]
            tile_aligned_slice = np.s_[tile["margin_y"][0]:max_y_aligned,
                                       tile["margin_x"][0]:max_x_aligned]                                

            tile_aligned_without_margin = tile_aligned[tile_aligned_slice]

            processed_image[tile_slice_processed_image] = tile_aligned_without_margin

        return processed_image
lib_pipeline_img.py — project: PiLL, author: lofar-astron
def flatten(f, channel=0, freqaxis=0):
    """ Flatten a fits file so that it becomes a 2D image. Return new header and data """
    from astropy import wcs

    naxis=f[0].header['NAXIS']
    if naxis<2:
        raise RadioError('Can\'t make map from this')
    if naxis==2:
        return f[0].header,f[0].data

    w = wcs.WCS(f[0].header)
    wn=wcs.WCS(naxis=2)

    wn.wcs.crpix[0]=w.wcs.crpix[0]
    wn.wcs.crpix[1]=w.wcs.crpix[1]
    wn.wcs.cdelt=w.wcs.cdelt[0:2]
    wn.wcs.crval=w.wcs.crval[0:2]
    wn.wcs.ctype[0]=w.wcs.ctype[0]
    wn.wcs.ctype[1]=w.wcs.ctype[1]

    header = wn.to_header()
    header["NAXIS"]=2
    copy=('EQUINOX','EPOCH')
    for k in copy:
        r=f[0].header.get(k)
        if r:
            header[k]=r

    # Build an index with one entry per axis: keep the two sky axes,
    # pick the requested channel on the frequency axis, take 0 elsewhere.
    slc = []
    for i in range(naxis, 0, -1):
        if i <= 2:
            slc.append(np.s_[:])
        elif i == freqaxis:
            slc.append(channel)
        else:
            slc.append(0)

    # slc = (0,)*(naxis-2) + (np.s_[:],)*2
    return header, f[0].data[tuple(slc)]
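
The pattern above builds the index programmatically: one np.s_[:] per axis that should be kept, and a fixed integer for every axis that is collapsed (newer NumPy requires the final index to be a tuple rather than a list). A minimal version, assuming a fake 4D cube with the two sky axes last:

import numpy as np

data = np.random.rand(1, 4, 32, 32)   # e.g. (stokes, freq, y, x)
channel = 2

# keep the last two axes, pick one channel, collapse the leading axis
slicer = (0, channel) + (np.s_[:],) * 2
image = data[slicer]

print(image.shape)   # (32, 32)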
algorithms.py — project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia
def select_n_slow(dropped, n, keep, method):
    reverse_it = (keep == 'last' or method == 'nlargest')
    ascending = method == 'nsmallest'
    slc = np.s_[::-1] if reverse_it else np.s_[:]
    return dropped[slc].sort_values(ascending=ascending).head(n)
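
Here np.s_ lets the function pick between "reversed" and "unchanged" as plain data, then apply whichever index was chosen in a single step:

import numpy as np

values = np.array([3, 1, 4, 1, 5])

reverse_it = True
slc = np.s_[::-1] if reverse_it else np.s_[:]

print(values[slc])   # [5 1 4 1 3]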

