Example source code for Python's min()

average_precision_calculator.py (project: youtube-8m, author: wangheda)
def _zero_one_normalize(predictions, epsilon=1e-7):
    """Normalize the predictions to the range between 0.0 and 1.0.

    For some predictions, such as SVM scores, we need to normalize them before
    calculating the interpolated average precision. The normalization does not
    change the rank order of the original list and thus does not change the
    average precision.

    Args:
      predictions: a numpy 1-D array storing the sparse prediction scores.
      epsilon: a small constant to avoid a zero denominator.

    Returns:
      The normalized predictions.
    """
    denominator = numpy.max(predictions) - numpy.min(predictions)
    # Guard against a zero denominator with the built-in max(); the original
    # numpy.max(denominator, epsilon) passed epsilon as the axis argument.
    ret = (predictions - numpy.min(predictions)) / max(denominator, epsilon)
    return ret
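A minimal usage sketch with hypothetical scores, assuming numpy is imported as in the snippet:

import numpy

scores = numpy.array([-2.5, 0.0, 1.5, 4.0])  # hypothetical SVM margins
normalized = _zero_one_normalize(scores)
print(normalized)            # [0.  0.38461538  0.61538462  1.]
print(normalized.argsort())  # rank order is unchanged: [0 1 2 3]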
gui.py (project: spyking-circus, author: spyking-circus)
def update_data_sort_order(self, new_sort_order=None):
        if new_sort_order is not None:
            self.current_order = new_sort_order
        self.update_sort_idcs()
        self.data_image.set_extent((self.raw_lags[0], self.raw_lags[-1],
                            0, len(self.sort_idcs)))
        self.data_ax.set_ylim(0, len(self.sort_idcs))
        all_raw_data  = self.raw_data.copy()  # copy so the stored raw data is not rescaled in place on every redraw
        all_raw_data /= (1 + self.raw_data.mean(1)[:, np.newaxis])
        if len(all_raw_data) > 0:
            cmax          = 0.5*all_raw_data.max()
            cmin          = 0.5*all_raw_data.min()
            all_raw_data  = all_raw_data[self.sort_idcs, :]
        else:
            cmin = 0
            cmax = 1
        self.data_image.set_data(all_raw_data)
        self.data_image.set_clim(cmin, cmax)
        self.data_selection.set_y(len(self.sort_idcs)-len(self.selected_points))
        self.data_selection.set_height(len(self.selected_points))
        self.update_data_plot()
gui.py (project: spyking-circus, author: spyking-circus)
def plot_electrodes(self):
        if not getattr(self, 'collections', None):
            # It is important to set one facecolor per point so that we can change
            # it later
            self.electrode_collection = self.electrode_ax.scatter(self.x_position,
                                                                  self.y_position,
                                                                  facecolor=['black' for _ in self.x_position],
                                                                  s=30)
            self.electrode_ax.set_xlabel('Space [um]')
            self.electrode_ax.set_xticklabels([])
            self.electrode_ax.set_ylabel('Space [um]')
            self.electrode_ax.set_yticklabels([])
        else:
            self.electrode_collection.set_offsets(np.hstack([self.x_position[np.newaxis, :].T,
                                                             self.y_position[np.newaxis, :].T]))
        ax = self.electrode_ax
        ymin, ymax = min(self.y_position), max(self.y_position)
        yrange = (ymax - ymin)*0.5 * 1.05  # stretch everything a bit
        ax.set_ylim((ymax + ymin)*0.5 - yrange, (ymax + ymin)*0.5 + yrange)
        xmin, xmax = min(self.x_position), max(self.x_position)
        xrange = (xmax - xmin)*0.5 * 1.05  # stretch everything a bit
        ax.set_xlim((xmax + xmin)*0.5 - xrange, (xmax + xmin)*0.5 + xrange)

        self.ui.raw_data.draw_idle()
gpUtils.py (project: MKLMM, author: omerwe)
def removeTopPCs(X, numRemovePCs):
    # Requires: time, numpy as np, scipy.linalg as la, scipy.linalg.blas as blas,
    # plus the project's symmetrize() helper.
    t0 = time.time()
    X_mean = X.mean(axis=0)
    X -= X_mean
    XXT = symmetrize(blas.dsyrk(1.0, X, lower=0))  # X.dot(X.T) via a BLAS rank-k update
    s, U = la.eigh(XXT)
    if np.min(s) < -1e-4:
        raise Exception('Negative eigenvalues found')
    s[s<0]=0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    s = np.sqrt(s)

    #remove null PCs
    ind = (s>1e-6)
    U = U[:, ind]
    s = s[ind]

    V = X.T.dot(U/s)    
    # print('max diff:', np.max(((U*s).dot(V.T) - X)**2))
    X = (U[:, numRemovePCs:]*s[numRemovePCs:]).dot((V.T)[numRemovePCs:, :])
    X += X_mean

    return X
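A minimal driver under stated assumptions: the symmetrize() stand-in below only mirrors the upper triangle returned by dsyrk and may differ from the project's own helper.

import time
import numpy as np
from scipy import linalg as la
from scipy.linalg import blas

def symmetrize(A):
    # assumption: mirror the upper triangle (filled by dsyrk) into the lower one
    return np.triu(A) + np.triu(A, 1).T

rng = np.random.RandomState(0)
X = rng.randn(50, 20)
X_reduced = removeTopPCs(X.copy(), numRemovePCs=2)
# the two leading singular values of the centered data should shrink to ~0
print(np.linalg.svd(X - X.mean(0), compute_uv=False)[:3])
print(np.linalg.svd(X_reduced - X_reduced.mean(0), compute_uv=False)[:3])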
lungs_var3_d8g_222f.py (project: kaggle_dsb2017, author: astoc)
def resample(image, scan, new_spacing=[1, 1, 1]):
    # Determine current pixel spacing
    spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
    spacing = np.array(list(spacing))

    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor

    # Note: neither mode='wrap' nor cval=-1024 guarantees that the min and max
    # of the image are unchanged by the interpolation.
    image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest')
    # Alternative: order=1 (bilinear) preserves the min and max of the image and
    # is probably better for this use (also faster than the default spline):
    # image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest', order=1)

    return image, new_spacing
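A hedged usage sketch; the DICOM slice is mocked with a SimpleNamespace carrying only the two attributes the function reads:

import numpy as np
import scipy.ndimage
from types import SimpleNamespace

# hypothetical CT geometry: 2.5 mm slices, 0.7 mm in-plane pixels
slice0 = SimpleNamespace(SliceThickness=2.5, PixelSpacing=[0.7, 0.7])
image = np.random.randint(-1000, 400, size=(40, 64, 64)).astype(np.int16)

resampled, spacing = resample(image, [slice0], new_spacing=[1, 1, 1])
print(resampled.shape, spacing)  # approx (100, 45, 45), spacing close to [1. 1. 1.]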
train_catastrophe_model_human.py (project: human-rl, author: gsastry)
def fit(self, X_train, y_train, X_valid, y_valid, X_test, y_test, steps=400):
        tf.global_variables_initializer().run()
        redirect=FDRedirector(STDERR)
        for i in range(steps):
            redirect.start()
            feed_dict = {self.labels:y_train}
            for key, tensor in self.features.items():
                feed_dict[tensor] = X_train[key]
            predictions, loss = sess.run([self.prediction, self.train_op], feed_dict=feed_dict)  # sess: the tf.Session active in the enclosing scope
            if i % 10 == 0:
                print("step:{} loss:{:.3g} np.std(predictions):{:.3g}".format(i, loss, np.std(predictions)))
                self.threshold = float(min(self.threshold_from_data(X_valid, y_valid), self.threshold_from_data(X_train, y_train)))
                tf.get_collection_ref("threshold")[0] = self.threshold
                self.print_metrics(X_train, y_train, "Training")
                self.print_metrics(X_valid, y_valid, "Validation")
            errors = redirect.stop()
            if errors:
                print(errors)
        self.print_metrics(X_test, y_test, "Test")
pbo_util.py (project: scikit-dataaccess, author: MITHaystack)
def getLatLonRange(pbo_info, station_list):
    '''
    Retrieve the range of latitudes and longitudes occupied by a set of stations.

    @param pbo_info: PBO metadata
    @param station_list: List of stations

    @return list containing two tuples, lat_range and lon_range
    '''

    coord_list = getStationCoords(pbo_info, station_list)

    lat_list = []
    lon_list = []
    for coord in coord_list:
        lat_list.append(coord[0])
        lon_list.append(coord[1])

    lat_range = (np.min(lat_list), np.max(lat_list))
    lon_range = (np.min(lon_list), np.max(lon_list))

    return [lat_range, lon_range]
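The core of the range computation on hypothetical coordinates (getStationCoords and the PBO metadata are not shown in this excerpt):

import numpy as np

coord_list = [(45.20, -122.10), (44.95, -121.80), (45.40, -122.55)]  # hypothetical stations
lat_list = [c[0] for c in coord_list]
lon_list = [c[1] for c in coord_list]
print((np.min(lat_list), np.max(lat_list)))  # (44.95, 45.4)
print((np.min(lon_list), np.max(lon_list)))  # (-122.55, -121.8)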
voxelchain_visualize.py (project: voxcelchain, author: hiroaki-kaneda)
def conv1(model):
    n1, n2, x, y, z = model.conv1.W.shape
    fig = plt.figure()
    for nn in range(0, n1):
        ax = fig.add_subplot(4, 5, nn+1, projection='3d')
        ax.set_xlim(0.0, x)
        ax.set_ylim(0.0, y)
        ax.set_zlim(0.0, z)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        # per-filter extrema for min-max normalization (w_min/w_max avoid
        # shadowing the built-in min()/max())
        w_max = np.max(model.conv1.W.data[nn, :])
        w_min = np.min(model.conv1.W.data[nn, :])
        w_range = w_max - w_min
        for xx in range(0, x):
            for yy in range(0, y):
                for zz in range(0, z):
                    C = (model.conv1.W.data[nn, 0, xx, yy, zz] - w_min) / w_range
                    color = cm.cool(C)
                    C = abs(1.0 - C)
                    ax.plot(np.array([xx]), np.array([yy]), np.array([zz]), "o", color=color, ms=7.0*C, mew=0.1)

    plt.savefig("result/graph_conv1.png")
Visualizer.py (project: rank-ordered-autoencoder, author: paulbertens)
def reshapeWeights(self, weights, normalize=True, modifier=None):
        # reshape the weights matrix to a grid for visualization
        n_rows = int(np.sqrt(weights.shape[1]))
        n_cols = int(np.sqrt(weights.shape[1]))
        kernel_size = int(np.sqrt(weights.shape[0] / 3))
        weights_grid = np.zeros((int((np.sqrt(weights.shape[0] / 3) + 1) * n_rows),
                                 int((np.sqrt(weights.shape[0] / 3) + 1) * n_cols), 3), dtype=np.float32)
        # the loop bounds and flat index must use integer division under Python 3
        for i in range(weights_grid.shape[0] // (kernel_size + 1)):
            for j in range(weights_grid.shape[1] // (kernel_size + 1)):
                index = i * (weights_grid.shape[0] // (kernel_size + 1)) + j
                if not np.isclose(np.sum(weights[:, index]), 0):
                    cell = np.s_[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                                 j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size]
                    if normalize:
                        # min-max normalize each kernel into [0, 1]
                        weights_grid[cell] = (weights[:, index].reshape(kernel_size, kernel_size, 3)
                                              - np.min(weights[:, index])) / \
                                             ((np.max(weights[:, index]) - np.min(weights[:, index])) + 1.e-6)
                    else:
                        weights_grid[cell] = weights[:, index].reshape(kernel_size, kernel_size, 3)
                    if modifier is not None:
                        weights_grid[cell] *= modifier[index]

        return weights_grid
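A quick shape check with random weights (self is unused in the excerpt, so None suffices for a standalone call):

import numpy as np

rng = np.random.RandomState(0)
weights = rng.randn(4 * 4 * 3, 9).astype(np.float32)  # nine 4x4 RGB kernels
grid = reshapeWeights(None, weights)
print(grid.shape)  # (15, 15, 3): a 3x3 grid of 4x4 kernels with 1-pixel gaps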
disco_random_walks.py (project: genomedisco, author: kundajelab)
def fill_hdf5_with_sparse_by_chunk(mym1, mym2, fname, chunksize):
    start1 = 0
    end1 = 0
    n = mym1.shape[0]

    f = h5py.File(fname, 'w')
    m1hdf5 = f.create_dataset('m1', shape=(n, n), dtype='float')
    m2hdf5 = f.create_dataset('m2', shape=(n, n), dtype='float')

    while end1 < n:
        end1 = np.min([n, (start1 + chunksize)])
        print('start1: ' + str(start1))

        if (end1 - start1) == 1:
            m1hdf5[start1, :] = mym1[start1, :].toarray()
            m2hdf5[start1, :] = mym2[start1, :].toarray()
        else:
            m1hdf5[start1:end1, :] = mym1[start1:end1, :].toarray()
            m2hdf5[start1:end1, :] = mym2[start1:end1, :].toarray()
        start1 = end1
    print('sum of 1')
    print(m1hdf5[:, :].sum())
    print(m2hdf5[:, :].sum())
    f.close()
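A small driver with random sparse matrices (writes a hypothetical dense_copy.hdf5 beside the script):

import h5py
import numpy as np
import scipy.sparse

m1 = scipy.sparse.random(10, 10, density=0.2, format='csr', random_state=0)
m2 = scipy.sparse.random(10, 10, density=0.2, format='csr', random_state=1)
fill_hdf5_with_sparse_by_chunk(m1, m2, 'dense_copy.hdf5', chunksize=4)
# both datasets are now dense copies of the sparse inputs, written 4 rows at a time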
uw_rgbd.py (project: pybot, author: spillai)
def __init__(self, target, instance, files): 
            self.target = target 
            self.instance = instance
            mask_files = natural_sort(filter(lambda fn: '_maskcrop.png' in fn, files))
            depth_files = natural_sort(filter(lambda  fn: '_depthcrop.png' in fn, files))
            rgb_files = natural_sort(list(set(files) - set(mask_files) - set(depth_files)))
            loc_files = natural_sort(map(lambda fn: fn.replace('_crop.png', '_loc.txt'), rgb_files))

            # Ensure all have equal number of files (Hack! doesn't ensure filename consistency)
            nfiles = np.min([len(loc_files), len(mask_files), len(depth_files), len(rgb_files)])
            mask_files, depth_files, rgb_files, loc_files = mask_files[:nfiles], depth_files[:nfiles], \
                                                            rgb_files[:nfiles], loc_files[:nfiles]

            # print target, instance, len(loc_files), len(mask_files), len(depth_files), len(rgb_files)
            assert(len(mask_files) == len(depth_files) == len(rgb_files) == len(loc_files))

            # Read images
            self.rgb = ImageDatasetReader.from_filenames(rgb_files)
            self.depth = ImageDatasetReader.from_filenames(depth_files)
            self.mask = ImageDatasetReader.from_filenames(mask_files)

            # Read top-left locations of bounding box
            self.locations = np.vstack([np.loadtxt(loc, delimiter=',', dtype=np.int32) 
                                        for loc in loc_files])
bow_utils.py (project: pybot, author: spillai)
def add(self, desc):
        if self.built_: 
            return

        if self.vocab_len_ < self.N_:
            Nd = len(desc)
            st, end = self.vocab_len_, min(self.vocab_len_ + Nd, self.N_)
            self.vocab_data_[st:end] = desc[:end-st]
            self.vocab_len_ += len(desc)
            print('Vocabulary building: {:}/{:}'.format(self.vocab_len_, self.N_))
        else: 
            print('Vocabulary built')
            self.built_ = True

        # else: 
        #     # Build vocab if not built already
        #     self.voc_.build(self.vocab_data_, self.K_)
        #     self.vocab_ = self.voc_.getCentroids()

        #     sz = self.vocab_.shape[:2]
        #     if sz[0] != self.K_ or sz[1] != self.D_: 
        #         raise RuntimeError('Voc error! KxD={:}x{:}, expected'.format(sz[0],sz[1],self.K_,self.D_))

        #     self.save('vocab.yaml.gz')
basic_model.py (project: sea-lion-counter, author: rdinse)
def inc_region(self, dst, y, x, h, w):
    '''Increments dst in the specified region. Runs fastest on np.int8, but not much slower on
    np.int16.'''

    dh, dw = dst.shape
    h2 = h // 2
    w2 = w // 2
    py = y - h2 
    px = x - w2 
    y_min = max(0, py)
    y_max = min(dh, y + h2)
    x_min = max(0, px)
    x_max = min(dw, x + w2)
    if y_max - y_min <= 0 or x_max - x_min <= 0:
      return

    dst[y_min:y_max, x_min:x_max] += 1
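A standalone check (self is unused, so None suffices; the region is clipped at the array border):

import numpy as np

canvas = np.zeros((6, 6), dtype=np.int16)
inc_region(None, canvas, y=0, x=0, h=4, w=4)
print(canvas)  # a 2x2 block of ones in the top-left corner; the rest is clipped away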
evaluation.py (project: a-nice-mc, author: ermongroup)
def effective_sample_size(x, mu, var, logger):
    """
    Calculate the effective sample size of a sequence generated by MCMC.
    :param x: samples, shaped (batch size, time, dimension)
    :param mu: mean of the variable
    :param var: variance of the variable
    :param logger: logger used to report the ESS summary
    :return: effective sample size of the sequence
    Make sure that `mu` and `var` are correct!
    """
    # batch size, time, dimension
    b, t, d = x.shape
    ess_ = np.ones([d])
    for s in range(1, t):
        p = auto_correlation_time(x, s, mu, var)
        if np.sum(p > 0.05) == 0:
            break
        else:
            for j in range(0, d):
                if p[j] > 0.05:
                    ess_[j] += 2.0 * p[j] * (1.0 - float(s) / t)

    logger.info('ESS: max [%f] min [%f] / [%d]' % (t / np.min(ess_), t / np.max(ess_), t))
    return t / ess_
evolution_strategy.py (project: pycma, author: CMA-ES)
def alleviate_conditioning_in_coordinates(self, condition=1e8):
        """pass scaling from `C` to `sigma_vec`.

        As a result, `C` is a correlation matrix, i.e., all diagonal
        entries of `C` are `1`.
        """
        if max(self.dC) / min(self.dC) > condition:
            # allows for much larger condition numbers, if axis-parallel
            if hasattr(self, 'sm') and isinstance(self.sm, sampler.GaussFullSampler):
                old_coordinate_condition = max(self.dC) / min(self.dC)
                old_condition = self.sm.condition_number
                factors = self.sm.to_correlation_matrix()
                self.sigma_vec *= factors
                self.pc /= factors
                self._updateBDfromSM(self.sm)
                utils.print_message('\ncondition in coordinate system exceeded'
                                    ' %.1e, rescaled to %.1e, '
                                    '\ncondition changed from %.1e to %.1e'
                                      % (old_coordinate_condition, max(self.dC) / min(self.dC),
                                         old_condition, self.sm.condition_number),
                                    iteration=self.countiter)
evolution_strategy.py (project: pycma, author: CMA-ES)
def plot_axes_scaling(self, iabscissa=1):
        from matplotlib import pyplot
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
            pyplot.text(0, dat.D[-1, 5],
                        'all axes scaling values equal to %s'
                        % str(dat.D[-1, 5]),
                        verticalalignment='center')
            return self  # nothing interesting to plot
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        # pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principal Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
transformations.py (project: pycma, author: CMA-ES)
def initialize(self, length=None):
        """see ``__init__``"""
        if length is None:
            length = len(self.bounds)
        max_i = min((len(self.bounds) - 1, length - 1))
        self._lb = array([self.bounds[min((i, max_i))][0]
                          if self.bounds[min((i, max_i))][0] is not None
                          else -np.Inf
                          for i in range(length)], copy=False)
        self._ub = array([self.bounds[min((i, max_i))][1]
                          if self.bounds[min((i, max_i))][1] is not None
                          else np.Inf
                          for i in range(length)], copy=False)
        lb = self._lb
        ub = self._ub
        # define added values for lower and upper bound
        self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                             if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
        self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                             if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
RelativeDualityGap.py (project: invo, author: rafidrm)
def _solveRelativeDG(self, points):
        """ Solves the norm constrained version of the problem.

            min sum z_q
            st  z_q >= c'x_q - 1
                z_q >= 1 - c'x_q
                A'y = c
                b'y = 1
                ||c|| = 1
                y >= 0
        """
        if self.normalize_c == 1:
            error = self._solveRelativeDGNorm1(points)
        elif self.normalize_c == np.inf:
            error = self._solveRelativeDGNormInf(points)
        else:
            raise ValueError('normalize_c must be 1 or np.inf')
        return error
coordutils.py (project: astrobase, author: waqasbhatti)
def xmatch_basic(ra1, dec1, ra2, dec2, match_radius=5.0):
    '''
    This is a quick matcher that uses great_circle_dist to find the closest
    object in (ra2,dec2) within match_radius to (ra1,dec1). (ra1,dec1) must be a
    scalar pair, while (ra2,dec2) must be np.arrays of the same length.

    PARAMETERS:
    ra1/dec1: coordinates of the target to match
    ra2/dec2: coordinate np.arrays of the list of coordinates to match to

    RETURNS:

    A tuple like the following:

    (True -> matched or False -> no match,
     minimum distance between target and list)

    '''

    min_dist_arcsec = np.min(great_circle_dist(ra1,dec1,ra2,dec2))

    if (min_dist_arcsec < match_radius):
        return (True,min_dist_arcsec)
    else:
        return (False,min_dist_arcsec)
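A hedged driver; the great_circle_dist stand-in below is a plain haversine returning arcseconds and may differ from astrobase's own implementation:

import numpy as np

def great_circle_dist(ra1, dec1, ra2, dec2):
    # haversine distance in arcseconds (stand-in; an assumption)
    ra1, dec1, ra2, dec2 = map(np.radians, (ra1, dec1, ra2, dec2))
    a = (np.sin((dec2 - dec1) / 2.0)**2
         + np.cos(dec1) * np.cos(dec2) * np.sin((ra2 - ra1) / 2.0)**2)
    return np.degrees(2.0 * np.arcsin(np.sqrt(a))) * 3600.0

ra2 = np.array([150.05, 150.001, 150.2])
dec2 = np.array([2.2, 2.2005, 2.3])
print(xmatch_basic(150.0, 2.2, ra2, dec2))  # (True, ~4.0): the second object matches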
utils.py (project: CausalGAN, author: mkocaoglu)
def scatter2d(x,y,title='2dscatterplot',xlabel=None,ylabel=None):
    fig=plt.figure()
    plt.scatter(x,y)
    plt.title(title)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)

    if not 0 <= np.min(x) <= np.max(x) <= 1:
        raise ValueError('summary_scatter2d title: %s input x exceeded [0,1] range. '
                         'min: %f max: %f' % (title, np.min(x), np.max(x)))
    if not 0 <= np.min(y) <= np.max(y) <= 1:
        raise ValueError('summary_scatter2d title: %s input y exceeded [0,1] range. '
                         'min: %f max: %f' % (title, np.min(y), np.max(y)))

    plt.xlim([0,1])
    plt.ylim([0,1])
    return fig
test_nestio.py (project: NeoAnalysis, author: neoanalysis)
def test_t_start_t_stop(self):
        """
        Tests if the t_start and t_stop arguments are correctly processed.
        """
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)

        t_stop_targ = 490. * pq.ms
        t_start_targ = 410. * pq.ms

        seg = r.read_segment(gid_list=[], t_start=t_start_targ,
                             t_stop=t_stop_targ, lazy=False,
                             id_column_gdf=0, time_column_gdf=1)
        sts = seg.spiketrains
        self.assertTrue(np.max([np.max(st.magnitude) for st in sts
                                if len(st) > 0])
                        < t_stop_targ.rescale(sts[0].times.units).magnitude)
        self.assertTrue(np.min([np.min(st.magnitude) for st in sts
                                if len(st) > 0])
                        >= t_start_targ.rescale(sts[0].times.units).magnitude)
tdx_formula.py (project: tdx_formula, author: woodylee1974)
def CLAMP(self, param):
        """
        CLAMP(value, min, max)

        make the value to be clamped into the range of [min, max]
        """
        values = param[0]
        min_ = param[1]
        max_ = param[2]

        class Context:
            def __init__(self, min_, max_):
                self.min_ = min_
                self.max_ = max_

            def handleInput(self, value):
                if value < self.min_:
                    return self.min_
                elif value > self.max_:
                    return self.max_
                return value

        ctx = Context(min_, max_)
        result = values.apply(ctx.handleInput)
        return result
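A quick behavioral check; values.apply suggests a pandas Series, and pandas' built-in clip gives the same result in one call (self is unused, so None suffices):

import pandas as pd

values = pd.Series([-5, 0, 3, 12])
print(CLAMP(None, [values, 0, 10]).tolist())    # [0, 0, 3, 10]
print(values.clip(lower=0, upper=10).tolist())  # [0, 0, 3, 10]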
recipe-578082.py (project: code, author: ActiveState)
def get_min_pos_kinect():

    (depth,_) = get_depth()

    minVal = np.min(depth) #This is the minimum value from the depth image
    minPos = np.argmin(depth) #This is the raw index of the minimum value above
    xPos = np.mod(minPos, xSize) #This is the x component of the raw index
    yPos = minPos//xSize #This is the y component of the raw index

    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))

    return (xSize - xPos-10, yPos, minVal)
motif_tools.py (project: mbin, author: fanglab)
def shorten_motifs( contig_motifs, highscore_motifs ):
    """
    Keep only the shortest, most concise version of the high scoring
    motifs (reduces redundancy).
    """
    keeper_motifs    = set(highscore_motifs.keys())
    if len(highscore_motifs)>0:
        shortest_contiguous = min([len(m.split("-")[0]) for m in highscore_motifs.keys()])
        # (1) Sort by keys; shortest motif to longest
        motifs_s = sorted(highscore_motifs, key=len)
        # (2) For each motif, check if it's contained in a longer version of other motifs
        for m in motifs_s:
            motif_str =     m.split("-")[0]
            motif_idx = int(m.split("-")[1])
            for remaining in list(keeper_motifs):
                remaining_str =     remaining.split("-")[0]
                remaining_idx = int(remaining.split("-")[1])
                match         = re.search(motif_str, remaining_str)
                if match is not None and (motif_idx + match.start()) == remaining_idx and len(remaining_str) > len(motif_str):
                    # (3) If so, remove the longer version
                    keeper_motifs.remove(remaining)
    return keeper_motifs
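A small worked example with hypothetical motif keys of the form "motif-index" (requires re, as in the snippet):

import re

highscore_motifs = {'GATC-1': 12.0, 'AGATC-2': 11.5}  # hypothetical scores
print(shorten_motifs({}, highscore_motifs))  # {'GATC-1'}: AGATC-2 embeds GATC at the same site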
test_loss_functions.py (project: risk-slim, author: ustunb)
def get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = None):
    "global variables: L0_reg_ind"
    edge_values = np.vstack([Z_min * rho_lb,
                             Z_max * rho_lb,
                             Z_min * rho_ub,
                             Z_max * rho_ub])

    if L0_max is None or L0_max == Z_min.shape[0]:
        s_min = np.sum(np.min(edge_values, axis = 0))
        s_max = np.sum(np.max(edge_values, axis = 0))
    else:
        min_values = np.min(edge_values, axis = 0)
        s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
        s_min_no_reg = np.sum(min_values[~L0_reg_ind])
        s_min = s_min_reg + s_min_no_reg

        max_values = np.max(edge_values, axis = 0)
        s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
        s_max_no_reg = np.sum(max_values[~L0_reg_ind])
        s_max = s_max_reg + s_max_no_reg

    return s_min, s_max


#setup weights
lattice_cpa.py 文件源码 项目:risk-slim 作者: ustunb 项目源码 文件源码 阅读 41 收藏 0 点赞 0 评论 0
def get_score_bounds(Z_min, Z_max, rho_lb, rho_ub, L0_reg_ind = None, L0_max = None):
    edge_values = np.vstack([Z_min * rho_lb,
                             Z_max * rho_lb,
                             Z_min * rho_ub,
                             Z_max * rho_ub])

    if (L0_max is None) or (L0_reg_ind is None) or (L0_max == Z_min.shape[0]):
        s_min = np.sum(np.min(edge_values, axis=0))
        s_max = np.sum(np.max(edge_values, axis=0))
    else:
        min_values = np.min(edge_values, axis=0)
        s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
        s_min_no_reg = np.sum(min_values[~L0_reg_ind])
        s_min = s_min_reg + s_min_no_reg

        max_values = np.max(edge_values, axis=0)
        s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
        s_max_no_reg = np.sum(max_values[~L0_reg_ind])
        s_max = s_max_reg + s_max_no_reg

    return s_min, s_max
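A tiny worked example with hypothetical bounds for three regularized coefficients:

import numpy as np

Z_min = np.array([-1.0, -2.0, 0.0])
Z_max = np.array([1.0, 2.0, 3.0])
rho_lb = np.array([-5.0, -5.0, -5.0])
rho_ub = np.array([5.0, 5.0, 5.0])
L0_reg_ind = np.array([True, True, True])

# with at most L0_max = 2 nonzero coefficients, only the two most extreme
# per-coefficient contributions count toward the bounds
print(get_score_bounds(Z_min, Z_max, rho_lb, rho_ub, L0_reg_ind, L0_max=2))  # (-25.0, 25.0)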
preprocess.py (project: bob.bio.base, author: bioidiap)
def main(command_line_parameters=None):
  """Preprocesses the given image with the given preprocessor."""
  args = command_line_arguments(command_line_parameters)

  logger.debug("Loading preprocessor")
  preprocessor = bob.bio.base.load_resource(' '.join(args.preprocessor), "preprocessor")

  logger.debug("Loading input data from file '%s'%s", args.input_file, " and '%s'" % args.annotation_file if args.annotation_file is not None else "")
  data = preprocessor.read_original_data(BioFile(1, args.input_file, 2), "", "")
  annotations = bob.db.base.annotations.read_annotation_file(args.annotation_file, 'named') if args.annotation_file is not None else None

  logger.info("Preprocessing data")
  preprocessed = preprocessor(data, annotations)
  preprocessor.write_data(preprocessed, args.output_file)
  logger.info("Wrote preprocessed data to file '%s'", args.output_file)

  if args.convert_as_image is not None:
    converted = bob.core.convert(preprocessed, 'uint8', dest_range=(0,255), source_range=(numpy.min(preprocessed), numpy.max(preprocessed)))
    bob.io.base.save(converted, args.convert_as_image)
    logger.info("Wrote preprocessed data to image file '%s'", args.convert_as_image)

