Example source code for Python's append()
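
The snippets below are collected from real projects and mix two different "append" operations: the built-in list.append, which mutates a list in place and returns None, and numpy.append, which always returns a new array and flattens its inputs unless axis is given. A minimal sketch of the difference, using made-up data:

import numpy as np

# list.append mutates the list in place
points = []
points.append([3, 4])                    # points is now [[3, 4]]

# np.append returns a new array; the original stays unchanged
arr = np.empty((0, 2))
arr = np.append(arr, [[3, 4]], axis=0)   # shape (1, 2)
flat = np.append(np.array([1, 2]), 3)    # no axis given: flattened result [1 2 3]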

algo.py (project: MultiObjectTracker, author: alokwhitewolf)
def get_points(event,x,y,flags,param):
    global lpnts,rpnts

    if event == cv2.EVENT_LBUTTONDOWN:
        lpnts = np.append(lpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [lpnts], False, (0, 0, 255))



    if event == cv2.EVENT_RBUTTONDOWN:
        rpnts = np.append(rpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [rpnts], False, (255, 0, 0))

        if rpnts.size>2:
            check(lpnts, rpnts[-1], rpnts[-2])



#check if the new point crosses a line
velocity.py (project: MultiObjectTracker, author: alokwhitewolf)
def get_points(event, x, y, flags, param):
    global lpnts, mode, counter, which_intersect

    if event == cv2.EVENT_LBUTTONDOWN:
        lpnts = np.append(lpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [lpnts], False, (0, 0, 255))
        if lpnts.size > 2:
            if mode == 0:

                #check(l1, lpnts[-1], lpnts[-2])
                if check(l1, lpnts[-1], lpnts[-2]):
                    which_intersect = 0
                    mode = 1
                #check(l2, lpnts[-1], lpnts[-2])
                if check(l2, lpnts[-1], lpnts[-2]):
                    which_intersect = 1
                    mode = 1

            elif mode == 1:

                counter += 1
                if check(lines[(which_intersect + 1) % 2], lpnts[-1], lpnts[-2]):
                    mode = 3
                    print counter


# check if the new point crosses a line
coonswarp.py (project: RasterFairy, author: Quasimondo)
def rectifyCloud(xyc,autoPerimeterOffset=True,autoPerimeterDensity=True,
                 width=64, height=64, 
                 perimeterSubdivisionSteps=4, paddingScale=1.05, 
                 smoothing=0.001, warpQuality=9, perimeterOffset=None ):

    sourceGridPoints = getCloudGrid( xyc,autoPerimeterOffset=autoPerimeterOffset,autoPerimeterDensity=autoPerimeterDensity,
                 width=width, height=width, 
                 perimeterSubdivisionSteps=perimeterSubdivisionSteps, paddingScale=paddingScale, 
                 smoothing=smoothing, warpQuality=warpQuality, perimeterOffset=perimeterOffset)

    targetGridPoints = []
    for yi in range(height):
        for xi in range(width):
            targetGridPoints.append([xi,yi])

    return warpCloud( xyc, sourceGridPoints, targetGridPoints, warpQuality=warpQuality )
pca.py (project: cellranger, author: 10XGenomics)
def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
    mat_disp = (mat_var - mat_mean) / np.square(mat_mean)

    quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
    quantiles = np.append(quantiles, mat_mean.max())

    # merge bins with no difference in value
    quantiles = np.unique(quantiles)

    if len(quantiles) <= 1:
        # pathological case: the means are all identical. just return raw dispersion.
        return mat_disp

    # calc median dispersion per bin
    (disp_meds, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, mat_disp, statistic='median', bins=quantiles)

    # calc median absolute deviation of dispersion per bin
    disp_meds_arr = disp_meds[disp_bins-1] # 0th bin is empty since our quantiles start from 0
    disp_abs_dev = abs(mat_disp - disp_meds_arr)
    (disp_mads, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, disp_abs_dev, statistic='median', bins=quantiles)

    # calculate normalized dispersion
    disp_mads_arr = disp_mads[disp_bins-1]
    disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
    return disp_norm
Analysis.py (project: BioNanoAnalyst, author: AppliedBioinformatics)
def parse_fasta(self):
        self.ref_id=dict()
        self.ref_inf=dict()
        i=1
        N = 0
        ref_inf=np.empty(shape=[0,3])
        for seqs in SeqIO.parse(self.ref,'fasta'):
            seq_id = seqs.id
            self.ref_id[i] = seq_id
            seq = str(seqs.seq.upper())
            seq_len = len(seq)
            self.ref_inf[seq_id]=seq_len
            N+=seq.count('N')
            ref_inf = np.append(ref_inf,[[i,seq_id,seq_len]],axis=0)
            i+=1
        self.ref_detail = pd.DataFrame(ref_inf,columns=['Index','Contig','Length(bp)'])
        self.N = N
Analysis.py (project: BioNanoAnalyst, author: AppliedBioinformatics)
def qualification_filter(self):
        """
        Provide information on the qualified and unqualified contigs from the original fasta file,
        using the criterion: >20Kb and >=5 restriction sites inside.
        """
        unqualified = np.empty(shape=[0,3])
        qualified = np.empty(shape=[0,4])
        rm_dup = self.RcmapTable[['CMapId','ContigLength','NumSites']].drop_duplicates()
        for i in self.ref_id.keys():
            index = i
            name = self.ref_id[i]
            length = self.ref_inf[name]
            if i not in self.RcmapTable['CMapId'].unique():
                unqualified = np.append(unqualified,[[index,name, length]],axis=0)
            else:
                Id = rm_dup[rm_dup['CMapId']==i].index[0]
                sites = rm_dup['NumSites'][Id]
                qualified = np.append(qualified,[[index,name,length,sites]],axis=0)
        self.unqualified = pd.DataFrame(unqualified, columns=['index','contig','length(bp)'])
        self.qualified = pd.DataFrame(qualified, columns=['index','contig','length(bp)','numSites'])
GradientBoostingAgentClass.py (project: simple_rl, author: david-abel)
def _pad_features_with_zeros(self, state, action):
        '''
        Args:
            features (iterable)

        Returns:
            (list): Of the same length as self.max_state_features
        '''
        features = state.features()
        while len(features) < self.max_state_features:
            features = np.append(features, 0)

        # Reshape per update to cluster regression in sklearn 0.17.
        reshaped_features = np.append(features, [self.actions.index(action)])
        reshaped_features = reshaped_features.reshape(1, -1)

        return reshaped_features
trainModel.py (project: Sound-classification-on-Raspberry-Pi-with-Tensorflow, author: GianlucaPaolocci)
def parse_audio_files(parent_dir,sub_dirs,file_ext='*.wav'):
    ignored = 0
    features, labels, name = np.empty((0,161)), np.empty(0), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        print sub_dir
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            try:
                mfccs, chroma, mel, contrast, tonnetz = extract_features(fn)
                ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
                features = np.vstack([features,ext_features])
                l = [fn.split('-')[1]] * (mfccs.shape[0])
                labels = np.append(labels, l)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                ignored += 1
    print "Ignored files: ", ignored
    return np.array(features), np.array(labels, dtype = np.int)
_database.py (project: ananke, author: beiko-lab)
def fill_array(self, target, value, chunk_size = 1000):
        """Fill the target HDF5 array with a single value. Useful for 
        initializing an array, since the rhdf5 package tends to segfault if you
        load an uninitialized data set.

        Parameters
        ----------
        target: str
            the location of the HDF5 array, e.g., "samples/time"
        value: any
            the value to fill the array with
        chunk_size: int
            the number of items to insert at a time. This only needs to be
            increased for very large data sets.
        """
        n = self.h5_table[target].shape[0]
        chunks = np.append(np.arange(0, n, chunk_size), n)
        for i in range(len(chunks)-1):
            self.h5_table[target][chunks[i]:chunks[i+1]] = (
                                            [value]*(chunks[i+1] - chunks[i]) )
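
The chunk-boundary pattern in fill_array above, np.append(np.arange(0, n, chunk_size), n), is easier to see with concrete numbers. A minimal sketch with hypothetical sizes (n and chunk_size are made up for illustration):

import numpy as np

n, chunk_size = 2500, 1000
chunks = np.append(np.arange(0, n, chunk_size), n)
print(chunks)  # [   0 1000 2000 2500]
# i.e. the slices [0:1000], [1000:2000] and [2000:2500] cover the whole array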
feature_engineering.py (project: search-MjoLniR, author: wikimedia)
def append_features(df, *cols):
    """Append features from columns to the features vector.

    Parameters
    ----------
    df : pyspark.sql.DataFrame
    cols : list of str

    Returns
    -------
    pyspark.sql.DataFrame
    """
    def add_features(feat, *other):
        raw = feat.toArray()
        return Vectors.dense(np.append(raw, map(float, other)))
    add_features_udf = F.udf(add_features, VectorUDT())
    new_feat_list = df.schema['features'].metadata['features'] + cols
    return df.withColumn('features', mjolnir.spark.add_meta(
        df._sc, add_features_udf('features', *cols), {'features': new_feat_list}))
data.py (project: tfutils, author: neuroailab)
def init_ops(self):
        self.input_ops = []
        for thread_num in range(self.n_threads):
            op = {}
            for attr_num in range(self.n_attrs):
                fq = self.file_queues[thread_num][attr_num]
                args = self.read_args[attr_num]
                kwargs = self.read_kwargs[attr_num]
                _op = self.get_input_op(fq, *args, **kwargs)
                if self.trans_dicts and self.trans_dicts[attr_num]:
                    td = self.trans_dicts[attr_num]
                    for k in td:
                        if k in _op:
                            _op[td[k]] = _op.pop(k)
                op.update(_op)
            self.input_ops.append(op)
        self.apply_postprocessing()
        return self.input_ops
data.py (project: tfutils, author: neuroailab)
def get_data_paths(paths, file_pattern=DEFAULT_TFRECORDS_GLOB_PATTERN):
    if not isinstance(paths, list):
        assert isstring(paths)
        paths = [paths]
    if not isinstance(file_pattern, list):
        assert isstring(file_pattern)
        file_patterns = [file_pattern] * len(paths)
    else:
        file_patterns = file_pattern
    assert len(file_patterns) == len(paths), (file_patterns, paths)
    datasources = []
    for path, file_pattern in zip(paths, file_patterns):
        if os.path.isdir(path):
            tfrecord_pattern = os.path.join(path, file_pattern)
            datasource = tf.gfile.Glob(tfrecord_pattern)
            datasource.sort()
            datasources.append(datasource)
        else:
            datasources.append([path])
    dl = map(len, datasources)
    assert all([dl[0] == d for d in dl[1:]]), dl
    return datasources
data.py (project: tfutils, author: neuroailab)
def parse_standard_tfmeta(paths):
    meta_list = []
    for path in paths:
        if isstring(path):
            if path.startswith('meta') and path.endswith('.pkl'):
                mpaths = [path]
            else:
                assert os.path.isdir(path)
                mpaths = filter(lambda x: x.startswith('meta') and x.endswith('.pkl'),
                                os.listdir(path))
                mpaths = [os.path.join(path, mp) for mp in mpaths]
        else:
            # in this case, it's a list
            assert isinstance(path, list)
            mpaths = path
        d = {}
        for mpath in mpaths:
            d.update(cPickle.load(open(mpath)))
        meta_list.append(d)
    return meta_list
s2train.py (project: DmsMsgRcg, author: bshao001)
def train_tas(model, model_scope, num_epoches, result_file):
    height, width = FEATURE_HEIGHT, FEATURE_WIDTH

    feats0, feats1 = read_features_tas(height, width)

    y0 = np.zeros((feats0.shape[0], 1), dtype=np.float32)
    y1 = np.ones((feats1.shape[0], 1), dtype=np.float32)

    all_feats = np.append(feats0, feats1, axis=0)
    all_y = np.append(y0, y1, axis=0)

    print("all_feats shapes: toll = {}, closed = {}, all = {}; "
          "and dtype = {}".format(feats0.shape, feats1.shape, all_feats.shape, all_feats.dtype))
    print("all_y shape: {}; and dtype={}".format(all_y.shape, all_y.dtype))

    res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
    img_cnn = ImgConvNets(model, model_scope, height, width, class_count=2, keep_prob=0.5,
                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, num_epoches=num_epoches)

    img_cnn.train(all_feats, all_y, res_dir, result_file=result_file)
s2train.py (project: DmsMsgRcg, author: bshao001)
def train_lss(model, model_scope, num_epoches, result_file):
    height, width = FEATURE_HEIGHT, FEATURE_WIDTH

    feats0, feats1, feats2, feats3 = read_features_lss(height, width)

    y0 = np.zeros((feats0.shape[0], 1), dtype=np.float32)
    y1 = np.ones((feats1.shape[0], 1), dtype=np.float32)
    y2 = np.ones((feats2.shape[0], 1), dtype=np.float32) * 2
    y3 = np.ones((feats3.shape[0], 1), dtype=np.float32) * 3

    all_feats = np.append(np.append(np.append(feats0, feats1, axis=0), feats2, axis=0),
                          feats3, axis=0)
    all_y = np.append(np.append(np.append(y0, y1, axis=0), y2, axis=0), y3, axis=0)

    print("all_feats shapes: zero toll = {}, closed = {}, normal = {}, congested = {},  all = {}; "
          "and dtype = {}".format(feats0.shape, feats1.shape, feats2.shape, feats3.shape,
                                  all_feats.shape, all_feats.dtype))
    print("all_y shape: {}; and dtype={}".format(all_y.shape, all_y.dtype))

    res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
    img_cnn = ImgConvNets(model, model_scope, height, width, class_count=4, keep_prob=0.5,
                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, num_epoches=num_epoches)

    img_cnn.train(all_feats, all_y, res_dir, result_file=result_file)
raindistdemo.py (project: rain-metrics-python, author: apendergrass)
def makedists(pdata,binl):
    ##### This is called from within makeraindist.
    ##### Calculate distributions
    pds=pdata.shape;    nlat=pds[1];    nlon=pds[0];    nd=pds[2]
    bins=np.append(0,binl)
    n=np.empty((nlon,nlat,len(binl)))
    binno=np.empty(pdata.shape)
    for ilon in range(nlon):
        for ilat in range(nlat):
            # this is the histogram - we'll get frequency from this
            thisn,thisbin=np.histogram(pdata[ilon,ilat,:],bins) 
            n[ilon,ilat,:]=thisn
            # these are the bin locations. we'll use these for the amount dist
            binno[ilon,ilat,:]=np.digitize(pdata[ilon,ilat,:],bins) 
    #### Calculate the number of days with non-missing data, for normalization
    ndmat=np.tile(np.expand_dims(np.nansum(n,axis=2),axis=2),(1,1,len(bins)-1))
    thisppdfmap=n/ndmat
    #### Iterate back over the bins and add up all the precip - this will be the rain amount distribution
    testpamtmap=np.empty(thisppdfmap.shape)
    for ibin in range(len(bins)-1):
        testpamtmap[:,:,ibin]=(pdata*(ibin==binno)).sum(axis=2)
    thispamtmap=testpamtmap/ndmat
    return thisppdfmap,thispamtmap
rainmetricdemo.py (project: rain-metrics-python, author: apendergrass)
def makedists(pdata,binl):
    ##### This is called from within makeraindist.
    ##### Calculate distributions
    pds=pdata.shape;    nlat=pds[1];    nlon=pds[0];    nd=pds[2]
    bins=np.append(0,binl)
    n=np.empty((nlon,nlat,len(binl)))
    binno=np.empty(pdata.shape)
    for ilon in range(nlon):
        for ilat in range(nlat):
            # this is the histogram - we'll get frequency from this
            thisn,thisbin=np.histogram(pdata[ilon,ilat,:],bins) 
            n[ilon,ilat,:]=thisn
            # these are the bin locations. we'll use these for the amount dist
            binno[ilon,ilat,:]=np.digitize(pdata[ilon,ilat,:],bins) 
    #### Calculate the number of days with non-missing data, for normalization
    ndmat=np.tile(np.expand_dims(np.nansum(n,axis=2),axis=2),(1,1,len(bins)-1))
    thisppdfmap=n/ndmat
    #### Iterate back over the bins and add up all the precip - this will be the rain amount distribution
    testpamtmap=np.empty(thisppdfmap.shape)
    for ibin in range(len(bins)-1):
        testpamtmap[:,:,ibin]=(pdata*(ibin==binno)).sum(axis=2)
    thispamtmap=testpamtmap/ndmat
    return thisppdfmap,thispamtmap
rigid_transformations.py (project: autolab_core, author: BerkeleyAutomation)
def linear_trajectory_to(self, target_tf, traj_len):
        """Creates a trajectory of poses linearly interpolated from this tf to a target tf.

        Parameters
        ----------
        target_tf : :obj:`RigidTransform`
            The RigidTransform to interpolate to.
        traj_len : int
            The number of RigidTransforms in the returned trajectory.

        Returns
        -------
        :obj:`list` of :obj:`RigidTransform`
            A list of interpolated transforms from this transform to the target.
        """
        if traj_len < 0:
            raise ValueError('Traj len must be at least 0')
        delta_t = 1.0 / (traj_len + 1)
        t = 0.0
        traj = []
        while t < 1.0:
            traj.append(self.interpolate_with(target_tf, t))
            t += delta_t
        traj.append(target_tf)
        return traj
helpers.py (project: inqbus.rainflow, author: Inqbus)
def get_extrema(data):
    # find extrema by finding indexes where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)

    # the first and last values are always local extrema
    signchange[0] = 1

    # last value is missing because the diff-array is 1 value shorter than the
    # input array so we have to add it again
    signchange = np.append(signchange, np.array([1]))

    calc_data = data[np.where(signchange != 0)]

    return calc_data
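
The trailing element appended in get_extrema above is needed because np.diff returns one fewer value than its input, so the last sample would otherwise never be selected. A minimal sketch with a made-up series:

import numpy as np

data = np.array([1.0, 2.0, 3.0, 2.0, 1.0])   # rises to a peak, then falls
asign = np.sign(np.diff(data))               # 4 signs for 5 samples
signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)
signchange[0] = 1                            # the first sample is always kept
signchange = np.append(signchange, 1)        # ...and so is the last one
print(data[signchange != 0])                 # [1. 3. 1.]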
helpers.py (project: inqbus.rainflow, author: Inqbus)
def count_pairs(data):
    df = pd.DataFrame(data)

    start, target = df.columns.tolist()

    # first we create groups for each pair and take size of each group as count.
    # counts is a pandas.Series with the pairs as index
    counts = df.groupby([start, target]).size()

    # then we remove duplicate pairs from the original dataframe,
    # so the dataframe and the counts series have the same length
    df = df.drop_duplicates()

    # reset index to values of pairs to fit index of counts
    df.set_index([0, 1], inplace=True, drop=False)

    # now we append the counts as column to the original data
    df[2] = pd.Series(counts.values, index=counts.index)

    # just cast pandas-dataframe back to numpy 2d-array usable for following
    # steps
    array = df.values
    return array
layers.py (project: multimodal_varinf, author: tmoer)
def kl_train(z,prior,posterior,hps):
    # push prior through AR layer
    logqs = posterior.logps(z)
    if hps.n_flow > 0:
        nice_layers = []
        print('Does this print')
        for i in range(hps.n_flow):
            nice_layers.append(nice_layer(tf.shape(z),hps,'nice{}'.format(i),ar=hps.ar))

        for i,layer in enumerate(nice_layers):
            z,log_det = layer.forward(z)
            logqs += log_det

    # track the KL divergence after transformation     
    logps = prior.logps(z)
    kl = logqs - logps
    return z, kl

### Autoregressive layers
layers.py (project: multimodal_varinf, author: tmoer)
def forward(self,z):
        if not self.ar:
            mu,log_sigma = self._get_mu_and_sigma(z)
        else:
            # permute z
            z = tf.reshape(z,[-1]+[1]*self.hps.z_size)
            perm = np.random.permutation(self.hps.z_size)+1
            z = tf.transpose(z,np.append([0],perm))
            z = tf.reshape(z,[-1,self.hps.z_size])
            mu,log_sigma = ar_layer(z,self.hps,n_hidden=self.n_hidden)
        log_sigma = tf.clip_by_value(log_sigma,-5,5)
        if not self.hps.ignore_sigma_flow:
            y = z * tf.exp(log_sigma) + mu
            log_det = -1 * log_sigma
        else:
            y = z + mu
            log_det = 0.0
        return y,log_det
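
The permutation built with np.append([0], perm) above keeps axis 0 (the batch axis) in place while shuffling the remaining axes before tf.transpose. A minimal numpy-only sketch (the sizes are made up):

import numpy as np

z_size = 3
perm = np.random.permutation(z_size) + 1   # a random ordering of the non-batch axes, e.g. [3 1 2]
axes = np.append([0], perm)                # batch axis stays first, e.g. [0 3 1 2]
print(axes)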
app.py (project: typing-pattern-recognition, author: abinashmeher999)
def keyReleaseEvent(self, event):
        self.outerclass.end_time = np.append(self.outerclass.end_time, time.time())
        if event.key() == QtCore.Qt.Key_Return:
            if self.text() == self.outerclass.pwd:
                self.outerclass.timing_vector = np.empty((0,), dtype=np.float64)
                i = 0
                # print self.outerclass.end_time.size
                while i < self.outerclass.end_time.size - 1:
                    self.outerclass.timing_vector = np.append(self.outerclass.timing_vector, self.outerclass.start_time[i] - self.outerclass.end_time[i])
                    self.outerclass.timing_vector = np.append(self.outerclass.timing_vector, self.outerclass.end_time[i+1] - self.outerclass.start_time[i])
                    i += 1
                self.outerclass.timing_vector = np.append(self.outerclass.timing_vector, self.outerclass.start_time[i] - self.outerclass.end_time[i])
                print self.outerclass.start_time
                print self.outerclass.end_time
                print self.outerclass.timing_vector
                self.outerclass.tv_list.append(np.array(self.outerclass.timing_vector))
                self.outerclass.start_time = np.empty((0,), dtype=np.float64)
                self.outerclass.end_time = np.empty((0,), dtype=np.float64)
                self.outerclass.timing_vector = np.empty((0,), dtype=np.float64)
                self.clear()
            else:
                self.outerclass.end_time = np.empty((0,), dtype=np.float64)
                self.clear()
        # print "Key released"
        QtGui.QLineEdit.keyReleaseEvent(self, event)
minibatch.py (project: mx-rfcn, author: giorking)
def get_image_array(roidb, scales, scale_indexes, need_mean=True):
    """
    build image array from specific roidb
    :param roidb: images to be processed
    :param scales: scale list
    :param scale_indexes: indexes
    :return: array [b, c, h, w], list of scales
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = scales[scale_indexes[i]]
        im, im_scale = image_processing.resize(im, target_size, config.MAX_SIZE)
        im_tensor = image_processing.transform(im, config.PIXEL_MEANS, need_mean=need_mean)
        processed_ims.append(im_tensor)
        im_scales.append(im_scale)
    array = image_processing.tensor_vstack(processed_ims)
    return array, im_scales
minibatch.py (project: adversarial-frcnn, author: xiaolonw)
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in xrange(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales
test_seg_scan_dsb_prl.py (project: dsb3, author: EliasVansteenkiste)
def extract_candidates(predictions_scan, tf_matrix, pid, outputs_path):
    print 'computing blobs'
    start_time = time.time()
    blobs = blobs_detection.blob_dog(predictions_scan[0, 0], min_sigma=1, max_sigma=15, threshold=0.1)
    print 'blobs computation time:', (time.time() - start_time) / 60.
    print 'n blobs detected:', blobs.shape[0]

    blobs_original_voxel_coords = []
    for j in xrange(blobs.shape[0]):
        blob_j = np.append(blobs[j, :3], [1])
        blob_j_original = tf_matrix.dot(blob_j)
        blobs_original_voxel_coords.append(blob_j_original)

    blobs = np.asarray(blobs_original_voxel_coords)
    print blobs.shape
    utils.save_pkl(blobs, outputs_path + '/%s.pkl' % pid)
test_seg_scan_dsb.py (project: dsb3, author: EliasVansteenkiste)
def extract_candidates(predictions_scan, tf_matrix, pid, outputs_path):
    print 'computing blobs'
    start_time = time.time()
    blobs = blobs_detection.blob_dog(predictions_scan[0, 0], min_sigma=1, max_sigma=15, threshold=0.1)
    print 'blobs computation time:', (time.time() - start_time) / 60.
    print 'n blobs detected:', blobs.shape[0]

    blobs_original_voxel_coords = []
    for j in xrange(blobs.shape[0]):
        blob_j = np.append(blobs[j, :3], [1])
        blob_j_original = tf_matrix.dot(blob_j)
        blobs_original_voxel_coords.append(blob_j_original)

    blobs = np.asarray(blobs_original_voxel_coords)
    print blobs.shape
    utils.save_pkl(blobs, outputs_path + '/%s.pkl' % pid)
commands.py (project: gbrs, author: churchill-lab)
def get_genotype_probability(aln_profile, aln_specificity, sigma=0.12):
    # 'aln_specificity' should be a set of unit vectors (at least one of the entries is larger than 1.)
    num_haps = len(aln_profile)
    aln_vec = unit_vector(aln_profile)
    genoprob = []
    for i in xrange(num_haps):
        v1 = unit_vector(aln_specificity[i])
        for j in xrange(i, num_haps):
            if j == i:
                genoprob.append(sum(np.power(aln_vec - v1, 2))) # homozygotes
            else:
                v2 = unit_vector(aln_specificity[j])
                geno_vec = unit_vector(v1 + v2)
                # compute directional similarity
                genoprob.append(sum(np.power(aln_vec - geno_vec, 2))) # for heterozygotes
    genoprob = np.exp(np.array(genoprob) / (-2 * sigma * sigma))
    return np.array(genoprob / sum(genoprob))
tools.py (project: monogreedy, author: jinjunqi)
def update_eva_history(eva_history, eva_candidate):

    for i in range(len(eva_candidate)):
        phi = eva_candidate[i]

        continue_flag = 0
        for j in range(len(eva_history.phi)):
            if numpy.sum(numpy.abs(phi - eva_history.phi[j])) < 1e-4:
                continue_flag = 1
                break
        if continue_flag == 1:
            continue

        eva_history.phi.append(phi.tolist())
        eva_history.time.append(eva_a_time(phi))
        eva_history.acc.append(eva_a_acc(phi))
derivatives.py (project: monogreedy, author: jinjunqi)
def tune_tal(mono_phi_score, tal_list):
    errs = []
    tals = []
    for tal in tal_list:
        err = []
        for i in range(len(mono_phi_score)):
            mono_1 = numpy.delete(mono_phi_score, i, axis=0)
            dim_h = mono_phi_score[i][:-1]
            value_h, alpha = train_predict_regression(mono_1, dim_h, tal)
            err.append((value_h - mono_phi_score[i][-1])**2)
        err = numpy.mean(err)

        errs.append(err)
        tals.append(tal)
        print 'regression tal:', tal, 'err', err

    idx = numpy.argmin(errs)

    return tals[idx]

