Example source code for Python's delayed()
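All of the snippets below use delayed() from joblib: it wraps a function call into a (function, args, kwargs) triple without executing it, so that a Parallel instance can schedule many such calls across worker processes or threads and return the results in order. As a reference point before the project excerpts, here is a minimal, self-contained sketch of the basic pattern (the worker function square is just an illustration, not taken from any of the projects below):

# Minimal joblib pattern (illustrative sketch).
from joblib import Parallel, delayed

def square(x):
    # toy CPU-bound task; delayed(square)(x) only records the call, it does not run it
    return x * x

if __name__ == '__main__':
    # Parallel consumes the generator of delayed calls and returns results in input order
    results = Parallel(n_jobs=2)(delayed(square)(i) for i in range(10))
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]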

models.py (project: siHMM, author: Ardavans)
def _resample_labels_joblib(self,num_procs):
        from joblib import Parallel, delayed
        import parallel

        if len(self.labels_list) > 0:
            parallel.model = self

            raw = Parallel(n_jobs=num_procs,backend='multiprocessing')\
                    (delayed(parallel._get_sampled_labels)(idx)
                            for idx in range(len(self.labels_list)))

            for l, (z,normalizer) in zip(self.labels_list,raw):
                l.z, l._normalizer = z, normalizer


    ### Mean Field
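In the siHMM snippets above and below, the model itself is never passed to the delayed calls. Instead it is stashed on a helper module (parallel.model = self) and each task only receives an integer index. With the 'multiprocessing' backend on platforms where workers are forked (Linux), the forked processes inherit that module-level state, so presumably this avoids pickling the whole model for every task; the commented-out warning about segfaults on OS X in the next snippet hints at how platform-sensitive the trick is. A rough, self-contained sketch of the same idea (heavy_state and run are hypothetical names, not from the project):

# Stash shared state at module level; forked workers inherit it (relies on fork, i.e. Linux).
from joblib import Parallel, delayed

heavy_state = None  # set once in the parent before the worker pool is forked

def run(idx):
    # each task receives only a small index and reads the big object from the module global
    return heavy_state[idx] * 2

if __name__ == '__main__':
    heavy_state = list(range(1000))
    out = Parallel(n_jobs=2, backend='multiprocessing')(
            delayed(run)(i) for i in range(10))
    print(out)  # [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]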
models.py (project: siHMM, author: Ardavans)
def _joblib_resample_states(self,states_list,num_procs):
        from joblib import Parallel, delayed
        import parallel

        # warn('joblib is segfaulting on OS X only, not sure why')

        if len(states_list) > 0:
            joblib_args = list_split(
                    [self._get_joblib_pair(s) for s in states_list],
                    num_procs)

            parallel.model = self
            parallel.args = joblib_args

            raw_stateseqs = Parallel(n_jobs=num_procs,backend='multiprocessing')\
                    (delayed(parallel._get_sampled_stateseq)(idx)
                            for idx in range(len(joblib_args)))

            for s, (stateseq, log_likelihood) in zip(
                    [s for grp in list_split(states_list,num_procs) for s in grp],
                    [seq for grp in raw_stateseqs for seq in grp]):
                s.stateseq, s._normalizer = stateseq, log_likelihood
models.py (project: siHMM, author: Ardavans)
def _joblib_meanfield_update_states(self,states_list,num_procs):
        if len(states_list) > 0:
            from joblib import Parallel, delayed
            import parallel

            joblib_args = list_split(
                    [self._get_joblib_pair(s) for s in states_list],
                    num_procs)

            parallel.model = self
            parallel.args = joblib_args

            allstats = Parallel(n_jobs=num_procs,backend='multiprocessing')\
                    (delayed(parallel._get_stats)(idx) for idx in range(len(joblib_args)))

            for s, stats in zip(
                    [s for grp in list_split(states_list,num_procs) for s in grp],
                    [s for grp in allstats for s in grp]):
                s.all_expected_stats = stats
runCRescal.py (project: sictf, author: malllabiisc)
def RelationReport(Tensor,RelationTensor,matrixA,dictIndextoVP,dictVPtoIndex,lambdaFolderName,runDir,num_cores_for_fit_computation):
    numVP = len(RelationTensor)
    # dummy code to help parallelize
    RelIndexFitReport = [] # List of index to fit, indices to be sorted based on fit [(verbPhrase, relNum, tensorSliceNorm, normResidueSlice, Fit), tuples]
    # for relIndex in range(0,numVP):
    #   verbPhrase,relNum,tensorSliceNorm, normResidueSlice, Fit = computeSliceFit(Tensor[relIndex],RelationTensor[relIndex],matrixA,dictIndextoVP,relIndex)
    #   RelIndexFitReport.append((verbPhrase,relNum,tensorSliceNorm, normResidueSlice, Fit))
    RelIndexFitReport = Parallel(n_jobs=num_cores_for_fit_computation, verbose=1)(
            delayed(cheaplyComputeSliceFit)(Tensor[relIndex], RelationTensor[relIndex], matrixA,
                                            dictIndextoVP, dictIndextoNP, relIndex)
            for relIndex in range(0, numVP))
    RelIndexFitReport.sort(key = lambda x:x[4],reverse=True) # sort based on fit of relations
    # print(RelIndexFitReport) # check whether sorted.
    # print('Printing Path')
    # print(os.path.join(lambdaFolderName,runDir,'RelationReport.txt'))
    # Writing old relation Report to a file
    RelationReportHandle = open(os.path.join(os.getcwd(),lambdaFolderName,runDir,'RelationReport.txt'),'w')
    for lineInfo in RelIndexFitReport:
        line = 'Relation: '+ str(lineInfo[0])+'\t' +' Relation Number: '+ str(lineInfo[1])+'\t' +' sliceNorm: '+str(lineInfo[2])+'\t' +'errorNorm: '+str(lineInfo[3])+'\t'+' SlicewiseFit: '+str(lineInfo[4])+'\n'
        print(line)
        RelationReportHandle.write(line)
    RelationReportHandle.close()
    return RelIndexFitReport
rais.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def rais(self, data, step = 1000, M = 100, parallel = False, seed = None):
        num_data = data.shape[0]
        result = 0
        if not parallel:
            p = []
            for i in range(M):
                logw = self.mcmc_r(data, step, num_data)
                p.append(logw)

            p = np.array(p)
            logmeanp = logmeanexp(p, axis = 0)
        else:
            num_cores = multiprocessing.cpu_count()

            p = Parallel(n_jobs=num_cores)(delayed(self.mcmc_r)(v = data, step = step, num_data = num_data, seed = seed) for i in range(M))

            p = np.array(p)

            logmeanp = logmeanexp(p, axis = 0)

        result = logmeanp.mean()

        return result
ais.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def ais(rbm, step = 100, M = 100, parallel = False, seed = None):

    W = rbm.W.data.numpy().T
    v_bias = rbm.v_bias.data.numpy()
    h_bias = rbm.h_bias.data.numpy()

    logZ0 = np.log((1+np.exp(v_bias))).sum() + np.log(1+np.exp(h_bias)).sum()
    ratio = []
    if parallel:
        num_cores = multiprocessing.cpu_count()

        results = Parallel(n_jobs=num_cores)(delayed(mcmc)(step = step, seed = seed, W = W, h_bias = h_bias, v_bias = v_bias) for i in range(M))


        results = np.array(results).reshape(len(results), 1)
        logZ = logZ0 + logmeanexp(results, axis = 0)
    else:
        for i in range(M):
            ratio.append(mcmc(step, seed = seed,  W = W, h_bias = h_bias, v_bias = v_bias))

        ratio = np.array(ratio).reshape(len(ratio),1)
        logZ = logZ0 + logmeanexp(ratio, axis = 0)

    return logZ
rais_dbn.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
(The rais() method in this file is identical to the rais() implementation from rais.py shown above.)
rais_dbn.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
def ulogprob(v_input, dbn, M = 1000, parallel = False):
    logw = np.zeros([M, len(v_input)])
    # samples = v_input
    if not parallel:
        for i in range(M):
            # samples = v_input
            # for l in range(dbn.n_layers-1):
            #     logw[i,:] += -dbn.rbm_layers[l].free_energy(samples,dbn.rbm_layers[l].W)[0]
            #     samples = dbn.rbm_layers[l].sample_h_given_v(samples,dbn.rbm_layers[l].W,dbn.rbm_layers[l].h_bias)[0]
            #     logw[i,:] -= -dbn.rbm_layers[l].free_energy_hidden(samples,dbn.rbm_layers[l].W)[0]
            # logw[i,:] += -dbn.rbm_layers[-1].free_energy(samples,dbn.rbm_layers[-1].W)[0]
            logw[i,:] += important_sampling(v_input, dbn)
    else:
        num_cores = multiprocessing.cpu_count()

        results = Parallel(n_jobs=num_cores)(delayed(important_sampling)(v_input = v_input, dbn = dbn) for i in range(M))
        logw += np.asarray(results)

    return logmeanexp(logw,0)
ais.py (project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch, author: wmingwei)
(This entry duplicates the ais.py snippet shown above; the code is identical.)
alignStats.py (project: binf-scripts, author: lazappi)
def main():
    """
    Main function.

    1. Setup logging
    2. Get arguments
    3. Get index
    4. Process files
    5. Write output
    """

    setup_logging()

    logger = logging.getLogger("stats." + __name__)

    args = get_args()

    index = get_index(args)

    logger.warning("Positions not in annotation will be ignored.")

    logger.info("Found " + str(len(args.inputs)) + " input file(s):")
    for input_file in sorted(args.inputs):
        logger.debug(input_file)

    if args.is_parallel:
        stats = Parallel(n_jobs=args.parallel,
                         verbose=100,
                         batch_size=1)(delayed(process_file)(input_file,
                                                             args.type,
                                                             index,
                                                             args.is_parallel)
                                       for input_file in args.inputs)
    else:
        stats = []
        for input_file in args.inputs:
            output_table = process_file(input_file, args.type, index,
                                        args.is_parallel)
            stats.append(output_table)

    write_stats(args.out, stats)
optimalK.py (project: gap_statistic, author: milesgranger)
def _process_with_joblib(self, X: Union[pd.DataFrame, np.ndarray], n_refs: int, cluster_array: np.ndarray):
        """
        Process calling of .calculate_gap() method using the joblib backend
        """
        with Parallel(n_jobs=self.n_jobs) as parallel:
            for gap_value, n_clusters in parallel(delayed(self._calculate_gap)(X, n_refs, n_clusters)
                                                  for n_clusters in cluster_array):
                yield (gap_value, n_clusters)
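The snippet above creates the Parallel executor with a with-statement, which manages the worker pool's lifetime explicitly and lets the same executor be reused for several dispatches; the results are then re-yielded as (gap_value, n_clusters) pairs. A reduced sketch of the reuse pattern (score and the batches are stand-ins, not part of gap_statistic):

# Reusing one Parallel pool for several batches (illustrative sketch).
from joblib import Parallel, delayed

def score(x):
    return x ** 2

if __name__ == '__main__':
    batches = [range(3), range(3, 6)]
    with Parallel(n_jobs=2) as parallel:  # the same worker pool serves both batches
        for batch in batches:
            for x, value in zip(batch, parallel(delayed(score)(x) for x in batch)):
                print(x, value)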
mc_func.py (project: CRN_ProbabilisticInversion, author: elaloy)
def forward_parallel(forward_process,X,n,n_jobs,extra_par): 

    n_row=X.shape[0]

    parallelizer = Parallel(n_jobs=n_jobs)

    tasks_iterator = ( delayed(forward_process)(X_row,n,extra_par) 
                      for X_row in np.split(X,n_row))

    result = parallelizer( tasks_iterator )
    # Merging the output of the jobs
    return np.vstack(result)
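forward_parallel() builds one delayed task per row of X (np.split(X, n_row) yields n_row single-row arrays) and stacks the per-row outputs back together with np.vstack. A self-contained sketch of the same split/compute/vstack shape, with a toy forward model standing in for the project's forward_process:

# Row-wise fan-out and vstack merge (illustrative sketch; toy_forward is hypothetical).
import numpy as np
from joblib import Parallel, delayed

def toy_forward(x_row, n, extra_par):
    # stand-in for an expensive forward simulation on one parameter row
    return x_row * n + extra_par

if __name__ == '__main__':
    X = np.arange(12.0).reshape(4, 3)
    tasks = (delayed(toy_forward)(row, 2, 0.5) for row in np.split(X, X.shape[0]))
    result = np.vstack(Parallel(n_jobs=2)(tasks))
    print(result.shape)  # (4, 3): one output row per input row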
mcmc_func.py (project: CRN_ProbabilisticInversion, author: elaloy)
(forward_parallel() in this file is identical to the implementation in mc_func.py shown above.)
augment_batch_iterator.py (project: face_detection, author: chintak)
def transform(self, Xb, yb):
        X_n, y_n = super(AugmentBatchIterator, self).transform(Xb, yb)
        ret = Parallel(n_jobs=-1)(delayed(load_augment_im)(self, name, bb)
                                  for name, bb in zip(X_n, y_n))
        Xb = np.asarray(map(lambda v: v[0], ret))
        yb = np.asarray(map(lambda v: v[1], ret))
        return Xb, yb
lazy_batch_iterator.py (project: face_detection, author: chintak)
def transform(self, Xb, yb):
        X_n, yb = super(LazyBatchIterator, self).transform(Xb, yb)
        Xb = Parallel(n_jobs=-1)(delayed(load_im_f)(name)
                                 for name in X_n)
        Xb = np.asarray(Xb)
        return Xb, yb
create_lmdb.py (project: face_detection, author: chintak)
def process_batch(image_db, label_db, fnames_b, y_b):
    print "Reading the images and labels"
    with Parallel(n_jobs=-1) as parallel:
        Xb = parallel(delayed(load_im_tuple)
                      (fname, i) for i, fname in fnames_b)
        yb = parallel(delayed(load_y_tuple)(y, i) for i, y in y_b)
    print "Writing image data"
    _write_batch_lmdb(image_db, Xb)
    print "Writing label data"
    _write_batch_lmdb(label_db, yb)
utils.py (project: face_detection, author: chintak)
def get_file_list(folder):
    names = os.listdir(folder)
    fnames = []
    bboxes = []
    bbox_names = map(lambda name: os.path.join(
        folder, name, '_bboxes.txt'), names)
    with Parallel(n_jobs=-1) as parallel:
        dfs = parallel(delayed(_extract_names_bboxes)(bname)
                       for bname in bbox_names)
    df = pd.concat(dfs, ignore_index=True)
    df['Flag'] = df['Name'].map(lambda x: True if os.path.exists(x) else False)
    print "Initial number of images:", df['Name'].count()
    df = df[df['Flag'] == True]
    print "Total number of existing images:", df['Name'].count()
    return df['Name'].values, df['BBox'].values
pickle_data.py (project: wikilinks, author: trovdimi)
def pickle_rel_data():

    #Parallel(n_jobs=9, backend="multiprocessing")(delayed(pickle_rel)(rel_feature) for rel_feature in
    #        ['rel_degree','rel_in_degree','rel_out_degree','rel_page_rank','rel_local_clust','rel_eigen_centr',
    #                    'rel_hits_hub','rel_hits_authority','rel_kcore'])

    Parallel(n_jobs=3, backend="multiprocessing")(delayed(pickle_rel)(rel_feature) for rel_feature in
                                                  ['rel_degree','rel_in_degree','rel_out_degree'])

    #Parallel(n_jobs=3, backend="multiprocessing")(delayed(pickle_rel)(rel_feature) for rel_feature in
    #        ['rel_hits_hub','rel_hits_authority','rel_kcore'])

    #Parallel(n_jobs=3, backend="multiprocessing")(delayed(pickle_rel)(rel_feature) for rel_feature in
    #        ['rel_page_rank','rel_local_clust','rel_eigen_centr'])
pickle_data.py (project: wikilinks, author: trovdimi)
def pickle_vis_data():
    pickle_vis_data_pandas()
    Parallel(n_jobs=5, backend="multiprocessing")(delayed(pickle_viz)(rel_feature) for rel_feature in
                                                  ['infobox','lead','left-body','navbox', 'body'])
    #Parallel(n_jobs=3, backend="multiprocessing")(delayed(pickle_viz_positions)(rel_feature) for rel_feature in
    #                                              ['links_postions_text','links_postions_x','links_postions_y'])
run.py (project: ParlAI, author: facebookresearch)
def batch_train(opt, round_index, round_train_data, round_valid_data, round_valid_weights=None, save_all=True, file_indices=None, return_acc_len=False, seq2seq=False):
    i = 0
    perfs = []
    M = len(round_train_data)
    while i < M:
        j = min(i + opt['num_machines'], M)
        cur_perfs = Parallel(n_jobs=j - i, backend='threading') \
            (delayed(train)(opt, round_index, train_index, file_indices[train_index] if file_indices else train_index, round_train_data[train_index], round_valid_data[train_index], valid_weights=round_valid_weights[train_index] if round_valid_weights else None, save_all=save_all, return_acc_len=return_acc_len, seq2seq=seq2seq) \
                for train_index in range(i, j))
        perfs.extend(cur_perfs)
        i = j

    error_indices, valid_indices = [], []
    for i, perf in enumerate(perfs):
        if perf == 0.0 or type(perf) == tuple and perf[0] == 0.0:
            error_indices.append(i)
        elif i < opt['num_machines']:
            valid_indices.append(i)

    M = len(error_indices)
    TMP_NUM_MACHINES = len(valid_indices)
    if M > 0 and TMP_NUM_MACHINES > 0:
        i = 0
        error_perfs = []
        while i < M:
            j = min(i + TMP_NUM_MACHINES, M)
            cur_perfs = Parallel(n_jobs=j - i, backend='threading') \
                (delayed(train)(opt, round_index, valid_indices[train_index], file_indices[error_indices[train_index]] if file_indices else error_indices[train_index], round_train_data[error_indices[train_index]], round_valid_data[error_indices[train_index]], valid_weights=round_valid_weights[error_indices[train_index]] if round_valid_weights else None, save_all=save_all, return_acc_len=return_acc_len, seq2seq=seq2seq) \
                    for train_index in range(i, j))
            error_perfs.extend(cur_perfs)
            i = j
        for i in range(M):
            perfs[error_indices[i]] = error_perfs[i]

    return perfs
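batch_train() above dispatches its per-machine training jobs with backend='threading'. joblib also offers process-based backends ('multiprocessing' and, in newer releases, 'loky'); the threading backend avoids pickling the arguments but only pays off when the work releases the GIL or is itself I/O- or subprocess-bound, which fits launching external training runs. A minimal sketch of selecting a backend (fetch is a placeholder worker, not from ParlAI):

# Choosing a joblib backend (illustrative sketch).
from joblib import Parallel, delayed

def fetch(i):
    # placeholder for I/O-bound or GIL-releasing work
    return i

if __name__ == '__main__':
    threaded = Parallel(n_jobs=4, backend='threading')(delayed(fetch)(i) for i in range(8))
    processed = Parallel(n_jobs=4, backend='multiprocessing')(delayed(fetch)(i) for i in range(8))
    print(threaded == processed)  # True: same results, different execution backends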

