realigner.bak.py source code

python

Project: CLAM    Author: Xinglab
from collections import defaultdict

from tqdm import tqdm


def construct_subgraph(mbam, read_qname, mread_dict, processed_mreads, chr_list, winsize=50, unstranded=False):
    """Collect the connected subgraph of genomic clusters reachable from one multi-mapped read.

    Args:
        mbam: `pysam.AlignmentFile` holding the multi-mapped alignments.
        read_qname (str): query name of the seed multi-mapped read.
        mread_dict (dict): read qname -> list of `pysam.AlignedSegment` alignments.
        processed_mreads (set): qnames already assigned to a subgraph; updated in place.
        chr_list (list): chromosome names passed through to `build_read_cluster`.
        winsize (int): window size used when clustering alignments.
        unstranded (bool): if True, ignore strand when clustering.

    Returns:
        read_to_locations (dict): read qname -> {node_name: alignment} for this subgraph.
        processed_mreads (set): the updated set of processed read qnames.
    """
    # the record of processed alignments only needs to be kept at the within-subgraph level
    processed_mread_alignments = set()
    counter = 0
    # a list of `pysam.AlignedSegment` objects;
    # note that the read-tagger results are already stored in `pysam.AlignedSegment.opt('RT')`
    read_aln_list = list(mread_dict[read_qname])
    processed_mreads.add(read_qname)
    read_to_locations = defaultdict(dict)  # read_qname -> {node_name1: segment1, node_name2: segment2}

    # expand this connected component iteratively, one frontier of alignments at a time
    while True:
        counter += 1
        print("%i: %i" % (counter, len(read_aln_list)))
        next_read_aln_list = []

        # only show a progress bar for large frontiers
        gen = read_aln_list if len(read_aln_list) < 200 else tqdm(read_aln_list)
        for alignment in gen:
            ## build a node for this mread alignment 
            ## (if not already processed, i.e. built before)
            if alignment in processed_mread_alignments:
                continue

            genomic_cluster, this_mread_dict, discarded_mread_list = \
                build_read_cluster(alignment, chr_list, mbam, unstranded=unstranded, winsize=winsize)
            processed_mread_alignments.update(discarded_mread_list)
            if genomic_cluster is None:  # this cluster is invalid (only double-mappers)
                continue

            ## update loc2read, read2loc
            node_name = ':'.join([str(x) for x in genomic_cluster])
            #if node_name in subgraph:
            #   logger.debug("I revisited '%s' at read '%s'."%(node_name, read_qname))
            #   break
            #subgraph.add(node_name)
            for x_qname in this_mread_dict:
                read_to_locations[x_qname][node_name] = this_mread_dict[x_qname]

            ## then add new alignments (edges) so that connected nodes are
            ## generated in the next iteration
            processed_mread_alignments.update(this_mread_dict.values())
            for read_x_qname in this_mread_dict:
                if read_x_qname in processed_mreads:
                    continue
                x_aln_list = [aln for aln in mread_dict[read_x_qname] if aln not in processed_mread_alignments]
                next_read_aln_list.extend(x_aln_list)

            ## .. and record these reads as processed, since we have already
            ## generated the nodes for them
            processed_mreads.update(this_mread_dict.keys())

        # if no more connected nodes can be found, stop expanding
        if len(next_read_aln_list) == 0:
            break
        read_aln_list = next_read_aln_list
    return read_to_locations, processed_mreads
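
For context, here is a minimal, hypothetical driver sketch showing how `construct_subgraph` might be called over all multi-mapped reads. It assumes `mread_dict` is built by grouping alignments by query name and that `processed_mreads` persists across calls so each read lands in exactly one subgraph; the input path `multi.bam` and the grouping code are illustrative assumptions, not part of the original CLAM module.

# hypothetical driver sketch, not from realigner.bak.py
from collections import defaultdict

import pysam

mbam = pysam.AlignmentFile("multi.bam", "rb")   # assumed BAM of multi-mapped alignments only
chr_list = list(mbam.references)

# group alignments by query name: read_qname -> [AlignedSegment, ...]
mread_dict = defaultdict(list)
for aln in mbam.fetch(until_eof=True):
    mread_dict[aln.query_name].append(aln)
mbam.reset()  # rewind so construct_subgraph can read the file again

processed_mreads = set()
subgraphs = []
for qname in mread_dict:
    if qname in processed_mreads:
        continue  # already assigned to an earlier subgraph
    read_to_locations, processed_mreads = construct_subgraph(
        mbam, qname, mread_dict, processed_mreads, chr_list,
        winsize=50, unstranded=False)
    subgraphs.append(read_to_locations)

print("built %i subgraphs from %i multi-mapped reads" % (len(subgraphs), len(mread_dict)))

Each `read_to_locations` maps every read in one connected component to the genomic cluster nodes it can align to; downstream re-assignment (not shown here) would operate on one such subgraph at a time.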