Example source code for Python's concatenate()
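
All of the snippets below revolve around numpy.concatenate, which joins a sequence of arrays along an existing axis. A minimal warm-up example (array values are illustrative):

    import numpy as np

    a = np.array([[1, 2], [3, 4]])   # shape (2, 2)
    b = np.array([[5, 6]])           # shape (1, 2)

    # Joining along axis 0 stacks rows; all other axes must match.
    print(np.concatenate((a, b), axis=0).shape)    # (3, 2)

    # Joining along axis 1 appends columns; b must be transposed to (2, 1).
    print(np.concatenate((a, b.T), axis=1).shape)  # (2, 3)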

ROI.py (project: NeoAnalysis, author: neoanalysis)
def getArrayRegion(self, arr, img=None, axes=(0,1), **kwds):
        rgns = []
        for l in self.lines:
            rgn = l.getArrayRegion(arr, img, axes=axes, **kwds)
            if rgn is None:
                continue
            rgns.append(rgn)

        ## make sure orthogonal axis is the same size
        ## (sometimes fp errors cause differences)
        if img.axisOrder == 'row-major':
            axes = axes[::-1]
        ms = min([r.shape[axes[1]] for r in rgns])
        sl = [slice(None)] * rgns[0].ndim
        sl[axes[1]] = slice(0,ms)
        rgns = [r[tuple(sl)] for r in rgns]  # index with a tuple; list-of-slices indexing is an error in newer numpy

        return np.concatenate(rgns, axis=axes[0])
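
The trimming idiom above, reduced to a standalone sketch (the arrays are hypothetical stand-ins for ROI regions): before joining along one axis, every region is sliced down to the smallest extent along the orthogonal axis, so floating-point size differences cannot break the concatenation.

    import numpy as np

    rgns = [np.ones((4, 10)), np.ones((4, 9)), np.ones((4, 10))]

    ms = min(r.shape[1] for r in rgns)         # smallest orthogonal extent
    rgns = [r[:, :ms] for r in rgns]           # trim every region to match
    print(np.concatenate(rgns, axis=0).shape)  # (12, 9)
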
ROI.py (project: NeoAnalysis, author: neoanalysis)
def getArrayRegion(self, data, img, axes=(0,1), order=1, **kwds):
        """
        Use the position of this ROI relative to an imageItem to pull a slice 
        from an array.

        Since this pulls 1D data from a 2D coordinate system, the return value 
        will have ndim = data.ndim-1

        See ROI.getArrayRegion() for a description of the arguments.
        """

        imgPts = [self.mapToItem(img, h['item'].pos()) for h in self.handles]
        rgns = []
        for i in range(len(imgPts)-1):
            d = Point(imgPts[i+1] - imgPts[i])
            o = Point(imgPts[i])
            r = fn.affineSlice(data, shape=(int(d.length()),), vectors=[Point(d.norm())], origin=o, axes=axes, order=order, **kwds)
            rgns.append(r)

        return np.concatenate(rgns, axis=axes[0])
elphyio.py (project: NeoAnalysis, author: neoanalysis)
def load_bytes(self, data_blocks, dtype='<i1', start=None, end=None, expected_size=None):
        """
        Return list of bytes contained
        in the specified set of blocks.

        NB : load all data as files cannot exceed 4Gb 
             find later other solutions to spare memory.
        """
        chunks = list()
        raw = ''
        # keep only data blocks having
        # a size greater than zero
        blocks = [k for k in data_blocks if k.size > 0]
        for data_block in blocks :
            self.file.seek(data_block.start)
            raw = self.file.read(data_block.size)[0:expected_size]
            databytes = np.frombuffer(raw, dtype=dtype)
            chunks.append(databytes)
        # concatenate all chunks and return
        # the specified slice
        if len(chunks)>0 :
            databytes = np.concatenate(chunks)
            return databytes[start:end]
        else :
            return np.array([])
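
The same read-and-join pattern in a minimal form, assuming a raw binary file of little-endian int16 samples (the path, dtype, and chunk size are illustrative, not part of the Elphy format):

    import numpy as np

    def read_chunks(path, dtype='<i2', chunk_size=4096):
        """Read a binary file in fixed-size chunks, then join them."""
        chunks = []
        with open(path, 'rb') as f:
            while True:
                raw = f.read(chunk_size)
                if not raw:
                    break
                chunks.append(np.frombuffer(raw, dtype=dtype))
        return np.concatenate(chunks) if chunks else np.array([], dtype=dtype)
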
AbstractNetPreprocessor.py (project: scientific-paper-summarisation, author: EdCo95)
def extra_processing(self):

        data_dir = BASE_DIR + "/Data/Generated_Data/Sentences_And_SummaryBool/Abstract_Neg/AbstractNet/data.pkl"
        write_dir = BASE_DIR + \
            "/Data/Generated_Data/Sentences_And_SummaryBool/Abstract_Neg/AbstractNet/abstractnet_data.pkl"

        print("----> Reading data...")
        t = time.time()
        data = useful_functions.load_pickled_object(data_dir)
        print("----> Done, took ", time.time() - t, " seconds")

        print("----> Beginning processing...")
        t = time.time()
        self.start_time = t
        new_data = self.pool2.map(self.process_item, data)
        # new_data = np.concatenate(new_data, axis=0)
        print("----> Done, took ", (time.time() - t) / 60, " minutes")

        useful_functions.pickle_list(new_data, write_dir)
occlusion.py (project: DeepLearning_PlantDiseases, author: MarkoArsenovic)
def Occlusion_exp(image, occluding_size, occluding_stride, model, preprocess, classes, groundTruth):
    img = np.copy(image)
    height, width, _ = img.shape
    output_height = int(math.ceil((height - occluding_size) / occluding_stride + 1))
    output_width = int(math.ceil((width - occluding_size) / occluding_stride + 1))
    occludedImages = []
    for h in range(output_height):
        for w in range(output_width):
            #occluder region
            h_start = h*occluding_stride
            w_start = w*occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)

            input_image = copy.copy(img)
            input_image[h_start:h_end, w_start:w_end, :] = 0
            occludedImages.append(preprocess(Image.fromarray(input_image)))

    L = np.empty(output_height*output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack(occludedImages)
    dataset = torch.utils.data.TensorDataset(tensor_images,L) 
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=5,shuffle=False, num_workers=8) 

    heatmap=np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data

        if use_gpu:
            images, labels = images.cuda(), labels.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+

        outputs = model(Variable(images))
        m = nn.Softmax(dim=1)  # specify the dim explicitly; bare Softmax() is deprecated
        outputs = m(outputs)
        outs = outputs.cpu() if use_gpu else outputs  # 'outs' was undefined on the CPU path
        heatmap = np.concatenate((heatmap, outs[:, groundTruth].data.numpy()))

    return heatmap.reshape((output_height, output_width))
dqn_utils.py (project: deep-q-learning, author: alvinwan)
def _encode_observation(self, idx):
        end_idx   = idx + 1 # make noninclusive
        start_idx = end_idx - self.frame_history_len
        # this checks if we are using low-dimensional observations, such as RAM
        # state, in which case we just directly return the latest RAM.
        if len(self.obs.shape) == 2:
            return self.obs[end_idx-1]
        # if there weren't enough frames ever in the buffer for context
        if start_idx < 0 and self.num_in_buffer != self.size:
            start_idx = 0
        for idx in range(start_idx, end_idx - 1):
            if self.done[idx % self.size]:
                start_idx = idx + 1
        missing_context = self.frame_history_len - (end_idx - start_idx)
        # if zero padding is needed for missing context
        # or we are on the boundary of the buffer
        if start_idx < 0 or missing_context > 0:
            frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
            for idx in range(start_idx, end_idx):
                frames.append(self.obs[idx % self.size])
            return np.concatenate(frames, 2)
        else:
            # this optimization can save about 30% of compute time \o/
            img_h, img_w = self.obs.shape[1], self.obs.shape[2]
            return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)
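
The replay buffer above zero-pads a short frame history and joins the frames along the channel axis. A reduced sketch, assuming (H, W, C) frames:

    import numpy as np

    def stack_frames(frames, history_len):
        """Left-pad with zero frames, then concatenate along channels."""
        missing = history_len - len(frames)
        padding = [np.zeros_like(frames[0]) for _ in range(missing)]
        return np.concatenate(padding + list(frames), axis=2)

    frames = [np.ones((84, 84, 1)), np.ones((84, 84, 1))]
    print(stack_frames(frames, 4).shape)  # (84, 84, 4)
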
data.py (project: tfutils, author: neuroailab)
def get_data(self, dsource, sliceval):
        if self.subslice is None:
            return dsource[sliceval]
        else:
            subslice_inds = self.subsliceinds[sliceval]
            mbs = self.mini_batch_size
            bn0 = subslice_inds.min() // mbs
            bn1 = subslice_inds.max() // mbs
            stims = []
            for _bn in range(bn0, bn1 + 1):
                _s = np.asarray(dsource[_bn * mbs: (_bn + 1) * mbs])
                new_inds = isin(np.arange(_bn * mbs, (_bn + 1) * mbs), subslice_inds)
                new_array = _s[new_inds]
                stims.append(new_array)
            stims = np.concatenate(stims)
            return stims
energetics.py (project: piperine, author: DNA-and-Natural-Algorithms-Group)
def calculate_unrestricted_toehold_characteristics(self):
        import stickydesign as sd
        ends = sd.easyends('TD',
                           self.length,
                           alphabet='h',
                           adjs=['c', 'g'],
                           energetics=self)
        n_ends = len(ends)
        e_array = sd.energy_array_uniform(ends, self)
        e_array = e_array[n_ends:, :n_ends]
        for i in range(n_ends):
            e_array[i,i] = 0
        e_spr = e_array.max()/self.targetdG
        e_vec_ext = self.th_external_dG(ends)
        e_vec_int = self.th_internal_dG(ends)
        e_vec_all = np.concatenate( (e_vec_int, e_vec_ext))
        e_avg = e_vec_all.mean()
        e_dev = np.max(np.abs(e_vec_all - self.targetdG))
        return e_avg, e_spr, e_dev, n_ends
energetics.py (project: piperine, author: DNA-and-Natural-Algorithms-Group)
def calculate_unrestricted_toehold_characteristics(self):
        import stickydesign as sd
        ends = sd.easyends('TD',
                           self.length,
                           alphabet=self.alphabet,
                           adjs=self.adjs,
                           energetics=self)
        n_ends = len(ends)
        e_array = sd.energy_array_uniform(ends, self)
        e_array = e_array[n_ends:, :n_ends]
        for i in range(n_ends):
            e_array[i,i] = 0
        e_spr = e_array.max()/self.targetdG
        e_vec_ext = self.th_external_dG(ends)
        e_vec_int = self.th_internal_dG(ends)
        e_vec_all = np.concatenate( (e_vec_int, e_vec_ext))
        e_avg = e_vec_all.mean()
        e_dev = np.max(np.abs(e_vec_all - self.targetdG))
        return e_avg, e_dev, e_spr, n_ends
lang2vec.py (project: lang-reps, author: chaitanyamalaviya)
def get_concatenated_sets(lang_codes, feature_set_str):
    feature_set_parts = feature_set_str.split("+")
    feature_names = []
    feature_values = np.ndarray((len(lang_codes),0))
    for feature_set_part in feature_set_parts:
        more_feature_names, more_feature_values = get_union_sets(lang_codes, feature_set_part)
        feature_names += more_feature_names
        feature_values = np.concatenate([feature_values, more_feature_values], axis=1)
    return feature_names, feature_values
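
The (len(lang_codes), 0)-shaped starting array above acts as a neutral element for column-wise concatenation, so the first feature block needs no special case. A tiny illustration with made-up blocks:

    import numpy as np

    feature_values = np.empty((3, 0))
    for block in (np.ones((3, 2)), np.zeros((3, 4))):
        feature_values = np.concatenate([feature_values, block], axis=1)
    print(feature_values.shape)  # (3, 6)
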
sequence2sequence.py (project: lang-reps, author: chaitanyamalaviya)
def encode_seq(self, src_seq):
        """
        Encode a single sentence
        :param src_seq: source sentence
        :return: list of encoded vectors, one per input position
        """

        src_seq_rev = list(reversed(src_seq))
        fwd_vectors = self.enc_fwd_lstm.initial_state().transduce(src_seq)
        bwd_vectors = self.enc_bwd_lstm.initial_state().transduce(src_seq_rev)
        bwd_vectors = list(reversed(bwd_vectors))
        vectors = [dynet.concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)]
        return vectors
sequence2sequence.py (project: lang-reps, author: chaitanyamalaviya)
def encode_batch_seq(self, src_seq, src_seq_rev):
        """
        Encodes a batch of sentences
        :param src_seq: batch of sentences
        :param src_seq_rev: batch of sentences in reversed order
        :return: last hidden state of the encoder
        """
        fwd_vectors = self.enc_fwd_lstm.initial_state().transduce(src_seq)
        bwd_vectors = list(reversed(self.enc_bwd_lstm.initial_state().transduce(src_seq_rev)))
        return dynet.concatenate([fwd_vectors[-1], bwd_vectors[-1]])
sequence2sequence.py (project: lang-reps, author: chaitanyamalaviya)
def encode_seq(self, src_seq, src_seq_rev):
        fwd_vectors = self.enc_fwd_lstm.initial_state().transduce(src_seq)
        bwd_vectors = list(reversed(self.enc_bwd_lstm.initial_state().transduce(src_seq_rev)))  # use the backward LSTM, not the forward one
        return [dynet.concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)]
audio_converter.py (project: subtitle-synchronization, author: AlbertoSabater)
def generateDatasets(train_files, cut_data, len_mfcc, step_mfcc, hop_len, freq):

    X, Y = [], []

    for tf in train_files:

        train_data, labels = generateSingleDataset(tf, cut_data, len_mfcc, step_mfcc, hop_len, freq)

        X.append(train_data)
        Y.append(labels)

    X = np.concatenate(X)
    Y = np.concatenate(Y)

    suffix = '_'.join(str(v) for v in (freq, hop_len, len_mfcc, step_mfcc,
                                       X.shape[0], X.shape[1], X.shape[2]))
    if cut_data:
        filename = STORE_DIR + 'dataset_CUT_' + suffix + '.pickle'
    else:
        filename = STORE_DIR + 'dataset_' + suffix + '.pickle'
    print(filename)
    with open(filename, 'wb') as f:  # pickle needs a binary file handle
        pickle.dump([X, Y], f)

    return X, Y


orthopoly.py (project: pyballd, author: Yurlungur)
def get_quadrature_points(order):
    """
    Returns the quadrature points for Gauss-Lobatto quadrature
    as a function of the order of the polynomial we want to
    represent.
    See: https://en.wikipedia.org/wiki/Gaussian_quadrature
    """
    return np.sort(np.concatenate((np.array([-1,1]),
                                   poly.basis(order).deriv().roots())))
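
A usage sketch, assuming poly is aliased to numpy.polynomial.legendre.Legendre at module level (as the companion function below suggests): the Gauss-Lobatto points are the endpoints ±1 joined with the roots of the derivative of the order-n basis polynomial.

    import numpy as np
    from numpy import polynomial

    poly = polynomial.legendre.Legendre  # assumed module-level alias

    points = get_quadrature_points(4)
    print(points)  # five nodes in [-1, 1], including both endpoints
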
orthopoly.py (project: pyballd, author: Yurlungur)
def get_integration_weights(order,nodes=None):
    """
    Returns the integration weights for Gauss-Lobatto quadrature
    as a function of the order of the polynomial we want to
    represent.
    See: https://en.wikipedia.org/wiki/Gaussian_quadrature
    See: arXiv:gr-qc/0609020v1
    """
    if nodes is None:  # the original 'np.all(nodes == False)' test never fired for the None default
        nodes = get_quadrature_points(order)
    if poly == polynomial.chebyshev.Chebyshev:
        weights = np.empty((order+1))
        weights[1:-1] = np.pi/order
        weights[0] = np.pi/(2*order)
        weights[-1] = weights[0]
        return weights
    elif poly == polynomial.legendre.Legendre:
        interior_weights = 2/((order+1)*order*poly.basis(order)(nodes[1:-1])**2)
        boundary_weights = np.array([1-0.5*np.sum(interior_weights)])
        weights = np.concatenate((boundary_weights,
                                  interior_weights,
                                  boundary_weights))
        return weights
    else:
        raise ValueError("Not a known polynomial type.")
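
A quick sanity check for the Legendre branch, under the same assumption that poly = polynomial.legendre.Legendre: an order-4 Gauss-Lobatto rule is exact for polynomials up to degree 7, so integrating x**2 over [-1, 1] must give 2/3.

    import numpy as np
    from numpy import polynomial

    poly = polynomial.legendre.Legendre  # assumed module-level alias

    nodes = get_quadrature_points(4)
    weights = get_integration_weights(4, nodes)
    print(np.dot(weights, nodes**2))  # 0.6666..., the exact value of 2/3
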
thingtalk.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def constrain_value_logits(self, logits, curr_state):
        first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
        num_value_tokens = self.output_size - first_value_token
        value_allowed_token_matrix = np.concatenate((self.allowed_token_matrix[:,:self.num_control_tokens], self.allowed_token_matrix[:,first_value_token:]), axis=1)

        with tf.name_scope('constrain_logits'):
            allowed_tokens = tf.gather(tf.constant(value_allowed_token_matrix), curr_state)
            assert allowed_tokens.get_shape()[1:] == (self.num_control_tokens + num_value_tokens,)

            constrained_logits = logits - tf.to_float(tf.logical_not(allowed_tokens)) * 1e+10
        return constrained_logits
convert_t7.py (project: crnn, author: wulivicte)
def trans_pos(param, part_indexes, dim=0):
    # Split `param` into len(part_indexes) equal parts along axis `dim`,
    # then reassemble the parts in the order given by `part_indexes`.
    parts = np.split(param, len(part_indexes), dim)
    new_parts = [parts[i] for i in part_indexes]
    return np.concatenate(new_parts, dim)
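
A hypothetical call to trans_pos as defined above: with part_indexes=[1, 0] the two halves of the array are swapped along the given axis, the kind of reordering needed when converting gate layouts between frameworks.

    import numpy as np

    param = np.arange(8)
    print(trans_pos(param, [1, 0]))  # [4 5 6 7 0 1 2 3]
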
format.py (project: treecat, author: posterior)
def import_data(data_csvs_in,
                types_csv_in,
                values_csv_in,
                groups_csv_in,
                dataset_out,
                encoding='utf-8'):
    """Import a comma-delimited list of csv files into internal treecat format.

    Common encodings include: utf-8, cp1252.
    """
    schema = load_schema(types_csv_in, values_csv_in, groups_csv_in, encoding)
    data = np.concatenate([
        load_data(schema, data_csv_in, encoding)
        for data_csv_in in data_csvs_in.split(',')
    ])
    data.flags.writeable = False
    print('Imported data shape: [{}, {}]'.format(data.shape[0], data.shape[1]))
    ragged_index = schema['ragged_index']
    for v, name in enumerate(schema['feature_names']):
        beg, end = ragged_index[v:v + 2]
        count = np.count_nonzero(data[:, beg:end].max(1))
        if count == 0:
            print('WARNING: No values found for feature {}'.format(name))
    feature_types = [TY_MULTINOMIAL] * len(schema['feature_names'])
    table = Table(feature_types, ragged_index, data)
    dataset = {
        'schema': schema,
        'table': table,
    }
    pickle_dump(dataset, dataset_out)
serving.py (project: treecat, author: posterior)
def sample(self, N, counts, data=None):
        size = len(self._ensemble)
        pvals = np.ones(size, dtype=np.float32) / size
        sub_Ns = np.random.multinomial(N, pvals)
        samples = np.concatenate([
            server.sample(sub_N, counts, data)
            for server, sub_N in zip(self._ensemble, sub_Ns)
        ])
        np.random.shuffle(samples)
        assert samples.shape[0] == N
        return samples
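
The partition-and-join idiom above, reduced to numpy only (the toy samplers below stand in for the ensemble's servers):

    import numpy as np

    def sample_ensemble(samplers, N):
        """Split N draws uniformly across samplers, join, and shuffle."""
        pvals = np.ones(len(samplers)) / len(samplers)
        sub_Ns = np.random.multinomial(N, pvals)
        samples = np.concatenate([s(n) for s, n in zip(samplers, sub_Ns)])
        np.random.shuffle(samples)
        return samples

    samplers = [lambda n: np.zeros((n, 3)), lambda n: np.ones((n, 3))]
    print(sample_ensemble(samplers, 10).shape)  # (10, 3)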

