Python asarray() example source code
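
All the snippets below revolve around one call, so first a minimal sketch of its contract: np.asarray() converts its input to an ndarray but, unlike np.array(), does not copy when the input is already an ndarray of the requested dtype.

import numpy as np

a = np.arange(3, dtype=np.float64)
b = np.asarray(a)                   # already an ndarray of the right dtype: returned as-is, no copy
assert b is a
c = np.asarray(a, dtype=np.int32)   # dtype differs, so a converting copy is made
assert c is not a
d = np.asarray([1, 2, 3])           # lists and tuples are converted to new arrays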

data_loader_test.py (project: segmentation_DLMI, author: imatge-upc)
def load_channels(self, normalize=False):

        modalities = []
        modalities.append(nib.load(self.FLAIR_FILE))
        modalities.append(nib.load(self.T1_FILE))
        modalities.append(nib.load(self.T1c_FILE))
        modalities.append(nib.load(self.T2_FILE))

        channels = np.zeros(modalities[0].shape + (4,), dtype=np.float32)

        if normalize:
            mask = self.load_ROI_mask()

        for index_mod, mod in enumerate(modalities):
            if self.data_augmentation_flag:
                channels[:, :, :, index_mod] = flip_plane(np.asarray(mod.dataobj), plane=self.data_augmentation)
            else:
                channels[:, :, :, index_mod] = np.asarray(mod.dataobj)

            if normalize:
                channels[:, :, :, index_mod] = normalize_image(channels[:, :, :, index_mod], mask=mask)

        return channels
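
A note on the pattern above: nib.load returns an image whose .dataobj attribute is a lazy array proxy, and np.asarray(mod.dataobj) is the standard nibabel idiom for materializing the voxel data into memory. In isolation (the file path here is hypothetical):

import nibabel as nib
import numpy as np

img = nib.load('flair.nii.gz')      # hypothetical path to any NIfTI volume
data = np.asarray(img.dataobj)      # triggers the actual read from disk
print(data.shape, data.dtype)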
data_loader.py (project: segmentation_DLMI, author: imatge-upc)
def load_channels(self, normalize=False):
        modalities = []
        modalities.append(nib.load(self.T2_FILE))
        modalities.append(nib.load(self.T1_FILE))

        channels = np.zeros(modalities[0].shape + (2,), dtype=np.float32)

        for index_mod, mod in enumerate(modalities):
            if self.data_augmentation:
                channels[:, :, :, index_mod] = flip_plane(np.asarray(mod.dataobj), plane=self.data_augmentation)
            else:
                channels[:, :, :, index_mod] = np.asarray(mod.dataobj)

            if normalize:
                channels[:, :, :, index_mod] = normalize_image(channels[:, :, :, index_mod], mask=self.load_ROI_mask())

        return channels
genderclassifier.py (project: namegenderclassifier, author: joaoalvarenga)
def save(self, model_filename):
        self.__model.save("%s.model" % model_filename)
        np.save("%s.tvocab" % model_filename, np.asarray(self.__trigrams))
        np.save("%s.cvocab" % model_filename, np.asarray(self.__chars))
        np.save("%s.classes" % model_filename, np.asarray(self.__classes))
tdose_utilities.py (project: TDOSE, author: kasperschmidt)
def galfit_getheadervalue(compnumber,key,headerinfo):
    """
    Return the parameters of a GALFIT model header

    --- INPUT ---
    compnumber      A string containing the component number to extract info for (number after "COMP_" in header)
    key             The key to extract (keyword after "COMPNUMBER_" in header)
    headerinfo      Header to extract info from.

    """
    hdrinfo = headerinfo[compnumber+'_'+key]

    if '*' in hdrinfo: # handling parameters fixed in GALFIT run
        hdrinfo = hdrinfo.replace('*','')

    if '+/-' in hdrinfo:
        value   = float(hdrinfo.split('+/-')[0])
        error   = float(hdrinfo.split('+/-')[1])
    else:
        value   = float(hdrinfo[1:-1])
        error   = None

    if (key == 'XC') or (key == 'YC'):
        xrange, yrange = headerinfo['FITSECT'][1:-1].split(',')
        xrange = np.asarray(xrange.split(':')).astype(float)
        yrange = np.asarray(yrange.split(':')).astype(float)
        if key == 'XC':
            value = value - xrange[0] + 1.0
        if key == 'YC':
            value = value - yrange[0] + 1.0

    return value, error
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
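
To make the parsing above concrete, a small sketch with hypothetical GALFIT header values: a '+/-' entry splits into (value, error), and the FITSECT string becomes numeric bounds via np.asarray(...).astype(float):

import numpy as np

headerinfo = {'2_XC': '151.396 +/- 0.013',            # hypothetical header values
              'FITSECT': '[100:200,50:150]'}

hdrinfo = headerinfo['2_XC']
value, error = (float(v) for v in hdrinfo.split('+/-'))
xrange, yrange = headerinfo['FITSECT'][1:-1].split(',')
xrange = np.asarray(xrange.split(':')).astype(float)  # ['100', '200'] -> array([100., 200.])
value = value - xrange[0] + 1.0                       # shift XC into cutout coordinates
print(value, error)                                   # 52.396 0.013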
load_cifar10.py (project: GELUs, author: hendrycks)
def to_categorical(y, nb_classes):
    y = np.asarray(y, dtype='int32')
    if not nb_classes:
        nb_classes = np.max(y)+1
    Y = np.zeros((len(y), nb_classes))
    for i in range(len(y)):
        Y[i, y[i]] = 1.
    return Y

# load training and testing data
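
A quick check of to_categorical (assuming the definition above and numpy as np are in scope), together with the identity-matrix indexing idiom the explicit loop is equivalent to:

y = [0, 2, 1]
print(to_categorical(y, 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]

# The loop is equivalent to indexing rows of an identity matrix:
assert np.array_equal(to_categorical(y, 3), np.eye(3)[np.asarray(y)])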
index_data.py (project: onto-lstm, author: pdasigi)
def make_one_hot(indices):
        '''
        Accepts an array of indices, and converts them to a one-hot matrix
        '''
        # Making indices 0 based.
        indices = numpy.asarray(indices) - min(indices)
        num_classes = max(indices) + 1
        one_hot_indices = numpy.zeros((len(indices), num_classes))
        for i, ind in enumerate(indices):
            one_hot_indices[i][ind] = 1.0
        return one_hot_indices

    # TODO: Separate methods for returning word inds and conc inds
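
And a quick check of make_one_hot (assuming the definition above is in scope); note the min-offset, so the smallest index maps to column 0:

indices = [3, 5, 4]
print(make_one_hot(indices))   # 3 -> [1, 0, 0], 5 -> [0, 0, 1], 4 -> [0, 1, 0]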
index_data.py (project: onto-lstm, author: pdasigi)
def get_embedding_matrix(self, embedding_file, onto_aware):
        # embedding_file is a tsv with words in the first column and vector components in
        # the remaining columns. This fills the synset embedding if onto_aware is true, or
        # else the word embedding.
        # For words that do not have vectors, we sample from a uniform distribution in the
        # range of max and min of the word embedding.
        embedding_map = {}
        rep_max = -float("inf")
        rep_min = float("inf")
        for line in gzip.open(embedding_file):
            ln_parts = line.strip().split()
            if len(ln_parts) == 2:
                continue
            element = ln_parts[0]
            vec = numpy.asarray([float(f) for f in ln_parts[1:]])
            vec_max, vec_min = vec.max(), vec.min()
            if vec_max > rep_max:
                rep_max = vec_max
            if vec_min < rep_min:
                rep_min = vec_min
            embedding_map[element] = vec
        embedding_dim = len(vec)
        target_index = self.synset_index if onto_aware else self.word_index
        # Initialize target embedding with all random vectors
        target_vocab_size = self.get_vocab_size(onto_aware=onto_aware)
        target_embedding = self.numpy_rng.uniform(low=rep_min, high=rep_max, size=(target_vocab_size, embedding_dim))
        num_found_elements = 0
        num_all_elements = 0
        for element in target_index:
            num_all_elements += 1
            if element in embedding_map:
                vec = embedding_map[element]
                target_embedding[target_index[element]] = vec
                num_found_elements += 1
        print >>sys.stderr, "Found vectors for %.4f of the words" % (float(num_found_elements) / num_all_elements)
        return target_embedding
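
The structure of get_embedding_matrix in miniature: initialize every row uniformly within the observed min/max of the pretrained vectors, then overwrite the rows for which a vector exists. A hedged sketch with hypothetical vocabularies:

import numpy as np

rng = np.random.RandomState(0)
embedding_map = {'cat': np.asarray([0.1, 0.2])}         # hypothetical pretrained vectors
word_index = {'cat': 0, 'dog': 1}                       # hypothetical vocabulary
matrix = rng.uniform(low=-0.2, high=0.2, size=(2, 2))   # random rows for unknown words
for word, idx in word_index.items():
    if word in embedding_map:
        matrix[idx] = embedding_map[word]               # copy in the known vector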
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_default(N=100, dtype=np.double):
    return ( np.asarray(np.random.rand(N, N), dtype=dtype), )
    #return toc/trials, (4/3)*N*N*N*1e-9, times
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_eig(N=100, dtype=np.double):
    N/=4
    return ( np.asarray(np.random.rand(int(N), int(N)), dtype=dtype), )
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_svd(N=100, dtype=np.double):
    N/=2
    return ( np.asarray(np.random.rand(int(N), int(N)), dtype=dtype), False )

#det:    return toc/trials, N*N*N*1e-9, times
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_dot(N=100, dtype=np.double):
    N=N*N*10
    A = np.asarray(np.random.rand(int(N)), dtype=dtype)
    return (A, A)
    #return 1.0*toc/(trials), 2*N*N*N*1e-9, times
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_dgemm(N=100, trials=3, dtype=np.double):
    LARGEDIM = int(N*2)
    KSIZE = int(N/2)
    A = np.asarray(np.random.rand(LARGEDIM, KSIZE), dtype=dtype)
    B = np.asarray(np.random.rand(KSIZE, LARGEDIM), dtype=dtype)
    return (A, B)
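
In all of the bench_stats preparers above, np.asarray(..., dtype=dtype) is a cheap cast: np.random.rand already produces float64, so for the default dtype asarray returns the array unchanged, and only non-default dtypes trigger a converting copy. A quick check:

import numpy as np

a = np.random.rand(4, 4)                     # float64 by default
assert np.asarray(a, dtype=np.double) is a   # default dtype: returned as-is, no copy
b = np.asarray(a, dtype=np.float32)          # other dtypes: converting copy
assert b.dtype == np.float32 and b is not a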
stream_writers.py (project: nidaqmx-python, author: ni)
def write_one_sample_one_line(self, data, timeout=10):
        """
        Writes a single boolean sample to a single digital output
        channel in a task. The channel can contain only one digital
        line.

        Args:
            data (int): Specifies the boolean sample to write to the
                task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not 
                      AUTO_START_UNSET else True)

        numpy_array = numpy.asarray([data], dtype=bool)  # numpy.bool alias was removed in NumPy 1.24

        return _write_digital_lines(
            self._handle, numpy_array, 1, auto_start, timeout)
stream_writers.py (project: nidaqmx-python, author: ni)
def write_one_sample_port_byte(self, data, timeout=10):
        """
        Writes a single 8-bit unsigned integer sample to a single
        digital output channel in a task.

        Use this method for devices with up to 8 lines per port.

        Args:
            data (int): Specifies the 8-bit unsigned integer sample to
                write to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not 
                      AUTO_START_UNSET else True)

        numpy_array = numpy.asarray([data], dtype=numpy.uint8)

        return _write_digital_u_8(
            self._handle, numpy_array, 1, auto_start, timeout)
stream_writers.py (project: nidaqmx-python, author: ni)
def write_one_sample_port_uint16(self, data, timeout=10):
        """
        Writes a single 16-bit unsigned integer sample to a single
        digital output channel in a task.

        Use this method for devices with up to 16 lines per port.

        Args:
            data (int): Specifies the 16-bit unsigned integer sample to
                write to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not 
                      AUTO_START_UNSET else True)

        numpy_array = numpy.asarray([data], dtype=numpy.uint16)

        return _write_digital_u_16(
            self._handle, numpy_array, 1, auto_start, timeout)
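
Common to the three writers above: the scalar sample is wrapped as a one-element ndarray of exactly the dtype the underlying driver call expects, giving the C layer a contiguous typed buffer rather than a Python scalar. The wrapping step in isolation:

import numpy

sample = 5
buf = numpy.asarray([sample], dtype=numpy.uint8)   # one-element typed buffer
assert buf.shape == (1,) and buf.dtype == numpy.uint8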
test_fields.py (project: pyfds, author: emtpb)
def test_dimension():
    dim = fls.Dimension(3, 0.1)
    assert np.allclose(dim.vector, np.asarray([0, 0.1, 0.2]))
    assert dim.get_index(0.1) == 1
nn1_stress_test.py (project: YellowFin_Pytorch, author: JianGoForIt)
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle=True):
    labels = np.asarray(labels)
    keep = np.where(labels != 0.5)[0]          # keep only rows with a definite label
    tokens = np.asarray(tokens)[keep]
    if type(features) is np.ndarray:
        features = np.asarray(features)[keep]
    else:
        features = np.asarray(features.todense())[keep]
    labels = labels[keep]
    for token, feature, label in iterate_minibatches(tokens, features, labels, mini_batch_size, shuffle=shuffle):
        token = [_ for _ in pad_batch(token)]
        yield token, Variable(torch.from_numpy(feature)), Variable(torch.FloatTensor(label), requires_grad=False)
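
The np.asarray(...)[np.where(labels != 0.5)[0]] pattern above is index-based filtering: rows whose label is 0.5 (ambiguous) are dropped from tokens, features, and labels in lockstep. The selection step in isolation:

import numpy as np

labels = np.asarray([0.0, 0.5, 1.0, 0.5])
keep = np.where(labels != 0.5)[0]                  # indices of definite labels -> [0, 2]
tokens = np.asarray(['a', 'b', 'c', 'd'])[keep]
print(tokens, labels[keep])                        # ['a' 'c'] [0. 1.]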
nn1_stress_test.py (project: YellowFin_Pytorch, author: JianGoForIt)
def gen_minibatch1(tokens, features, mini_batch_size, shuffle=True):
    tokens = np.asarray(tokens)
    features = np.asarray(features.todense())
    print(tokens.shape)
    for token, feature, label in iterate_minibatches(tokens, features, features, mini_batch_size, shuffle=shuffle):
        token = [_ for _ in pad_batch(token)]
        yield token, Variable(torch.from_numpy(feature))
nn1.py (project: YellowFin_Pytorch, author: JianGoForIt)
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
#     print mini_batch.shape
#     print mini_batch
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
#     print max_sent_len1, max_sent_len2
#     max_token_len = int(np.mean([len(val) for sublist in mini_batch for val in sublist]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=int)   # np.int alias removed in NumPy 1.24
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=int)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            main_matrix1[idx1, idx2] = j   # original wrote main_matrix1[i, j], which always raised and was silently swallowed by a try/except
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            main_matrix2[idx1, idx2] = j
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
#     print main_matrix1_t.size()
#     print main_matrix2_t.size()
    return [main_matrix1_t, main_matrix2_t]
#     return [Variable(torch.cat((main_matrix1_t, main_matrix2_t), 0))

# def pad_batch(mini_batch):
# #     print mini_batch
# #     print type(mini_batch)
# #     print mini_batch.shape
# #     for i, _ in enumerate(mini_batch):
# #         print i, _
#     return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
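
Exercising pad_batch (a sketch assuming PyTorch, Variable, and the definition above are in scope): each example is a pair of variable-length token-id lists, and both halves are zero-padded to the batch maximum:

mini_batch = [([1, 2, 3], [4, 5]),     # hypothetical token-id pairs
              ([6], [7, 8, 9])]
m1, m2 = pad_batch(mini_batch)
print(m1.size(), m2.size())            # torch.Size([2, 3]) torch.Size([2, 3])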
nn1.py (project: YellowFin_Pytorch, author: JianGoForIt)
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle=True):
    labels = np.asarray(labels)
    keep = np.where(labels != 0.5)[0]          # keep only rows with a definite label
    tokens = np.asarray(tokens)[keep]
    features = np.asarray(features.todense())[keep]
    labels = labels[keep]
    for token, feature, label in iterate_minibatches(tokens, features, labels, mini_batch_size, shuffle=shuffle):
        token = [_ for _ in pad_batch(token)]
        yield token, Variable(torch.from_numpy(feature)), Variable(torch.FloatTensor(label), requires_grad=False)

