Python int() usage examples (source code)

nn1_stress_test.py (project: YellowFin_Pytorch, author: JianGoForIt)
# Imports needed to run this snippet standalone.
import numpy as np
import torch
from torch.autograd import Variable


def pad_batch(mini_batch):
    """Zero-pad each sentence pair in the mini-batch to the batch-wide maximum lengths."""
    mini_batch_size = len(mini_batch)
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
    # np.int was removed in NumPy 1.24; use a concrete integer dtype instead
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            try:
                # index by position (the original indexed with the values i, j)
                main_matrix1[idx1, idx2] = j
            except IndexError:
                pass
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            try:
                main_matrix2[idx1, idx2] = j
            except IndexError:
                pass
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
    return [main_matrix1_t, main_matrix2_t]
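A minimal usage sketch for the padding helper above, with made-up token-id pairs:

# Two hypothetical (sentence1, sentence2) pairs of token ids.
batch = [([3, 7, 2], [5, 1]), ([4, 9], [8, 6, 2, 2])]
m1, m2 = pad_batch(batch)
print(m1.size(), m2.size())  # torch.Size([2, 3]) torch.Size([2, 4])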
kernels.py (project: MKLMM, author: omerwe)
def getTrainTestKernel(self, params, Xtest):
    self.checkParams(params)

    # compute Kd and EE
    Kd = np.zeros((self.n, Xtest[0].shape[0], len(self.kernels)))
    params_ind = 0
    kernel_paramsArr = params[len(self.kernels):]
    for k_i, k in enumerate(self.kernels):
        numHyp = k.getNumParams()
        # slice out this kernel's hyperparameters (np.arange replaces the
        # Python 2 xrange and the removed np.int dtype)
        kernelParams_range = np.arange(params_ind, params_ind + numHyp)
        kernel_params = kernel_paramsArr[kernelParams_range]
        Kd[:, :, k_i] = k.getTrainTestKernel(kernel_params, Xtest[k_i])
        params_ind += numHyp
    EE = elsympol(Kd, len(self.kernels))

    # compute K
    K = 0
    for i in range(len(self.kernels)):
        K += np.exp(2 * params[i]) * EE[:, :, i + 1]

    return K
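A toy illustration of the per-kernel hyperparameter slicing used above (the tuple of counts is hypothetical):

params_ind = 0
for numHyp in (2, 0, 1):   # e.g. three kernels needing 2, 0 and 1 hyperparameters
    print(np.arange(params_ind, params_ind + numHyp))   # [0 1], [], [2]
    params_ind += numHyp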
kernels.py (project: MKLMM, author: omerwe)
def getTestKernelDiag(self, params, Xtest):
    self.checkParams(params)

    # compute Kd and EE
    Kd = np.zeros((Xtest[0].shape[0], 1, len(self.kernels)))
    params_ind = 0
    kernel_paramsArr = params[len(self.kernels):]
    for k_i, k in enumerate(self.kernels):
        numHyp = k.getNumParams()
        kernelParams_range = np.arange(params_ind, params_ind + numHyp)
        kernel_params = kernel_paramsArr[kernelParams_range]
        Kd[:, 0, k_i] = k.getTestKernelDiag(kernel_params, Xtest[k_i])
        params_ind += numHyp
    EE = elsympol(Kd, len(self.kernels))

    # compute K
    K = 0
    for i in range(len(self.kernels)):
        K += np.exp(2 * params[i]) * EE[:, :, i + 1]
    return K
kernels.py (project: MKLMM, author: omerwe)
def Kdim(self, kdimParams):
    # return the cached result if the parameters haven't changed meaningfully
    if (self.prevKdimParams is not None
            and np.max(np.abs(kdimParams - self.prevKdimParams)) < self.epsilon):
        return self.cache['Kdim']

    K = np.zeros((self.n, self.n, len(self.kernels)))
    params_ind = 0
    for k_i, k in enumerate(self.kernels):
        numHyp = k.getNumParams()
        kernelParams_range = np.arange(params_ind, params_ind + numHyp)
        kernel_params = kdimParams[kernelParams_range]
        # reuse the per-kernel slice of the cache when its parameters are unchanged
        if ((numHyp == 0 and 'Kdim' in self.cache)
                or (numHyp > 0 and self.prevKdimParams is not None
                    and np.max(np.abs(kernel_params - self.prevKdimParams[kernelParams_range])) < self.epsilon)):
            K[:, :, k_i] = self.cache['Kdim'][:, :, k_i]
        else:
            K[:, :, k_i] = k.getTrainKernel(kernel_params)
        params_ind += numHyp
    self.prevKdimParams = kdimParams.copy()
    self.cache['Kdim'] = K
    return K
coverage.py (project: cellranger, author: 10XGenomics)
def get_depth_info_json(info):
    # keys arrive as strings; iteritems() is Python 2-only, use items()
    fixed_info = {int(x): y for (x, y) in info.items()}

    total_depth_counts = sum(fixed_info.values())
    median_depth = None
    sorted_depths = sorted(fixed_info.keys())
    seen_depth_count = 0
    mean_depth = 0.0
    for depth in sorted_depths:
        seen_depth_count += fixed_info[depth]
        mean_depth += float(depth * fixed_info[depth]) / float(total_depth_counts)
        if seen_depth_count > total_depth_counts / 2 and median_depth is None:
            median_depth = depth
    zero_cov_fract = tk_stats.robust_divide(float(fixed_info.get(0, 0.0)), float(total_depth_counts))

    return (mean_depth, median_depth, zero_cov_fract)
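A worked example, assuming tk_stats.robust_divide behaves like ordinary division for a nonzero denominator (the histogram below is made up):

info = {'0': 10, '1': 30, '2': 40, '3': 20}   # depth -> number of positions
mean_depth, median_depth, zero_cov_fract = get_depth_info_json(info)
# mean = (0*10 + 1*30 + 2*40 + 3*20) / 100 = 1.7, median = 2, zero-coverage fraction = 0.1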
sun3d_utils.py (project: pybot, author: spillai)
def frame_to_json(bboxes, targets): 
    """
    {'polygon': [{'x': [1,2,3], 'y': [2,3,4], 'object': 3}]}
    Also decorated (see decorate_frame with pretty_names, polygons, targets)
    """

    assert(len(bboxes) == len(targets))

    if len(bboxes): 
        bb = bboxes.astype(np.int32)
        return {'polygon': 
                [{'x': [int(b[0]), int(b[0]), int(b[2]), int(b[2])], 
                  'y': [int(b[1]), int(b[3]), int(b[3]), int(b[1])], 
                  'object': int(object_id)}
                 for object_id, b in zip(targets, bb)]}
    else: 
        return {}
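A small sketch of the expected output, using a made-up box in (x1, y1, x2, y2) order:

bboxes = np.array([[10.2, 20.7, 30.1, 40.9]])
targets = [7]
print(frame_to_json(bboxes, targets))
# {'polygon': [{'x': [10, 10, 30, 30], 'y': [20, 40, 40, 20], 'object': 7}]}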
recognition_utils.py (project: pybot, author: spillai)
def plot_confusion_matrix(cm, clf_target_names, title='Confusion matrix', cmap=plt.cm.jet):
    # map() returns an iterator in Python 3; materialize it for plt.xticks/yticks
    target_names = [key.replace('_', '-') for key in clf_target_names]

    # normalize each row to integer percentages (np.int was removed in NumPy 1.24)
    for idx in range(len(cm)):
        cm[idx, :] = (cm[idx, :] * 100.0 / np.sum(cm[idx, :])).astype(int)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    # plt.matshow(cm)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(clf_target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    # plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
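A minimal smoke test with a hypothetical 2-class count matrix (float dtype so the in-place row assignment works):

cm = np.array([[8.0, 2.0], [3.0, 7.0]])
plot_confusion_matrix(cm, ['class_a', 'class_b'])
plt.show()   # rows now read as percentages: [80, 20] and [30, 70]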
data_loader_test.py (project: segmentation_DLMI, author: imatge-upc)
def load_ROI_mask(self):

        proxy = nib.load(self.FLAIR_FILE)
        image_array = np.asarray(proxy.dataobj)

        mask = np.ones_like(image_array)
        mask[np.where(image_array < 90)] = 0

        # img = nib.Nifti1Image(mask, proxy.affine)
        # nib.save(img, join(modalities_path,'mask.nii.gz'))

        struct_element_size = (20, 20, 20)
        mask_augmented = np.pad(mask, [(21, 21), (21, 21), (21, 21)], 'constant', constant_values=(0, 0))
        mask_augmented = binary_closing(mask_augmented, structure=np.ones(struct_element_size, dtype=bool)).astype(
            np.int64)  # np.int was removed in NumPy 1.24

        return mask_augmented[21:-21, 21:-21, 21:-21].astype('bool')
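The 21-voxel pad keeps the 20x20x20 structuring element away from the volume border, and the final slice undoes it exactly; a tiny shape check on a made-up array:

m = np.zeros((5, 5, 5))
padded = np.pad(m, [(21, 21)] * 3, 'constant', constant_values=(0, 0))
print(padded.shape, padded[21:-21, 21:-21, 21:-21].shape)   # (47, 47, 47) (5, 5, 5)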
spikesorting.py (project: NeoAnalysis, author: neoanalysis)
def __draw_pk2(self):
        self.__cleanPk2()
        if self.units is not None:
            unique_units = np.unique(self.units)
            unique_units = unique_units.tolist()
            pca_1, pca_2 = self.PCAusedList.currentText().split("-")
            # np.int was removed in NumPy 1.24; the builtin int() parses the strings
            pca_1 = int(pca_1) - 1
            pca_2 = int(pca_2) - 1
            if self.wavePCAs[0].shape[0]>2:
                xs = self.wavePCAs[:,pca_1]
                ys = self.wavePCAs[:,pca_2]
                self.PcaScatterItem = []
                seg_num = 5000
                for i,ite_unit in enumerate(unique_units):
                    mask = self.units==ite_unit
                    temp_xs = xs[mask]
                    temp_ys = ys[mask]
                    segs = int(ceil(temp_xs.shape[0]/float(seg_num)))
                    for j in range(segs):
                        temp_xs_j = temp_xs[j*seg_num:(j+1)*seg_num]
                        temp_ys_j = temp_ys[j*seg_num:(j+1)*seg_num]
                        self.PcaScatterItem.append(pg.ScatterPlotItem(temp_xs_j,temp_ys_j,pen=self.colors[ite_unit],brush=self.colors[ite_unit],size=3,symbol="o"))
                for i in range(len(self.PcaScatterItem)):
                    self.pk2.addItem(self.PcaScatterItem[i])
spikesorting.py (project: NeoAnalysis, author: neoanalysis)
def __unitsNumChanged(self):
        if hasattr(self,"auto_result"):
            if self.autoSortThisCheck.isChecked():

                self.saveChannelCheck.setChecked(False)
                self.chnResultPool.pop(self.selectChan,None)

                self.units = self.auto_result.copy()
                self.units[self.units>int(self.unitsNumWgt.currentText())] = 0

                self.__draw_pk3()
                self.__update_pk3_roi()
                self.__draw_pk2()
                if self.pca_3d is True:
                    self.__draw_3D_PCA()


# The SpikeSorting class uses this class to draw multiple lines quickly in a memory-efficient way.
brainwaref32io.py (project: NeoAnalysis, author: neoanalysis)
def __read_condition(self):
        '''
        Read the parameter values for a single stimulus condition.

        Returns nothing.
        '''
        # float32 -- SpikeTrain length in ms
        self.__t_stop = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]

        # float32 -- number of stimulus parameters
        numelements = int(np.fromfile(self._fsrc, dtype=np.float32,
                                      count=1)[0])

        # [float32] * numelements -- stimulus parameter values
        paramvals = np.fromfile(self._fsrc, dtype=np.float32,
                                count=numelements).tolist()

        # organize the parameters into a dictionary with arbitrary names
        paramnames = ['Param%s' % i for i in range(len(paramvals))]
        self.__params = dict(zip(paramnames, paramvals))
klustakwikio.py (project: NeoAnalysis, author: neoanalysis)
def _load_spike_times(self, fetfilename):
        """Reads and returns the spike times and features"""
        f = open(fetfilename, 'r')  # the Python 2 'file' builtin is gone in Python 3

        # Number of clustering features is integer on first line
        nbFeatures = int(f.readline().strip())

        # Each subsequent line consists of nbFeatures values, followed by
        # the spike time in samples.
        names = ['fet%d' % n for n in range(nbFeatures)]
        names.append('spike_time')

        # Load into recarray (mlab.csv2rec was removed in matplotlib 3.1;
        # np.genfromtxt(f, names=names, delimiter=' ') is a rough stand-in on newer versions)
        data = mlab.csv2rec(f, names=names, skiprows=1, delimiter=' ')
        f.close()

        # get features
        features = np.array([data['fet%d' % n] for n in range(nbFeatures)])

        # Return the spike_time column
        return data['spike_time'], features.transpose()
trainModel.py (project: Sound-classification-on-Raspberry-Pi-with-Tensorflow, author: GianlucaPaolocci)
# extract_features() is defined elsewhere in the project.
import glob
import os

import numpy as np


def parse_audio_files(parent_dir, sub_dirs, file_ext='*.wav'):
    ignored = 0
    features, labels = np.empty((0, 161)), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        print(sub_dir)
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            try:
                mfccs, chroma, mel, contrast, tonnetz = extract_features(fn)
                ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
                features = np.vstack([features, ext_features])
                l = [fn.split('-')[1]] * (mfccs.shape[0])
                labels = np.append(labels, l)
            # the two except clauses were mis-indented in the original
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                ignored += 1
    print("Ignored files: ", ignored)
    return np.array(features), np.array(labels, dtype=np.int64)  # np.int was removed in NumPy 1.24
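The label comes from the file name, assuming UrbanSound-style names where the class id is the second dash-separated field (the name below is made up):

print('12-3-0-1.wav'.split('-')[1])   # '3'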
motif_tools.py (project: mbin, author: fanglab)
def shorten_motifs( contig_motifs, highscore_motifs ):
    """
    Keep only the shortest, most concise version of the high scoring
    motifs (reduces redundancy).
    """
    keeper_motifs    = set(highscore_motifs.keys())
    if len(highscore_motifs)>0:
        shortest_contiguous = min([len(m.split("-")[0]) for m in highscore_motifs.keys()])
        # (1) Sort by keys; shortest motif to longest
        motifs_s = sorted(highscore_motifs, key=len)
        # (2) For each motif, check if it's contained in a longer version of other motifs
        for m in motifs_s:
            motif_str =     m.split("-")[0]
            motif_idx = int(m.split("-")[1])
            for remaining in list(keeper_motifs):
                remaining_str =     remaining.split("-")[0]
                remaining_idx = int(remaining.split("-")[1])
                match         = re.search(motif_str, remaining_str)
                if match is not None and (motif_idx + match.start()) == remaining_idx and len(remaining_str) > len(motif_str):
                    # (3) If so, remove the longer version
                    keeper_motifs.remove(remaining)
    return keeper_motifs
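A quick sketch with two made-up scored motifs, where 'GATC' at index 1 is contained in 'GGATC' at index 2:

highscore = {'GATC-1': 12.0, 'GGATC-2': 11.5}
print(shorten_motifs({}, highscore))   # {'GATC-1'}: the longer, redundant motif is dropped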
process.py (project: seqhawkes, author: mlukasik)
def spontaneousnode_count(
    infecting_vec,
    infected_vec,
    node_vec,
    D,
    ):
    '''
    Returns a vector with count of spontaneous infections for nodes.

    Arguments:
    infecting_vec - vector of infecting event ids
    infected_vec - vector of event ids
    node_vec - vector of infected node ids
    D - number of nodes
    '''

    spontaneous_nodes = node_vec[infecting_vec == infected_vec]
    updates = np.zeros((D, 1))
    for node in spontaneous_nodes:
        updates[int(node)] += 1
    return updates
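A worked example with made-up event vectors (an event whose infecting id equals its own id is spontaneous):

infecting_vec = np.array([0, 0, 2])   # event 1 was infected by event 0
infected_vec  = np.array([0, 1, 2])
node_vec      = np.array([3, 1, 3])
print(spontaneousnode_count(infecting_vec, infected_vec, node_vec, 4).ravel())
# events 0 and 2 are spontaneous, both on node 3 -> [0. 0. 0. 2.]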
process.py (project: seqhawkes, author: mlukasik)
def spontaneousmeme_count(
    infecting_vec,
    infected_vec,
    eventmemes,
    M,
    ):
    '''
    Returns a vector with count of spontaneous infections for memes.

    Arguments:
    infecting_vec - vector of infecting event ids
    infected_vec - vector of event ids
    eventmemes - vector of meme ids
    M - number of memes
    '''

    spontaneous_memes = eventmemes[infecting_vec == infected_vec]
    updates = np.zeros((M, 1))
    for meme in spontaneous_memes:
        updates[int(meme)] += 1
    return updates
process.py (project: seqhawkes, author: mlukasik)
def infecting_node(infected_vec, infecting_vec, node_vec):
    '''
    Returns a vector of nodes of infecting events.

    Arguments:
    infecting_vec - vector of infecting event ids
    infected_vec - vector of event ids
    node_vec - vector of infected node ids
    '''

    infecting_node_vec = []
    eventid_to_node = {}

    # itertools.izip is Python 2-only; the builtin zip is lazy in Python 3
    for (evid, inf_evid, nodeid) in zip(infected_vec, infecting_vec,
                                        node_vec):
        eventid_to_node[int(evid)] = nodeid
        infecting_node_vec.append(eventid_to_node[int(inf_evid)])
    infecting_node_vec = np.array(infecting_node_vec).flatten()
    return (infecting_node_vec, eventid_to_node)
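A small example with made-up vectors (every infecting event must appear before the events it infects):

infected_vec  = np.array([0, 1, 2])
infecting_vec = np.array([0, 0, 1])
node_vec      = np.array([5, 6, 7])
vec, mapping = infecting_node(infected_vec, infecting_vec, node_vec)
print(vec)   # [5 5 6]: each event reports the node of the event that infected it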
train_svms.py (project: adversarial-frcnn, author: xiaolonw)
def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--def', dest='prototxt',
                        help='prototxt file defining the network',
                        default=None, type=str)
    parser.add_argument('--net', dest='caffemodel',
                        help='model to test',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file', default=None, type=str)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to train on',
                        default='voc_2007_trainval', type=str)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args
load_feature.py (project: EmotiW-2017-Audio-video-Emotion-Recognition, author: xujinchang)
import numpy as np


def load_Y(X_signals_paths):
    """
    Given the paths of the feature files (train or test), read each file into
    an array and concatenate the first seven of them along the sample axis.
        argument:   X_signals_paths  list of feature file paths
        return:     np ndarray of integer features
    """
    X_signals = []

    for signal_type_path in X_signals_paths:
        # Read dataset from disk, dealing with text files' syntax; text mode
        # ('r', not 'rb') so each row splits into str tokens rather than bytes
        with open(signal_type_path, 'r') as file:
            X_signals.append(
                [np.array(serie, dtype=np.int64) for serie in [
                    row.strip().split(' ') for row in file
                ]]
            )

    return np.concatenate((X_signals[0], X_signals[1], X_signals[2], X_signals[3],
                           X_signals[4], X_signals[5], X_signals[6]), axis=0)
plot.py (project: sound_field_analysis-py, author: QULab)
def plot2D(data, title=None, viz_type=None, fs=None, line_names=None):
    """Visualize 2D data using plotly.

    Parameters
    ----------
    data : array_like
       Data to be plotted, separated along the first dimension (rows).
    title : string, optional
       Title to be displayed on the plot. [Default: None]
    viz_type : string{None, 'time', 'linFFT', 'logFFT'}
       Type of data to be displayed. [Default: None]
    fs : int, optional
       Sampling rate in Hz. [Default: None]
    line_names : list of string, optional
       Legend names for the individual rows. [Default: None]
    """

    layout = layout_2D(viz_type, title)
    traces = prepare_2D_traces(data, viz_type, fs, line_names=line_names)

    showTrace(traces, layout=layout, title=title)
sph.py (project: sound_field_analysis-py, author: QULab)
def sph_harm_all(nMax, az, el, type='complex'):
    '''Compute all spherical harmonic coefficients up to degree nMax.

    Parameters
    ----------
    nMax : (int)
        Maximum degree of coefficients to be returned. n >= 0

    az: (float), array_like
        Azimuthal (longitudinal) coordinate [0, 2pi], also called Theta.

    el : (float), array_like
        Elevation (colatitudinal) coordinate [0, pi], also called Phi.

    Returns
    -------
    y_mn : (complex float), array_like
        Complex spherical harmonics of degrees n [0 ... nMax] and all corresponding
        orders m [-n ... n], sampled at [az, el]. dim1 corresponds to az/el pairs,
        dim2 to order/degree (m, n) pairs like 0/0, -1/1, 0/1, 1/1, -2/2, -1/2 ...
    '''
    m, n = mnArrays(nMax)
    mA, azA = _np.meshgrid(m, az)
    nA, elA = _np.meshgrid(n, el)
    return sph_harm(mA, nA, azA, elA, type=type)
sph.py (project: sound_field_analysis-py, author: QULab)
def kr_full_spec(fs, radius, NFFT, temperature=20):
    """Returns full spectrum kr

    Parameters
    ----------
    fs : int
       Sampling rate in Hertz
    radius : float
       Radius
    NFFT : int
       Number of frequency bins
    temperature : float, optional
       Temperature in degrees Celsius (Default: 20 C)

    Returns
    -------
    kr : array_like
       kr vector of length NFFT/2 + 1 spanning the frequencies of 0:fs/2
    """
    freqs = _np.linspace(0, fs / 2, NFFT // 2 + 1)  # linspace needs an integer bin count in Python 3
    return kr(freqs, radius, temperature)

# DEBUG
gen.py (project: sound_field_analysis-py, author: QULab)
def whiteNoise(fftData, noiseLevel=-80):  # default changed from 80 to match the documented -80 dB
    '''Adds White Gaussian Noise of approx. 16dB crest to a FFT block.

    Parameters
    ----------
    fftData : array of complex floats
       Input fftData block (e.g. from F/D/T or S/W/G)
    noiseLevel : int, optional
       Average noise Level in dB [Default: -80dB]

    Returns
    -------
    noisyData : array of complex floats
       Output fftData block including white gaussian noise
    '''
    dimFactor = 10**(noiseLevel / 20)
    fftData = _np.atleast_2d(fftData)
    channels = fftData.shape[0]
    NFFT = fftData.shape[1] * 2 - 2
    nNoise = _np.random.rand(channels, NFFT)
    nNoise = dimFactor * nNoise / _np.mean(_np.abs(nNoise))
    nNoiseSpectrum = _np.fft.rfft(nNoise, axis=1)
    return fftData + nNoiseSpectrum
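A quick shape check (hypothetical two-channel block; NFFT is recovered from the rfft length):

block = _np.fft.rfft(_np.random.randn(2, 64), axis=1)   # 2 channels, NFFT = 64
noisy = whiteNoise(block, noiseLevel=-80)
print(noisy.shape)   # (2, 33)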
gen.py (project: sound_field_analysis-py, author: QULab)
def radial_filter_fullspec(max_order, NFFT, fs, array_configuration, amp_maxdB=40):
    """Generate NFFT/2 + 1 modal radial filter of orders 0:max_order for frequencies 0:fs/2, wraps radial_filter()

    Parameters
    ----------
    max_order : int
       Maximum order
    NFFT : int
       Order of FFT (number of bins), should be a power of 2.
    fs : int
       Sampling frequency
    array_configuration : ArrayConfiguration
       List/Tuple/ArrayConfiguration, see io.ArrayConfiguration
    amp_maxdB : int, optional
       Maximum modal amplification limit in dB [Default: 40]

    Returns
    -------
    dn : array_like
       Vector of modal frequency domain filter of shape [max_order + 1 x NFFT / 2 + 1]
    """

    freqs = _np.linspace(0, fs / 2, NFFT // 2 + 1)  # linspace needs an integer bin count in Python 3
    orders = _np.r_[0:max_order + 1]
    return radial_filter(orders, freqs, array_configuration, amp_maxdB=amp_maxdB)
gen.py (project: sound_field_analysis-py, author: QULab)
def spherical_noise(gridData=None, order_max=8, spherical_harmonic_bases=None):
    ''' Returns order-limited random weights on a spherical surface

    Parameters
    ----------
    gridData : io.SphericalGrid
       SphericalGrid containing azimuth and colatitude
    order_max : int, optional
        Spherical order limit [Default: 8]

    Returns
    -------
    noisy_weights : array_like, complex
       Noisy weigths
    '''

    if spherical_harmonic_bases is None:
        if gridData is None:
            raise TypeError('Either a grid or the spherical harmonic bases have to be provided.')
        gridData = SphericalGrid(*gridData)
        spherical_harmonic_bases = sph_harm_all(order_max, gridData.azimuth, gridData.colatitude)
    else:
        order_max = int(_np.sqrt(spherical_harmonic_bases.shape[1]) - 1)  # np.int was removed in NumPy 1.24
    return _np.inner(spherical_harmonic_bases, _np.random.randn((order_max + 1) ** 2) + 1j * _np.random.randn((order_max + 1) ** 2))
NNVis.py (project: MLPractices, author: carefree0910)
def place_graph(graphs, half_block_width, img, i, j, x, y):
    """
    Rendering neuron block
    :param graphs           : Neuron graphs
    :param half_block_width : int(neuron_block_width / 2)
    :param img              : Canvas
    :param i                : i-th hidden layer  
    :param j                : j-th neuron in i-th hidden layer
    :param x                : (x, y) is the center of the neuron graph on the canvas
    :param y                : (x, y) is the center of the neuron graph on the canvas
    :return                 : None 
    """
    ############################################################
    #                  Write your code here!                   #
    ############################################################

    pass

    ############################################################
    #                           End                            #
    ############################################################
NNVis.py (project: MLPractices, author: carefree0910)
def place_graph(graphs, half_block_width, img, i, j, x, y):
    """
    Render neuron graph
    :param graphs           : Neuron graphs
    :param half_block_width : int(neuron_graph_width / 2)
    :param img              : Canvas
    :param i                : i-th hidden layer
    :param j                : j-th neuron in i-th hidden layer
    :param x                : (x, y) is the center of the neuron graph on the canvas
    :param y                : (x, y) is the center of the neuron graph on the canvas
    """
    ############################################################
    #                  Write your code here!                   #
    ############################################################

    graph = graphs[i][j]
    img[y - half_block_width:y + half_block_width, x - half_block_width:x + half_block_width] = graph

    ############################################################
    #                           End                            #
    ############################################################
Routines.py (project: structured-output-ae, author: sbelharbi)
def construct_train_valid_sets(self, data, p=1/3.):
        ''' Construct train/valid sets from a dataset by randomly selecting
        a fraction p of the samples for validation and keeping the rest for training.

        data: tuple (X, Y)
        p: real in (0, 1), the fraction of samples to use for validation.

        The samples are selected randomly.
        '''
        x = data[0]
        y = data[1]
        nbr = x.shape[0]
        nbr_vald = int(nbr * p)
        index = np.arange(nbr)
        # shuffle the index
        np.random.shuffle(index)
        idx_valid = index[:nbr_vald]
        idx_train = index[nbr_vald:]

        x_train = x[idx_train]
        y_train = y[idx_train]

        x_valid = x[idx_valid]
        y_valid = y[idx_valid]

        return [(x_train, y_train), (x_valid, y_valid)]
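A usage sketch, assuming `routines` is an instance of the containing class (the data below is made up):

X = np.arange(12).reshape(6, 2)
Y = np.arange(6)
(x_tr, y_tr), (x_va, y_va) = routines.construct_train_valid_sets((X, Y), p=1/3.)
print(x_tr.shape, x_va.shape)   # (4, 2) (2, 2): a third of the 6 samples go to validation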

