Python fft() usage examples

tools.py (project: AutoSleepScorerDev, author: skjerns)
def get_freqs (signals, nbins=0):
    """ extracts relative fft frequencies and bins them in n bins
    :param signals: 1D or 2D signals
    :param nbins:  number of bins used as output (default: maximum possible)
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    if nbins == 0: nbins = int(sfreq/2)

    nsamp = float(signals.shape[1])
    assert nsamp/2 >= nbins, 'more bins than fft results' 

    feats = np.zeros((int(signals.shape[0]),nbins),dtype='float32')
    w = (fft(signals,axis=1)).real
    for i in np.arange(nbins):
        feats[:,i] =  np.sum(np.abs(w[:,np.arange(i*nsamp/sfreq,(i+1)*nsamp/sfreq, dtype=int)]),axis=1)
    sum_abs_pow = np.sum(feats,axis=1)
    feats = (feats.T / sum_abs_pow).T
    return feats
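
For reference, the same relative-power binning can be written as a self-contained sketch with NumPy alone. The excerpt relies on module-level names `np`, `fft` and `use_sfreq` from tools.py; the values below are illustrative stand-ins:

import numpy as np

use_sfreq_demo = 100                          # stands in for the use_sfreq global
signal = np.random.randn(1, 3000)             # one 30 s epoch sampled at 100 Hz
nbins = 10
nsamp = signal.shape[1]
w = np.fft.fft(signal, axis=1).real           # same as fft(signals, axis=1).real above
edges = np.arange(nbins + 1) * nsamp // use_sfreq_demo
feats = np.stack([np.abs(w[:, edges[i]:edges[i + 1]]).sum(axis=1)
                  for i in range(nbins)], axis=1)
feats = feats / feats.sum(axis=1, keepdims=True)   # rows sum to 1, as in get_freqs
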
disp_mp3.py (project: audio_scripts, author: audiofilter)
def get_noise(start):
    # read audio samples
    input_data = read('junk.wav')
    audio_in = input_data[1]
    samples = len(audio_in)
    intvl = (samples-start)/seg
    k = start
    sum_data = numpy.zeros(seg)
    for i in xrange(intvl):
        buffer_data = []
        for j in xrange(seg):
            buffer_data.append(audio_in[k])
            k = k+1
        cbuffer_out = fft(buffer_data)
        for j in xrange(seg):
            sq = abs(cbuffer_out[j])**2.0
            sum_data[j] = sum_data[j]+sq

    for j in xrange(seg):
        sum_data[j] = sqrt(sum_data[j]/intvl)
    return sum_data
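
The double loop above just averages frame power spectra; `seg` (frame length) and `read` come from elsewhere in disp_mp3.py. A vectorised equivalent, as a rough sketch on synthetic data:

import numpy as np

seg = 1024
audio_tail = np.random.randn(seg * 40)        # stands in for audio_in[start:]
frames = audio_tail[: (len(audio_tail) // seg) * seg].reshape(-1, seg)
spectra = np.fft.fft(frames, axis=1)
noise_profile = np.sqrt(np.mean(np.abs(spectra) ** 2, axis=0))   # RMS magnitude per bin, like sum_data
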
tone_est.py (project: audio_scripts, author: audiofilter)
def find_top_two_peaks(sdata):
    samples = len(sdata)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(sdata[0:fft_size])
    pdata = numpy.zeros(fft_size)
    for i in xrange(fft_size): pdata[i] = abs(freq[i])
    peak = 0
    peak1 = 0
    peak2 = 0
    peak1_index = 0
    peak2_index = 0
    for i in xrange(fft_size/2):
        if (pdata[i] > peak1):
            peak1 = pdata[i]
            peak1_index = i
    for i in xrange(fft_size/2):
        if (pdata[i] > peak2) and (abs(i - peak1_index) > 4):
            peak2 = pdata[i]
            peak2_index = i
    return (peak1,peak1_index,peak2,peak2_index)
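
The snippet expects `fft`, `floor`, `log` and Python 2's `xrange` in scope. A standalone NumPy sketch of the same two-peak search, checked on a two-tone test signal:

import numpy as np

fs = 8000
t = np.arange(fs) / fs                               # 1 second of samples
x = np.sin(2 * np.pi * 440 * t) + 0.5 * np.sin(2 * np.pi * 1000 * t)
p = np.abs(np.fft.fft(x))[: len(x) // 2]
i1 = int(np.argmax(p))                               # strongest bin (peak1_index)
mask = np.abs(np.arange(len(p)) - i1) > 4            # skip bins adjacent to the first peak
i2 = int(np.argmax(np.where(mask, p, 0.0)))          # second peak (peak2_index)
print(i1 * fs / len(x), i2 * fs / len(x))            # roughly 440 Hz and 1000 Hz
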


# REMOVAL CASES
spectrogram.py (project: audio_scripts, author: audiofilter)
def save_fft(fil,audio_in):
    samples = len(audio_in)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(audio_in[0:fft_size])
    s_data = numpy.zeros(fft_size/2)
    x_data = numpy.zeros(fft_size/2)
    peak = 0;
    for j in xrange(fft_size/2):
        if (abs(freq[j]) > peak):
            peak = abs(freq[j])

    for j in xrange(fft_size/2):
        x_data[j] = log(2.0*(j+1.0)/fft_size);
        if (x_data[j] < -10):
            x_data[j] = -10
        s_data[j] = 10.0*log(abs(freq[j])/peak)/log(10.0)
    plt.ylim([-50,0])
    plt.plot(x_data,s_data)
    plt.title('fft log power')
    plt.grid()

    fields = fil.split('.')
    plt.savefig(fields[0]+'_fft.png', bbox_inches="tight")
    plt.clf()
    plt.close()
spectrogram.py (project: audio_scripts, author: audiofilter)
def logscale_spec(spec, sr=44100, factor=20.):
    timebins, freqbins = np.shape(spec)

    scale = np.linspace(0, 1, freqbins) ** factor
    scale *= (freqbins-1)/max(scale)
    scale = np.unique(np.round(scale)).astype(int)   # integer bin edges for the slicing below

    # create spectrogram with new freq bins
    newspec = np.zeros([timebins, len(scale)], dtype=np.complex128)
    for i in range(0, len(scale)):
        if i == len(scale)-1:
            newspec[:,i] = np.sum(spec[:,scale[i]:], axis=1)
        else:        
            newspec[:,i] = np.sum(spec[:,scale[i]:scale[i+1]], axis=1)

    # list center freq of bins
    allfreqs = np.abs(np.fft.fftfreq(freqbins*2, 1./sr)[:freqbins+1])
    freqs = []
    for i in range(0, len(scale)):
        if i == len(scale)-1:
            freqs += [np.mean(allfreqs[scale[i]:])]
        else:
            freqs += [np.mean(allfreqs[scale[i]:scale[i+1]])]

    return newspec, freqs
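
Assuming the function above (with integer bin indices) and `import numpy as np` are available, a minimal call on a synthetic magnitude spectrogram might look like this:

import numpy as np

spec = np.abs(np.random.randn(200, 512))              # (timebins, freqbins) magnitude spectrogram
newspec, freqs = logscale_spec(spec, sr=44100, factor=20.)
print(newspec.shape, len(freqs))                       # fewer, logarithmically spaced frequency bins
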
tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_eeg(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming the signal is recorded at 100 Hz
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],9),dtype='float32')
    # relative power features for the frequency bands
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    spindle = np.sum(np.abs(w[:,np.arange(12*nsamp/sfreq,14*nsamp/sfreq, dtype=int)]),axis=1)
    sum_abs_pow = delta + theta + alpha + beta + gamma + spindle
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = spindle /sum_abs_pow
    feats[:,6] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))        # kurtosis
    feats[:,7] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,8] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    if np.any(np.isnan(feats)): print('NaN detected')
    return np.nan_to_num(feats)
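
feat_eeg (like the other tools.py excerpts) assumes module-level names `np`, `fft` (e.g. scipy.fftpack.fft), `stats` (scipy.stats) and a `use_sfreq` sampling-rate variable. A hedged usage sketch with those names supplied:

import numpy as np
from scipy.fftpack import fft
from scipy import stats

use_sfreq = 100                            # the docstring assumes 100 Hz recordings
epochs = np.random.randn(5, 3000)          # five 30 s EEG epochs
features = feat_eeg(epochs)                # shape (5, 9): band powers, kurtosis, entropy, ...
print(features.shape)
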
tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_wavelet(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming the signal is recorded at 100 Hz
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],8),dtype='float32')
    # relative power features for the five frequency bands
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))        # kurtosis
    feats[:,6] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,7] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    if np.any(np.isnan(feats)): print('NaN detected')

    return np.nan_to_num(feats)
tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_eog(signals):
    """
    calculate the EOG features
    :param signals: 1D or 2D signals
    """

    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real   
    feats = np.zeros((signals.shape[0],15),dtype='float32')
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    feats[:,6] = np.sqrt(np.max(signals, axis=1))    #PAV
    feats[:,7] = np.sqrt(np.abs(np.min(signals, axis=1)))   #VAV   
    feats[:,8] = np.argmax(signals, axis=1)/nsamp #PAP
    feats[:,9] = np.argmin(signals, axis=1)/nsamp #VAP
    feats[:,10] = np.sqrt(np.sum(np.abs(signals), axis=1)/ np.mean(np.sum(np.abs(signals), axis=1))) # AUC
    feats[:,11] = np.sum(((np.roll(np.sign(signals), 1,axis=1) - np.sign(signals)) != 0).astype(int),axis=1)/nsamp #TVC
    feats[:,12] = np.log10(np.std(signals, axis=1)) #STD/VAR
    feats[:,13] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))       # kurtosis
    feats[:,14] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    if np.any(np.isnan(feats)): print('NaN detected')
    return np.nan_to_num(feats)
tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_emg(signals):
    """
    calculate the EMG median as defined by Leangkvist (2012),
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real   
    feats = np.zeros((signals.shape[0],13),dtype='float32')
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    emg = np.sum(np.abs(w[:,np.arange(12.5*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)
    feats[:,6] = emg / np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)  # ratio of high freq to total motor
    feats[:,7] = np.median(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)    # median freq
    feats[:,8] = np.mean(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)   #  mean freq
    feats[:,9] = np.std(signals, axis=1)    #  std 
    feats[:,10] = np.mean(signals,axis=1)
    feats[:,11] = np.log10(stats.kurtosis(signals,fisher=False,axis=1) )
    feats[:,12] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    if np.any(np.isnan(feats)): print('NaN detected')

    return np.nan_to_num(feats)
tone_est_ok.py (project: audio_scripts, author: audiofilter)
def tone_est(sdata,sr):
    samples = len(sdata)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(sdata[0:fft_size])
    pdata = numpy.zeros(fft_size)
    for i in xrange(fft_size): pdata[i] = abs(freq[i])
    peak = 0
    peak_index = 0
    for i in xrange(fft_size/2):
        if (pdata[i] > peak):
            peak = pdata[i]
            peak_index = i


    R = peak*peak;
    p = (freq[peak_index+1].real * freq[peak_index].real + freq[peak_index+1].imag * freq[peak_index].imag)/R

    g = -p/(1.0-p)
    q = (freq[peak_index-1].real * freq[peak_index].real + freq[peak_index-1].imag * freq[peak_index].imag)/R
    e = q/(1.0-q)

    if ((p>0) and (q>0)):
        d = p
    else:
        d = q

    u = peak_index + d
    print "peak is at ",peak_index,"(",u,") and is ",peak        
    #print "u = ",0.5*u*sr/fft_size,' f[0] = ',f[0]

    sum_phase = freq[peak_index-1]*c(-1,d) + freq[peak_index]*c(0,d) + freq[peak_index+1]*c(1,d)

    sum_c_sq = abs(c(-1,d))*abs(c(-1,d)) + abs(c(0,d))*abs(c(0,d)) + abs(c(1,d))*abs(c(1,d))

    amp = (abs(sum_phase)/sum_c_sq)/fft_size

    phase_r = cmath.phase(sum_phase)
    freq_est = 0.5*u*sr/fft_size


    return (amp,freq_est,phase_r)
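
The refinement step above uses a helper c(k, d) defined elsewhere in tone_est_ok.py and not shown in this excerpt. For orientation, a common self-contained alternative for sub-bin frequency estimation is parabolic interpolation on the log-magnitude spectrum; this sketch is not the snippet's exact method:

import numpy as np

fs = 8000
t = np.arange(2048) / fs
x = np.sin(2 * np.pi * 1234.5 * t)
mag = np.abs(np.fft.fft(x))
k = int(np.argmax(mag[: len(x) // 2]))               # coarse peak bin
a, b, g = np.log(mag[k - 1]), np.log(mag[k]), np.log(mag[k + 1])
d = 0.5 * (a - g) / (a - 2 * b + g)                  # fractional bin offset
freq_est = (k + d) * fs / len(x)
print(freq_est)                                       # close to 1234.5 Hz
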
tone_est_ok.py (project: audio_scripts, author: audiofilter)
def tone_est_near_index(sdata,index,range,sr):
    samples = len(sdata)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(sdata[0:fft_size])
    pdata = numpy.zeros(fft_size)
    for i in xrange(fft_size): pdata[i] = abs(freq[i])
    peak = 0
    peak_index = 0
    for i in xrange(2*range):
        if (pdata[index+i-range] > peak):
            peak = pdata[index+i-range]
            peak_index = index+i-range

    print "peak is at ",peak_index," and is ",peak        

    R = peak*peak;
    p = (freq[peak_index+1].real * freq[peak_index].real + freq[peak_index+1].imag * freq[peak_index].imag)/R

    g = -p/(1.0-p)
    q = (freq[peak_index-1].real * freq[peak_index].real + freq[peak_index-1].imag * freq[peak_index].imag)/R
    e = q/(1.0-q)

    if ((p>0) and (q>0)):
        d = p
    else:
        d = q

    u = peak_index + d

    sum_phase = freq[peak_index-1]*c(-1,d) +    freq[peak_index]*c(0,d) + freq[peak_index+1]*c(1,d)

    sum_c_sq = abs(c(-1,d))*abs(c(-1,d)) +    abs(c(0,d))*abs(c(0,d)) + abs(c(1,d))*abs(c(1,d))

    amp = (abs(sum_phase)/sum_c_sq)/fft_size

    phase_r = cmath.phase(sum_phase)
    freq_est = 0.5*u*sr/fft_size

    return (amp,freq_est,phase_r)
disp_mp3.py (project: audio_scripts, author: audiofilter)
def process_data(start,sum_data):
    input_data = read('junk.wav')
    audio_in = input_data[1]
    samples = len(audio_in)
    intvl = start/seg
    k = 0
    var_thres = 2.2
    data_out=[]

    #print "intvl = ",intvl,start,seg
    for i in xrange(intvl):
        buffer_out = []
        for j in xrange(seg):
            buffer_out.append(audio_in[k])
            k = k+1
        cbuffer_out = fft(buffer_out)
        for j in xrange(seg):
            if (abs(cbuffer_out[j]) < var_thres*sum_data[j]):
                cbuffer_out[j] = 0.02*cbuffer_out[j];
        buf_out = ifft(cbuffer_out)
        for j in xrange(seg):
            data_out.append(buf_out[j].real)

    sar = numpy.array(data_out, dtype=numpy.int16)
    write("junk_out.wav",44100,sar)
    cmd4 = 'lame junk_out.wav junk_out.mp3 >enc.log 2>&1 '
    os.system(cmd4)
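
Taken together, get_noise and process_data implement a simple spectral gate: bins whose magnitude stays below var_thres times the measured noise floor are attenuated to 2 % before the inverse FFT. A compact standalone sketch of that idea (sizes and thresholds are illustrative):

import numpy as np

seg, var_thres = 1024, 2.2
audio = np.random.randn(seg * 50)                     # stand-in for the wav samples
frames = audio[: (len(audio) // seg) * seg].reshape(-1, seg)
noise_profile = np.sqrt(np.mean(np.abs(np.fft.fft(frames[:10], axis=1)) ** 2, axis=0))

spectra = np.fft.fft(frames, axis=1)
spectra = np.where(np.abs(spectra) < var_thres * noise_profile, 0.02 * spectra, spectra)
cleaned = np.fft.ifft(spectra, axis=1).real.ravel()   # back to a 1-D signal
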
tone_est.py (project: audio_scripts, author: audiofilter)
def tone_est(sdata,sr):
    samples = len(sdata)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(sdata[0:fft_size])
    pdata = numpy.zeros(fft_size)
    for i in xrange(fft_size): pdata[i] = abs(freq[i])
    peak = 0
    peak_index = 0
    for i in xrange(fft_size/2):
        if (pdata[i] > peak):
            peak = pdata[i]
            peak_index = i

    R = peak*peak;
    p = (freq[peak_index+1].real * freq[peak_index].real + freq[peak_index+1].imag * freq[peak_index].imag)/R
    q = (freq[peak_index-1].real * freq[peak_index].real + freq[peak_index-1].imag * freq[peak_index].imag)/R

    g = -p/(1.0-p)
    e = q/(1.0-q)

    if ((p>0) and (q>0)):
        d = p
    else:
        d = q

    u = peak_index + d
    freq_est = u*sr/fft_size
    if (debug_estimates):
        print "peak is at ",peak_index,"(",u,") and is ",peak

    #d = 0.5*(p+q) + h(p*p) + h(q*q)
    #print "other peak index (2)", u+d

    sum_phase = freq[peak_index-1]*c(-1,d) + freq[peak_index]*c(0,d) + freq[peak_index+1]*c(1,d)
    sum_c_sq = abs(c(-1,d))*abs(c(-1,d)) + abs(c(0,d))*abs(c(0,d)) + abs(c(1,d))*abs(c(1,d))
    amp = (abs(sum_phase)/sum_c_sq)/fft_size
    phase_r = cmath.phase(sum_phase)

    return (amp,freq_est,phase_r)
tone_est.py (project: audio_scripts, author: audiofilter)
def tone_est_above_index(sdata,index,sr):
    samples = len(sdata)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(sdata[0:fft_size])
    pdata = numpy.zeros(fft_size)
    for i in xrange(fft_size): pdata[i] = abs(freq[i])
    peak = 0
    peak_index = 0
    for i in xrange(fft_size/2):
        if (i > index):
            if (pdata[i] > peak):
                peak = pdata[i]
                peak_index = i

    R = peak*peak;
    p = (freq[peak_index+1].real * freq[peak_index].real + freq[peak_index+1].imag * freq[peak_index].imag)/R

    g = -p/(1.0-p)
    q = (freq[peak_index-1].real * freq[peak_index].real + freq[peak_index-1].imag * freq[peak_index].imag)/R
    e = q/(1.0-q)

    if ((p>0) and (q>0)):
        d = p
    else:
        d = q

    u = peak_index + d
    if (debug_estimates):
        print "peak is at ",peak_index,"(",u,") and is ",peak        

    sum_phase = freq[peak_index-1]*c(-1,d) +    freq[peak_index]*c(0,d) + freq[peak_index+1]*c(1,d)

    sum_c_sq = abs(c(-1,d))*abs(c(-1,d)) +    abs(c(0,d))*abs(c(0,d)) + abs(c(1,d))*abs(c(1,d))

    amp = (abs(sum_phase)/sum_c_sq)/fft_size

    phase_r = cmath.phase(sum_phase)
    freq_est = u*sr/fft_size

    return (amp,freq_est,phase_r)
util.py (project: pyssp, author: shunsukeaihara)
def compute_avgamplitude(signal, winsize, window):
    windownum = int(len(signal) / (winsize / 2)) - 1
    avgamp = np.zeros(winsize)
    for l in xrange(windownum):
        avgamp += np.absolute(sp.fft(get_frame(signal, winsize, l) * window))
    return avgamp / float(windownum)
util.py (project: pyssp, author: shunsukeaihara)
def compute_avgpowerspectrum(signal, winsize, window):
    windownum = int(len(signal) / (winsize / 2)) - 1
    avgpow = np.zeros(winsize)
    for l in xrange(windownum):
        avgpow += np.absolute(sp.fft(get_frame(signal, winsize, l) * window))**2.0
    return avgpow / float(windownum)
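
Both helpers depend on a get_frame(signal, winsize, l) function defined elsewhere in util.py (presumably returning half-overlapping frames) and on the legacy callable sp.fft. A Welch-style equivalent with NumPy only, as a sketch:

import numpy as np

winsize = 512
signal = np.random.randn(16000)
window = np.hanning(winsize)
hop = winsize // 2
frames = np.stack([signal[s:s + winsize] * window
                   for s in range(0, len(signal) - winsize + 1, hop)])
avgamp = np.mean(np.abs(np.fft.fft(frames, axis=1)), axis=0)        # cf. compute_avgamplitude
avgpow = np.mean(np.abs(np.fft.fft(frames, axis=1)) ** 2, axis=0)   # cf. compute_avgpowerspectrum
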
fft.py (project: Building-Machine-Learning-Systems-With-Python-Second-Edition, author: PacktPublishing)
def write_fft(fft_features, fn):
    """
    Write the FFT features to separate files to speed up processing.
    """
    base_fn, ext = os.path.splitext(fn)
    data_fn = base_fn + ".fft"

    np.save(data_fn, fft_features)
    print("Written "%data_fn)
fft.py (project: Building-Machine-Learning-Systems-With-Python-Second-Edition, author: PacktPublishing)
def create_fft(fn):
    sample_rate, X = scipy.io.wavfile.read(fn)

    fft_features = abs(scipy.fft(X)[:1000])
    write_fft(fft_features, fn)
fft.py (project: Building-Machine-Learning-Systems-With-Python-Second-Edition, author: PacktPublishing)
def read_fft(genre_list, base_dir=GENRE_DIR):
    X = []
    y = []
    for label, genre in enumerate(genre_list):
        genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
        file_list = glob.glob(genre_dir)
        assert(file_list), genre_dir
        for fn in file_list:
            fft_features = np.load(fn)

            X.append(fft_features[:2000])
            y.append(label)

    return np.array(X), np.array(y)
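
create_fft, write_fft and read_fft form a small caching pipeline: each wav file gets a companion .fft.npy file holding its first FFT coefficients, which read_fft later loads per genre. A hedged usage sketch (paths and genre names are placeholders):

import glob
import os

GENRE_DIR = "/path/to/genres"                          # placeholder directory
genres = ["classical", "jazz", "metal"]                # placeholder genre folders

for genre in genres:
    for fn in glob.glob(os.path.join(GENRE_DIR, genre, "*.wav")):
        create_fft(fn)                                  # writes song.fft.npy next to song.wav

X, y = read_fft(genres, base_dir=GENRE_DIR)             # feature matrix and integer labels
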
fft.py (project: Building-Machine-Learning-Systems-With-Python-Second-Edition, author: PacktPublishing)
def plot_wav_fft(wav_filename, desc=None):
    plt.clf()
    plt.figure(num=None, figsize=(6, 4))
    sample_rate, X = scipy.io.wavfile.read(wav_filename)
    spectrum = np.fft.fft(X)
    freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)

    plt.subplot(211)
    num_samples = 200.0
    plt.xlim(0, num_samples / sample_rate)
    plt.xlabel("time [s]")
    plt.title(desc or wav_filename)
    plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
    plt.grid(True)

    plt.subplot(212)
    plt.xlim(0, 5000)
    plt.xlabel("frequency [Hz]")
    plt.xticks(np.arange(5) * 1000)
    if desc:
        desc = desc.strip()
        fft_desc = desc[0].lower() + desc[1:]
    else:
        fft_desc = wav_filename
    plt.title("FFT of %s" % fft_desc)
    plt.plot(freq, abs(spectrum), linewidth=5)
    plt.grid(True)

    plt.tight_layout()

    rel_filename = os.path.split(wav_filename)[1]
    plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
                bbox_inches='tight')

    plt.show()
transform.py (project: WaveletQuotes, author: JobyKK)
def cwt_freq(data, wavelet, widths, dt, axis):
    # compute in frequency
    # next highest power of two for padding
    N = data.shape[axis]
    pN = int(2 ** np.ceil(np.log2(N)))
    # N.B. padding in fft adds zeros to the *end* of the array,
    # not equally either end.
    fft_data = scipy.fft(data, n=pN, axis=axis)
    # frequencies
    w_k = np.fft.fftfreq(pN, d=dt) * 2 * np.pi

    # sample wavelet and normalise
    norm = (2 * np.pi * widths / dt) ** .5
    wavelet_data = norm[:, None] * wavelet(w_k, widths[:, None])

    # Convert negative axis. Add one to account for
    # inclusion of widths axis above.
    axis = (axis % data.ndim) + 1

    # perform the convolution in frequency space
    slices = [slice(None)] + [None for _ in data.shape]
    slices[axis] = slice(None)

    out = scipy.ifft(fft_data[None] * wavelet_data.conj()[slices],
                     n=pN, axis=axis)

    # remove zero padding
    slices = [slice(None) for _ in out.shape]
    slices[axis] = slice(None, N)

    if data.ndim == 1:
        return out[slices].squeeze()
    else:
        return out[slices]
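
cwt_freq expects a wavelet callable sampled in angular frequency and the legacy scipy.fft / scipy.ifft callables (no longer available in current SciPy). For orientation, here is a self-contained sketch of the same frequency-domain convolution using NumPy and the Torrence & Compo Morlet wavelet; the wavelet choice (omega0 = 6) is an assumption, not necessarily the project's:

import numpy as np

def morlet_freq(w, s, w0=6.0):
    # Torrence & Compo (1998) Morlet wavelet sampled at angular frequencies w, scale s
    return np.pi ** -0.25 * np.exp(-0.5 * (s * w - w0) ** 2) * (w > 0)

dt = 0.01
x = np.sin(2 * np.pi * 2.0 * np.arange(0, 10, dt))    # 2 Hz test tone
widths = np.geomspace(0.05, 2.0, 32)                  # wavelet scales

pN = int(2 ** np.ceil(np.log2(len(x))))               # pad to the next power of two
w_k = 2 * np.pi * np.fft.fftfreq(pN, d=dt)
X = np.fft.fft(x, n=pN)
norm = (2 * np.pi * widths / dt) ** 0.5
W = np.fft.ifft(X[None, :] * norm[:, None] * np.conj(morlet_freq(w_k, widths[:, None])),
                axis=1)[:, :len(x)]                   # (n_scales, n_samples) complex CWT
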
fft_basics.py (project: ML, author: saurabhsuman47)
def generate_plot_fft(wave_filename, max_freq_plot = None):
    sample_rate, X = scipy.io.wavfile.read(wave_filename)
    duration = X.shape[0] / sample_rate
    N = X.shape[0] # number of samples
    T = 1.0 / sample_rate# sample spacing
    Y = sp.fft(X) # calculate fft
    #factor expands the x scale
    factor = 1
    if max_freq_plot is not None:
        factor = 2*max_freq_plot*T
    xf = np.linspace(0.0, 1*factor/(2.0*T), N*factor/2) #get x axis points =  freq for plotting
    plt.plot(xf, 2.0/N * np.abs(Y[0:N*factor/2])) # plot x and y (number of y points should be equal to x)
    plt.grid()
    plt.show()

# without support max freq upto which it should be plotted option, it plots upto sample_rate/2 Hz
# simple to understand, first check this
#def generate_plot_fft(wave_filename):
#    sample_rate, X = scipy.io.wavfile.read(wave_filename)
#    duration = ((X.shape[0] / sample_rate))
#    Y = sp.fftpack.fft(X) # calculate fft
#    N = X.shape[0] # number of samples
#    T = 1.0 / sample_rate# sample spacing
#    xf = np.linspace(0.0, 1.0/(2.0*T), N/2)
#    plt.plot(xf, 2.0/N * np.abs(Y[0:N/2]))
#    plt.grid()
#    plt.show()


# $ sox --null -r 22050 sine_a.wav synth 0.2 sine 400
# $ sox --null -r 22050 sine_b.wav synth 0.2 sine 3000
# $ sox --combine mix --volume 1 sine_b.wav --volume 0.5 sine_a.wav sine_mix.wav
generate_fft_for_all_files_in_directory.py (project: ML, author: saurabhsuman47)
def generate_and_save_fft(wave_file):
    sample_rate, X = sp.io.wavfile.read(wave_file)
    Y = abs((sp.fft(X))[:100000])   # change if different number of components are desired,
                                    # max value for genres dataset is more than 600000
    fft_file = wave_file[:-3] + "fft"
#    print fft_file
    np.save(fft_file, Y)

#test
nmrDataMod.py (project: pyNMR, author: bennomeier)
def lineBroadening(self, fromPos, toPos, LB):
        """Applies line broadening of given widh (in Hz) to the FID. Resulting spectrum
        (after fft is called) will be convolution of the original spectrum (fromPos)
        and Lorentzian with full-width-at-half-maximum equal to LB"""
        self.checkToPos(toPos)
        self.allFid[toPos] = sp.multiply(self.allFid[fromPos][:], sp.exp(-self.fidTime*LB*np.pi))
nmrDataMod.py (project: pyNMR, author: bennomeier)
def fourierTransform(self, fromPos, toPos, only = []):
        self.checkToPos(toPos)
        if len(only) > 0:
            self.allFid[toPos] = np.array([fftshift(fft(self.allFid[fromPos][fidIndex])) for fidIndex in only])
        else:
            self.allFid[toPos] = np.array([fftshift(fft(fid)) for fid in self.allFid[fromPos]])

        self.frequency = np.linspace(-self.sweepWidthTD2/2,self.sweepWidthTD2/2,len(self.allFid[fromPos][0]))
signal_processing.py (project: bird-species-classification, author: johnmartinsson)
def stft(x, fs, framesz, hop):
    framesamp = int(framesz*fs)
    hopsamp = int(hop*fs)
    w = scipy.hanning(framesamp)
    X = scipy.array([scipy.fft(w*x[i:i+framesamp])
                     for i in range(0, len(x)-framesamp, hopsamp)])
    return X
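
scipy.hanning and the top-level scipy.fft callable used here are gone in recent SciPy releases; an equivalent short-time FFT sketch with NumPy only:

import numpy as np

def stft_np(x, fs, framesz, hop):
    framesamp = int(framesz * fs)
    hopsamp = int(hop * fs)
    w = np.hanning(framesamp)
    return np.array([np.fft.fft(w * x[i:i + framesamp])
                     for i in range(0, len(x) - framesamp, hopsamp)])

fs = 16000
x = np.sin(2 * np.pi * 440 * np.arange(fs) / fs)       # 1 s of a 440 Hz tone
X = stft_np(x, fs, framesz=0.025, hop=0.010)            # (n_frames, framesamp) complex array
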
transform.py (project: GRIPy, author: giruenf)
def cwt_freq(data, wavelet, widths, dt, axis):
    # compute in frequency
    # next highest power of two for padding
    N = data.shape[axis]
    pN = int(2 ** np.ceil(np.log2(N)))
    # N.B. padding in fft adds zeros to the *end* of the array,
    # not equally either end.
    fft_data = scipy.fft(data, n=pN, axis=axis)
    # frequencies
    w_k = np.fft.fftfreq(pN, d=dt) * 2 * np.pi

    # sample wavelet and normalise
    norm = (2 * np.pi * widths / dt) ** .5
    wavelet_data = norm[:, None] * wavelet(w_k, widths[:, None])

    # Convert negative axis. Add one to account for
    # inclusion of widths axis above.
    axis = (axis % data.ndim) + 1

    # perform the convolution in frequency space
    slices = [slice(None)] + [None for _ in data.shape]
    slices[axis] = slice(None)

    out = scipy.ifft(fft_data[None] * wavelet_data.conj()[slices],
                     n=pN, axis=axis)

    # remove zero padding
    slices = [slice(None) for _ in out.shape]
    slices[axis] = slice(None, N)

    if data.ndim == 1:
        return out[slices].squeeze()
    else:
        return out[slices]
transform.py (project: GRIPy, author: giruenf)
def w_k(self):
        """Angular frequency as a function of Fourier index.

        See eq5 of TC.

        N.B. np.fft.fftfreq already divides by the sampling interval dt and
        returns ordinary frequencies (cycles per unit time), so we multiply
        by 2 * pi to obtain angular frequency.
        """
        return 2 * np.pi * np.fft.fftfreq(self.N, self.dt)
tone_est_ok.py (project: audio_scripts, author: audiofilter)
def tone_est_above_index(sdata,index,sr):
    samples = len(sdata)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(sdata[0:fft_size])
    pdata = numpy.zeros(fft_size)
    for i in xrange(fft_size): pdata[i] = abs(freq[i])
    peak = 0
    peak_index = 0
    for i in xrange(fft_size/2):
        if (i > index):
            if (pdata[i] > peak):
                peak = pdata[i]
                peak_index = i

    print "peak is at ",peak_index," and is ",peak        

    R = peak*peak;
    p = (freq[peak_index+1].real * freq[peak_index].real + freq[peak_index+1].imag * freq[peak_index].imag)/R

    g = -p/(1.0-p)
    q = (freq[peak_index-1].real * freq[peak_index].real + freq[peak_index-1].imag * freq[peak_index].imag)/R
    e = q/(1.0-q)

    if ((p>0) and (q>0)):
        d = p
    else:
        d = q

    u = peak_index + d

    sum_phase = freq[peak_index-1]*c(-1,d) +    freq[peak_index]*c(0,d) + freq[peak_index+1]*c(1,d)

    sum_c_sq = abs(c(-1,d))*abs(c(-1,d)) +    abs(c(0,d))*abs(c(0,d)) + abs(c(1,d))*abs(c(1,d))

    amp = (abs(sum_phase)/sum_c_sq)/fft_size

    phase_r = cmath.phase(sum_phase)
    freq_est = 0.5*u*sr/fft_size

    return (amp,freq_est,phase_r)

# REMOVAL CASES

