import numpy as np
from numpy.lib import stride_tricks


def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize))) + 1
    # zeros at end (thus samples can be fully covered by frames)
    samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
# all the definitions of the following variables can be found in train_net.py
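A quick usage sketch for the stft() above, relying on the numpy import added at the top of the listing (the 1 kHz test tone, frame size and overlap are illustrative values, not taken from the project):

# Analyse a synthetic 1 kHz tone sampled at 16 kHz.
fs = 16000
t = np.arange(fs) / float(fs)
test_sig = np.sin(2 * np.pi * 1000 * t)
spec = stft(test_sig, frameSize=512, overlapFac=0.75)
print(spec.shape)                          # (num_frames, 257) complex rfft bins
peak_bin = int(np.abs(spec[10]).argmax())
print(peak_bin * fs / 512.0)               # ~1000 Hz, the tone frequency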
Python hanning() usage examples (source code)
audio_eval.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def hanning(M):
"""Returns the Hanning window.
The Hanning window is defined as
.. math::
w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
Args:
M (:class:`~int`):
Number of points in the output window. If zero or less, an empty
array is returned.
Returns:
~cupy.ndarray: Output ndarray.
.. seealso:: :func:`numpy.hanning`
"""
if M < 1:
return from_data.array([])
if M == 1:
return basic.ones(1, float)
n = ranges.arange(0, M)
return 0.5 - 0.5 * trigonometric.cos(2.0 * numpy.pi * n / (M - 1))
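For reference, the raised-cosine formula in the docstring can be checked against NumPy's symmetric Hann window on the CPU (NumPy only, independent of CuPy):

import numpy as np

M = 16
n = np.arange(M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
assert np.allclose(w, np.hanning(M))   # identical to the symmetric Hann window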
def ams_extractor(x, sr, win_len, shift_len, order):
from scipy.signal import hilbert
envelope = np.abs(hilbert(x))
for i in range(order-1):
envelope = np.abs(hilbert(envelope))
envelope = envelope * 1./3.
frames = (len(envelope) - win_len) // shift_len
hanning_window = np.hanning(win_len)
ams_feature = np.zeros(shape=(15, frames))
wts = cal_triangle_window(0, sr//2, win_len, 15, 15.6, 400)
for i in range(frames):
one_frame = x[i*shift_len:i*shift_len+win_len]
one_frame = one_frame * hanning_window
frame_fft = np.abs(np.fft.fft(one_frame, win_len))
ams_feature[:,i] = np.matmul(wts, frame_fft)
return ams_feature
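cal_triangle_window() is a project helper not shown here; the Hilbert-envelope step on its own can be illustrated with SciPy (the AM test signal below is made up for the demonstration):

import numpy as np
from scipy.signal import hilbert

fs = 16000
t = np.arange(fs) / float(fs)
carrier = np.sin(2 * np.pi * 1000 * t)
modulator = 1.0 + 0.5 * np.sin(2 * np.pi * 4 * t)    # 4 Hz amplitude modulation
am_signal = modulator * carrier
envelope = np.abs(hilbert(am_signal))                # analytic-signal magnitude
print(np.corrcoef(envelope, modulator)[0, 1])        # close to 1: the envelope tracks the modulator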
cochleagram_extractor.py (project: speech_feature_extractor, author: ZhihaoDU)
def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
fcoefs, f = make_erb_filters(sr, channel_number, 50)
fcoefs = np.flipud(fcoefs)
xf = erb_frilter_bank(xx, fcoefs)
if win_type == 'hanning':
window = np.hanning(channel_number)
elif win_type == 'hamming':
window = np.hamming(channel_number)
elif win_type == 'triangle':
window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
else:
window = np.ones(channel_number)
window = window.reshape((channel_number, 1))
xe = np.power(xf, 2.0)
frames = 1 + ((np.size(xe, 1)-win_len) // shift_len)
cochleagram = np.zeros((channel_number, frames))
for i in range(frames):
one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len], np.repeat(window, win_len, 1))
cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))
cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
return cochleagram
def log_power_spectrum_extractor(x, win_len, shift_len, win_type, is_log=False):
samples = x.shape[0]
frames = (samples - win_len) // shift_len
stft = np.zeros((win_len, frames), dtype=np.complex64)
spect = np.zeros((win_len // 2 + 1, frames), dtype=np.float64)
if win_type == 'hanning':
window = np.hanning(win_len)
elif win_type == 'hamming':
window = np.hamming(win_len)
    else:  # 'rectangle' or any unrecognised win_type falls back to a rectangular window
        window = np.ones(win_len)
for i in range(frames):
one_frame = x[i*shift_len: i*shift_len+win_len]
windowed_frame = np.multiply(one_frame, window)
stft[:, i] = np.fft.fft(windowed_frame, win_len)
if is_log:
spect[:, i] = np.log(np.power(np.abs(stft[0: win_len//2+1, i]), 2.))
else:
spect[:, i] = np.power(np.abs(stft[0: win_len//2+1, i]), 2.)
return spect
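A usage sketch for log_power_spectrum_extractor() (window and shift lengths are illustrative):

import numpy as np

fs = 16000
tone = np.sin(2 * np.pi * 440 * np.arange(fs) / float(fs))
spect = log_power_spectrum_extractor(tone, win_len=512, shift_len=256,
                                     win_type='hanning', is_log=True)
print(spect.shape)                        # (257, num_frames)
print(spect[:, 0].argmax() * fs / 512.0)  # close to 440 Hz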
def stft_extractor(x, win_len, shift_len, win_type):
samples = x.shape[0]
frames = (samples - win_len) // shift_len
stft = np.zeros((win_len, frames), dtype=np.complex64)
spect = np.zeros((win_len // 2 + 1, frames), dtype=np.complex64)
if win_type == 'hanning':
window = np.hanning(win_len)
elif win_type == 'hamming':
window = np.hamming(win_len)
    else:  # 'rectangle' or any unrecognised win_type falls back to a rectangular window
        window = np.ones(win_len)
for i in range(frames):
one_frame = x[i*shift_len: i*shift_len+win_len]
windowed_frame = np.multiply(one_frame, window)
stft[:, i] = np.fft.fft(windowed_frame, win_len)
spect[:, i] = stft[: win_len//2+1, i]
return spect
def ams_extractor(x, sr, win_len, shift_len, order=1, decimate_coef=1./4.):
from scipy.signal import hilbert
envelope = np.abs(hilbert(x))
for i in range(order-1):
envelope = np.abs(hilbert(envelope))
envelope = envelope * decimate_coef
frames = (len(envelope) - win_len) // shift_len
hanning_window = np.hanning(win_len)
ams_feature = np.zeros(shape=(15, frames))
wts = cal_triangle_window(0, sr//2, win_len, 15, 15.6, 400)
for i in range(frames):
one_frame = x[i*shift_len:i*shift_len+win_len]
one_frame = one_frame * hanning_window
frame_fft = np.abs(np.fft.fft(one_frame, win_len))
ams_feature[:,i] = np.matmul(wts, frame_fft)
return ams_feature
def unknown_feature_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
bark_spect = np.matmul(coef, x_spectrum)
ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
for i in range(barks):
channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
if method_version == 'v1':
ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
elif method_version == 'v2':
channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
else:
ams[i, :, :] = np.abs(channel_stft)
return ams
def spectrum_extractor(x, win_len, shift_len, win_type, is_log):
samples = x.shape[0]
frames = (samples - win_len) // shift_len
stft = np.zeros((win_len, frames), dtype=np.complex64)
spectrum = np.zeros((win_len // 2 + 1, frames), dtype=np.float64)
if win_type == 'hanning':
window = np.hanning(win_len)
elif win_type == 'hamming':
window = np.hamming(win_len)
elif win_type == 'triangle':
window = (1 - (np.abs(win_len - 1 - 2 * np.arange(1, win_len + 1, 1)) / (win_len + 1)))
else:
window = np.ones(win_len)
for i in range(frames):
one_frame = x[i*shift_len: i*shift_len+win_len]
windowed_frame = np.multiply(one_frame, window)
stft[:, i] = np.fft.fft(windowed_frame, win_len)
if is_log:
spectrum[:, i] = np.log(np.abs(stft[0: win_len//2+1, i]))
else:
            spectrum[:, i] = np.abs(stft[0: win_len // 2 + 1, i])
return spectrum
spectrogram.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
    cols = int(np.floor((len(samples) - frameSize) / float(hopSize)))
# zeros at end (thus samples can be fully covered by frames)
# samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
audio_eval.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize))) + 1
    # zeros at end (thus samples can be fully covered by frames)
    samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
SENN_audio_eval.py (project: CNN-for-single-channel-speech-enhancement, author: zhr1201)
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize))) + 1
# zeros at end (thus samples can be fully covered by frames)
samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
def __init__(self, sampling_rate=16000, frame_width=0.032, frame_shift=0.01, num_mel_filters=40, window_func="hanning",
using_delta=True, using_delta_delta=True):
assert window_func in ["hanning", "hamming"]
    self.sampling_rate = sampling_rate
    self.frame_width = frame_width
    self.frame_shift = frame_shift
    self.num_fft = int(sampling_rate * frame_width)
    self.num_mel_filters = num_mel_filters
    if window_func == "hanning":
        self.window_func = lambda x: np.hanning(x)
    elif window_func == "hamming":
        self.window_func = lambda x: np.hamming(x)
self.using_delta = using_delta
self.using_delta_delta = using_delta_delta
self.fbank = fft.get_filterbanks(nfft=self.num_fft, nfilt=num_mel_filters, samplerate=sampling_rate)
def periodic_hann(window_length):
"""Calculate a "periodic" Hann window.
The classic Hann window is defined as a raised cosine that starts and
ends on zero, and where every value appears twice, except the middle
point for an odd-length window. Matlab calls this a "symmetric" window
and np.hanning() returns it. However, for Fourier analysis, this
actually represents just over one cycle of a period N-1 cosine, and
thus is not compactly expressed on a length-N Fourier basis. Instead,
it's better to use a raised cosine that ends just before the final
zero value - i.e. a complete cycle of a period-N cosine. Matlab
calls this a "periodic" window. This routine calculates it.
Args:
window_length: The number of points in the returned window.
Returns:
A 1D np.array containing the periodic hann window.
"""
return 0.5 - (0.5 * np.cos(2 * np.pi / window_length *
np.arange(window_length)))
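The periodic window differs from np.hanning only in the period of the cosine (N instead of N - 1), so its samples coincide with the first N points of a symmetric window of length N + 1:

import numpy as np

N = 8
assert np.allclose(periodic_hann(N), np.hanning(N + 1)[:N])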
def stft_magnitude(signal, fft_length,
hop_length=None,
window_length=None):
"""Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D np.array where each row contains the magnitudes of the fft_length/2+1
unique values of the FFT for the corresponding frame of input samples.
"""
frames = frame(signal, window_length, hop_length)
# Apply frame window to each frame. We use a periodic Hann (cosine of period
# window_length) instead of the symmetric Hann of np.hanning (period
# window_length-1).
window = periodic_hann(window_length)
windowed_frames = frames * window
return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))
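The frame() helper used above is defined elsewhere in this feature-extraction module; a minimal stand-in, assuming it only needs to produce hop-spaced overlapping windows of the signal, could look like this:

import numpy as np

def frame(data, window_length, hop_length):
    # Hypothetical replacement: returns an array of shape
    # (num_frames, window_length) built from hop-spaced slices.
    num_frames = 1 + (data.shape[0] - window_length) // hop_length
    return np.stack([data[i * hop_length:i * hop_length + window_length]
                     for i in range(num_frames)])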
# Mel spectrum constants and functions.
def compute_pairwise_shifts(imstack):
# Calculates the pairwise shifts for images in a stack of format [frame, x, y].
# returns shift vector as [y, x] for each pair, a 2 x N-1 array where N is num_frames
scan_shape = imstack.shape
num_pairs = scan_shape[0]-1
print('Correcting ' + str(num_pairs) + ' frames...')
# Prepare window function (Hann)
win = np.outer(np.hanning(scan_shape[1]),np.hanning(scan_shape[2]))
# Pairwise shifts
shift = np.zeros((2, num_pairs))
for iPair in range(0, num_pairs):
image = imstack[iPair]
offset_image = imstack[iPair+1]
shift[:,iPair], error, diffphase = register_translation_hybrid(image*win, offset_image*win,
exponent = 0.3, upsample_factor = 100)
# Shifts are defined as [y, x] where y is shift of imaging location
# with respect to positive y axis, similarly for x
return shift
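register_translation_hybrid() is a project routine (amplitude-weighted, subpixel-upsampled registration); a bare-bones FFT phase correlation shows where the 2D Hann window enters and how the shift peak is read off:

import numpy as np

def integer_shift(image_a, image_b, win):
    # Cross-power spectrum of the two windowed frames, phase only.
    A = np.fft.fft2(image_a * win)
    B = np.fft.fft2(image_b * win)
    cross_power = A * np.conj(B)
    cross_power /= np.abs(cross_power) + 1e-12
    corr = np.abs(np.fft.ifft2(cross_power))
    peak = np.unravel_index(corr.argmax(), corr.shape)
    # Peak positions past the midpoint wrap around to negative shifts.
    return [p if p < s // 2 else p - s for p, s in zip(peak, corr.shape)]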
def __init__(self, window_size, segments_buf=None):
self._window_size = window_size
if segments_buf is None:
segments_buf = int(SAMPLE_RATE / window_size)
self._segments_buf = segments_buf
self._thresholding_window_size = THRESHOLD_WINDOW_SIZE
assert self._thresholding_window_size <= segments_buf
self._last_spectrum = np.zeros(window_size, dtype=np.int16)
self._last_flux = deque(
np.zeros(segments_buf, dtype=np.int16), segments_buf)
self._last_prunned_flux = 0
self._hanning_window = np.hanning(window_size)
# The zeros which will be used to double each segment size
self._inner_pad = np.zeros(window_size)
# To ignore the first peak just after starting the application
self._first_peak = True
def managed_window(self,axismanager, data, windowaxis):
def window_axis_function(a, window):
a = a * window
return a
newaxis = axismanager.current_axis_number(windowaxis)
N = data.shape[newaxis]
window = np.hanning(N)
# Save "window summed and squared" (see Numerical Recipes)
wss = np.sum(window**2.0)/float(N)
# Apply window
windoweddata = np.apply_along_axis(window_axis_function,
newaxis, data, window)
return windoweddata, wss
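The "window summed and squared" factor corrects power estimates for the energy removed by the window (Numerical Recipes); a small check with white noise:

import numpy as np

N = 4096
noise = np.random.randn(N)                     # unit-variance white noise
window = np.hanning(N)
wss = np.sum(window ** 2.0) / float(N)
raw_power = np.mean(np.abs(np.fft.fft(noise)) ** 2) / N            # ~1.0
win_power = np.mean(np.abs(np.fft.fft(noise * window)) ** 2) / N   # biased low
print(raw_power, win_power / wss)              # dividing by wss restores the scale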
def measureLoop(self):
""" Measure 10 values, add them to buffer and remove the 10 oldest values.
"""
if self.stopRequest:
self.stopRequest = False
self.unlock()
return
data = np.zeros((100, self._data_logic.getChannels()))
data[:, 0] = np.array([self._data_logic.getData() for i in range(100)])
self.buf = np.roll(self.buf, -100, axis=0)
self.buf[-101:-1] = data
w = np.hanning(self.window_len)
s = np.r_[self.buf[self.window_len-1:0:-1], self.buf, self.buf[-1:-self.window_len:-1]]
for channel in range(self._data_logic.getChannels()):
convolved = np.convolve(w/w.sum(), s[:, channel], mode='valid')
self.smooth[:, channel] = convolved
self.sigRepeat.emit()
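The np.r_ mirroring plus np.convolve pattern above is the classic SciPy Cookbook smoothing recipe; as a standalone function (note the 'valid' output is len(x) + window_len - 1 samples long, so callers usually trim it):

import numpy as np

def smooth_hanning(x, window_len=11):
    # Mirror window_len - 1 samples at each end to reduce edge artefacts,
    # then convolve with a normalised Hann window.
    w = np.hanning(window_len)
    padded = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    return np.convolve(w / w.sum(), padded, mode='valid')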
def undo_stft(spect, hop_size, frame_len=None, unwindow='auto'):
"""
Undoes an SFTF via overlap-add, returning a numpy array of samples.
"""
# transform into time domain
spect = np.fft.irfft(spect, n=frame_len, axis=1)
# overlap-and-add
num_frames, frame_len = spect.shape
win = np.hanning(frame_len)
#win = np.sin(np.pi * np.arange(frame_len) / frame_len)
#win = 1
if unwindow == 'auto':
unwindow = (hop_size <= frame_len//2)
samples = np.zeros((num_frames - 1) * hop_size + frame_len)
if unwindow:
factors = np.zeros_like(samples)
for idx, frame in enumerate(spect):
oidx = int(idx*hop_size)
samples[oidx:oidx+frame_len] += frame * win
if unwindow:
factors[oidx:oidx+frame_len] += win**2
if unwindow:
np.maximum(factors, .1 * factors.max(), factors)
samples /= factors
return samples
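A round-trip sketch pairing undo_stft() with a simple forward STFT (plain rfft over Hann-windowed frames, used here only for the check, not one of the project's stft variants):

import numpy as np

fs, frame_len, hop = 16000, 512, 128
x = np.sin(2 * np.pi * 440 * np.arange(fs) / float(fs))
num_frames = 1 + (len(x) - frame_len) // hop
win = np.hanning(frame_len)
frames = np.stack([x[i * hop:i * hop + frame_len] * win for i in range(num_frames)])
spect = np.fft.rfft(frames, axis=1)
y = undo_stft(spect, hop, frame_len)
interior = slice(frame_len, len(x) - frame_len)   # compare away from the edges
print(np.max(np.abs(y[interior] - x[interior])))  # near machine precision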
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize))) + 1
# zeros at end (thus samples can be fully covered by frames)
samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize))) + 1
# zeros at end (thus samples can be fully covered by frames)
samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize)))
# zeros at end (thus samples can be fully covered by frames)
# samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
def test_high_frequency_completion(self):
path = dirpath + '/data/test16000.wav'
fs, x = wavfile.read(path)
f0rate = 0.5
shifter = Shifter(fs, f0rate=f0rate)
mod_x = shifter.f0transform(x, completion=False)
mod_xc = shifter.f0transform(x, completion=True)
assert len(mod_x) == len(mod_xc)
N = 512
fl = int(fs * 25 / 1000)
win = np.hanning(fl)
sts = [1000, 5000, 10000, 20000]
for st in sts:
# confirm w/o completion
f_mod_x = fft(mod_x[st: st + fl] / 2**16 * win)
amp_mod_x = 20.0 * np.log10(np.abs(f_mod_x))
# confirm w/ completion
f_mod_xc = fft(mod_xc[st: st + fl] / 2**16 * win)
amp_mod_xc = 20.0 * np.log10(np.abs(f_mod_xc))
        assert np.mean(amp_mod_x[N // 4:]) < np.mean(amp_mod_xc[N // 4:])
def analysis_with_del_comp_from_est_file(v_in_sig, est_file, fs, nFFT=None, win_func=np.hanning, b_ph_unv_zero=False, nwin_per_pitch_period=0.5):
if nFFT is None: # If fft length is not provided, some standard values are assumed.
if fs==48000:
nFFT=4096
elif fs==16000:
nFFT=2048
# Pitch Marks:-------------------------------------------------------------
v_pm_sec, v_voi = la.read_reaper_est_file(est_file, check_len_smpls=len(v_in_sig), fs=fs)
v_pm_smpls = v_pm_sec * fs
m_sp, m_ph, v_shift, m_frms, m_fft = analysis_with_del_comp_from_pm(v_in_sig, v_pm_smpls, nFFT, win_func=win_func, nwin_per_pitch_period=nwin_per_pitch_period)
if b_ph_unv_zero:
m_ph = m_ph * v_voi[:,None]
return m_sp, m_ph, v_shift, v_voi, m_frms, m_fft
#==============================================================================
# From (after) 'analysis_with_del_comp':
# new: returns voi/unv decision.
def stft(self, samples, window_size, overlap_factor=0.5, window_function=np.hanning):
"""
Perform Short-time Fourier transform to get the spectrogram for the given samples
:param samples: Complex samples
:param window_size: Size of DFT window
:param overlap_factor: Value between 0 (= No Overlapping) and 1 (= Full overlapping) of windows
:param window_function: Function for DFT window
:return: short-time Fourier transform of the given signal
"""
window = window_function(window_size)
# hop size determines by how many samples the window is advanced
hop_size = window_size - int(overlap_factor * window_size)
# pad with zeros to ensure last window fits signal
padded_samples = np.append(samples, np.zeros((len(samples) - window_size) % hop_size))
num_frames = ((len(padded_samples) - window_size) // hop_size) + 1
frames = [padded_samples[i*hop_size:i*hop_size+window_size] * window for i in range(num_frames)]
return np.fft.fft(frames)
def stft(sig, frame_size, overlap_fac=0.5, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frame_size)
hop_size = int(frame_size - np.floor(overlap_fac * frame_size))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
    samples = np.append(np.zeros(int(frame_size / 2.0)), sig)
# cols for windowing
    cols = int(np.ceil((len(samples) - frame_size) / float(hop_size))) + 1
# zeros at end (thus samples can be fully covered by frames)
samples = np.append(samples, np.zeros(frame_size))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frame_size),
strides=(
samples.strides[0] * hop_size,
samples.strides[0]
)
).copy()
frames *= win
return np.fft.rfft(frames)
def __init__(self, configs, chunks, chunksize, channels, rate):
self.configs = configs
self.channels = channels
self.chunksize = chunksize
self.channel_len = chunks*chunksize
self.fft_len = self.channel_len//2 + 1 # See numpy.fft.rfft
self.fft_freqs_in_hertz = np.fft.rfftfreq(self.channel_len, d=1.0/rate)
endpoint_notes = configs.settings.getmultistr('fft', 'endpoint_notes')
self.notespace = process.generic.notespace(
endpoint_notes[0], endpoint_notes[1],
step=1.0/6) # XXX
self.window = np.hanning(self.channel_len)
self.sensitivity = configs.settings.getfloat('fft', 'sensitivity')
self.compute_weights(self.sensitivity)
logger.debug("FFT length: {}".format(self.fft_len))
# Create a pyfftw.FFTW object
a = pyfftw.empty_aligned(
self.channel_len, dtype='int16', n=pyfftw.simd_alignment)
self.fft = pyfftw.builders.rfft(
a, overwrite_input=True, threads=multiprocessing.cpu_count())
SourceFilterModelSF.py (project: SourceFilterContoursMelody, author: juanjobosch)
def hann(args):
"""
window = hann(args)
    Computes a Hann window using NumPy's np.hanning(args).
"""
return np.hanning(args)
# FUNCTIONS FOR TIME-FREQUENCY REPRESENTATION
def iFFT(Y, output_length=None, window=False):
""" Inverse real-valued Fourier Transform
Parameters
----------
Y : array_like
Frequency domain data [Nsignals x Nbins]
output_length : int, optional
        Length of the returned time-domain signal (Default: 2 * (Nbins - 1))
    window : str, optional
        If given, apply the named window to the resulting time-domain signal;
        one of 'hann', 'hamming', 'blackman' or 'kaiser' (Default: False, no window)
Returns
-------
y : array_like
Reconstructed time-domain signal
"""
Y = _np.atleast_2d(Y)
y = _np.fft.irfft(Y, n=output_length)
if window:
if window not in {'hann', 'hamming', 'blackman', 'kaiser'}:
raise ValueError('Selected window must be one of hann, hamming, blackman or kaiser')
no_of_signals, no_of_samples = y.shape
if window == 'hann':
window_array = _np.hanning(no_of_samples)
elif window == 'hamming':
window_array = _np.hamming(no_of_samples)
elif window == 'blackman':
window_array = _np.blackman(no_of_samples)
elif window == 'kaiser':
window_array = _np.kaiser(no_of_samples, 3)
y = window_array * y
return y
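A usage sketch for iFFT(): transform a batch of short signals to the frequency domain with rfft, then back, applying a Hann window on the way out (the array sizes are illustrative):

import numpy as np

x = np.random.randn(4, 256)                 # 4 signals, 256 samples each
Y = np.fft.rfft(x, axis=-1)                 # [Nsignals x Nbins]
y = iFFT(Y, output_length=256, window='hann')
print(y.shape)                              # (4, 256): Hann-weighted copies of x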