Python fftconvolve() example source code

frequency_estimator.py (project: NetPower_TestBed, author: Vignesh2208)
def freq_from_autocorr(sig, fs):
    """
    Estimate frequency using autocorrelation
    """
    # Calculate autocorrelation (same thing as convolution, but with
    # one input reversed in time), and throw away the negative lags
    corr = fftconvolve(sig, sig[::-1], mode='full')
    corr = corr[len(corr)//2:]

    # Find the first low point
    d = diff(corr)
    start = find(d > 0)[0]

    # Find the next peak after the low point (other than 0 lag).  This bit is
    # not reliable for long signals, due to the desired peak occurring between
    # samples, and other peaks appearing higher.
    # Should use a weighting function to de-emphasize the peaks at longer lags.
    peak = argmax(corr[start:]) + start
    px, py = parabolic(corr, peak)

    return fs / px
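
A minimal, self-contained sketch of the same autocorrelation trick on a synthetic tone; the project's parabolic and find helpers are replaced with plain NumPy here, and the sample rate and test frequency are only illustrative:

import numpy as np
from scipy.signal import fftconvolve

fs = 8000.0                                  # sample rate in Hz (illustrative)
t = np.arange(0, 1.0, 1.0 / fs)
sig = np.sin(2 * np.pi * 440.0 * t)          # 440 Hz test tone

# Autocorrelation via convolution with the time-reversed signal,
# keeping only the non-negative lags.
corr = fftconvolve(sig, sig[::-1], mode='full')
corr = corr[len(corr) // 2:]

# Skip past the zero-lag peak, then take the first maximum after the dip.
d = np.diff(corr)
start = np.nonzero(d > 0)[0][0]
peak = np.argmax(corr[start:]) + start
print("estimated frequency:", fs / peak)     # close to 440 Hz, up to one lag sample
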
TFMethods.py (project: ASP, author: TUIlmenauAMS)
def sincinterp(x):
        """
        Sinc interpolation for computation of fractional transformations.
        As appears in :
        -https://github.com/audiolabs/frft/
        ----------
        Args:
            f       : (array) Complex valued input array
            a       : (float) Alpha factor
        Returns:
            ret     : (array) Real valued synthesised data
        """
        N = len(x)
        y = np.zeros(2 * N - 1, dtype=x.dtype)
        y[:2 * N:2] = x
        xint = fftconvolve( y[:2 * N], np.sinc(np.arange(-(2 * N - 3), (2 * N - 2)).T / 2),)
        return xint[2 * N - 3: -2 * N + 3]
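
As a rough illustration of what sincinterp computes, the same zero-stuffing plus half-sample sinc kernel can be applied to a short cosine. This sketch is not part of the ASP project; the test signal is arbitrary:

import numpy as np
from scipy.signal import fftconvolve

x = np.cos(2 * np.pi * 0.05 * np.arange(32))   # slowly varying test signal
N = len(x)

# Insert zeros between samples, then convolve with a half-sample sinc kernel.
y = np.zeros(2 * N - 1, dtype=x.dtype)
y[::2] = x
kernel = np.sinc(np.arange(-(2 * N - 3), 2 * N - 2) / 2)
xint = fftconvolve(y, kernel)[2 * N - 3: -2 * N + 3]

print(len(x), len(xint))   # the interpolated signal has roughly twice the samples
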
carrier.py (project: pactools, author: pactools)
def direct(self, sigin):
        """
        apply this filter to a signal
        sigin : input signal (ndarray)
        returns the filtered signal (ndarray)
        """
        fftconvolve = signal.fftconvolve

        filtered = fftconvolve(sigin.ravel(), self.fir, 'same')
        if self.extract_complex:
            filtered_imag = fftconvolve(sigin.ravel(), self.fir_imag, 'same')

        if sigin.ndim == 2:
            filtered = filtered[None, :]
            if self.extract_complex:
                filtered_imag = filtered_imag[None, :]

        if self.extract_complex:
            return filtered, filtered_imag
        else:
            return filtered
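
The pattern above (a stored FIR kernel applied with mode='same', plus an optional second kernel for the imaginary part) can be reproduced with any FIR design. In this sketch, scipy.signal.firwin merely stands in for whatever filter the class keeps in self.fir:

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
sigin = rng.standard_normal(2048)                        # white-noise test input

fir = signal.firwin(65, [0.1, 0.3], pass_zero=False)     # band-pass FIR (illustrative)
filtered = signal.fftconvolve(sigin, fir, 'same')        # same length as the input

print(sigin.shape, filtered.shape)
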
fir.py (project: pactools, author: pactools)
def transform(self, sigin):
        """Apply this filter to a signal

        Parameters
        ----------
        sigin : array, shape (n_points, ) or (n_signals, n_points)
            Input signal

        Returns
        -------
        filtered : array, shape (n_points, ) or (n_signals, n_points)
            Filtered signal
        """
        sigin_ndim = sigin.ndim
        sigin = np.atleast_2d(sigin)
        filtered = [signal.fftconvolve(sig, self.fir, 'same') for sig in sigin]

        if sigin_ndim == 1:
            filtered = filtered[0]
        else:
            filtered = np.asarray(filtered)

        return filtered
kdl_cl.py (project: jamespy_py3, author: jskDr)
def cell_fd_conv(cell_df, h144=None):
    Limg, Lx, Ly = cell_fd_info(cell_df)
    if h144 is None:
        h144 = get_h2d(Lx, Ly, l=405, z=0.5, dx=2.2/4, dy=2.2/4)

    cell_img_fd_l = []
    for l in range(Limg):
        cell_img = cell_df[cell_df["ID"] == l]["image"].values.reshape(Lx, Ly)
        #cell_img_fd = fd_conv(cell_img, h144)
        cell_img_fd = fftconvolve(cell_img, h144, mode='same')
        cell_img_fd_l.append(cell_img_fd)

    cell_img_fd_a = np.array(cell_img_fd_l)
    #print( cell_img_fd_a.shape)

    return cell_img_fd_a
kdl.py (project: jamespy_py3, author: jskDr)
def cell_fd_conv(cell_df, h144=None):
    Limg, Lx, Ly = cell_fd_info(cell_df)
    if h144 is None:
        h144 = get_h2d(Lx, Ly, l=405, z=0.5, dx=2.2/4, dy=2.2/4)

    cell_img_fd_l = []
    for l in range(Limg):
        cell_img = cell_df[cell_df["ID"] == l]["image"].values.reshape(Lx, Ly)
        #cell_img_fd = fd_conv(cell_img, h144)
        cell_img_fd = fftconvolve(cell_img, h144, mode='same')
        cell_img_fd_l.append(cell_img_fd)

    cell_img_fd_a = np.array(cell_img_fd_l)
    #print( cell_img_fd_a.shape)

    return cell_img_fd_a
cell.py (project: jamespy_py3, author: jskDr)
def cell_fd_conv(cell_df, h144=None):
    Limg, Lx, Ly = cell_fd_info(cell_df)
    if h144 is None:
        h144 = get_h2d(Lx, Ly, l=405, z=0.5, dx=2.2/4, dy=2.2/4)

    cell_img_fd_l = []
    for l in range(Limg):
        cell_img = cell_df[cell_df["ID"] == l]["image"].values.reshape(Lx, Ly)
        #cell_img_fd = fd_conv(cell_img, h144)
        cell_img_fd = fftconvolve(cell_img, h144, mode='same')
        cell_img_fd_l.append(cell_img_fd)

    cell_img_fd_a = np.array(cell_img_fd_l)
    #print( cell_img_fd_a.shape)

    return cell_img_fd_a
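
In the three listings above, cell_fd_info and get_h2d are project helpers (the latter presumably returns a 2-D point-spread function for the given optical parameters). A self-contained stand-in that blurs a toy image with a Gaussian PSF via fftconvolve looks roughly like this:

import numpy as np
from scipy.signal import fftconvolve

Lx = Ly = 64
cell_img = np.zeros((Lx, Ly))
cell_img[28:36, 28:36] = 1.0              # toy "cell" image

# Stand-in PSF: a normalized 2-D Gaussian (get_h2d is assumed to return something similar).
yy, xx = np.mgrid[-8:9, -8:9]
h2d = np.exp(-(xx**2 + yy**2) / (2 * 3.0**2))
h2d /= h2d.sum()

cell_img_fd = fftconvolve(cell_img, h2d, mode='same')   # blurred image, same shape
print(cell_img_fd.shape)
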
ilt.py (project: DimmiLitho, author: vincentlv)
def mask_init(self):
        x = np.linspace(-10,10,21)
        X, Y = np.meshgrid(x,x)
        R = X**2 + Y**2
        O = np.exp(-R/2/(4**2))
        OO = O/np.sum(O)
        D = sg.fftconvolve(1.0*self.image.mask.data+0.0, OO,'same')
        # D = pyfftw.interfaces.scipy_fftpack.convolve(1.0*self.image.mask.data+0.0, OO,'same')

        self.target = copy.deepcopy(self.image.mask.data)
        self.maskdata = 0.99*D + 0.01
        AA = 2*self.maskdata - 1
        AA = np.complex64(AA)
        BB = np.arccos(AA)
        self.masktheta = BB.real

        self.image.mask.data = self.maskdata
stressmodels.py (project: pastas, author: pastas)
def simulate(self, p, tindex=None, dt=1):
        """Simulates the head contribution.

        Parameters
        ----------
        p: 1D array
           Parameters used for simulation.
        tindex: pandas.Series, optional
           Time indices to simulate the model.

        Returns
        -------
        pandas.Series
            The simulated head contribution.

        """
        b = self.rfunc.block(p, dt)
        stress = self.stress[0]
        self.npoints = stress.index.size
        h = pd.Series(fftconvolve(stress, b, 'full')[:self.npoints],
                      index=stress.index, name=self.name)
        if tindex is not None:
            h = h[tindex]
        return h
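
A hedged sketch of the same convolution step, with a synthetic daily stress series and an exponential decay standing in for the block response returned by self.rfunc.block(p, dt):

import numpy as np
import pandas as pd
from scipy.signal import fftconvolve

index = pd.date_range("2000-01-01", periods=365, freq="D")
stress = pd.Series(np.random.default_rng(0).gamma(1.0, 2.0, len(index)), index=index)

b = np.exp(-np.arange(100) / 15.0)        # illustrative block response
npoints = stress.index.size
h = pd.Series(fftconvolve(stress, b, 'full')[:npoints],
              index=stress.index, name="head contribution")
print(h.head())
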
blur_image.py (project: DeblurGAN, author: KupynOrest)
def blur_image(self, save=False, show=False):
        if self.part is None:
            psf = self.PSFs
        else:
            psf = [self.PSFs[self.part]]
        yN, xN, channel = self.shape
        key, kex = self.PSFs[0].shape
        delta = yN - key
        assert delta >= 0, 'resolution of image should be higher than kernel'
        result=[]
        if len(psf) > 1:
            for p in psf:
                tmp = np.pad(p, delta // 2, 'constant')
                cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                # blured = np.zeros(self.shape)
                blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                       dtype=cv2.CV_32F)
                blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
                blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
                blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
                blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
                result.append(np.abs(blured))
        else:
            psf = psf[0]
            tmp = np.pad(psf, delta // 2, 'constant')
            cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_32F)
            blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
            blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
            blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
            blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
            result.append(np.abs(blured))
        self.result = result
        if show or save:
            self.__plot_canvas(show, save)
stream.py (project: bark, author: kylerbrown)
def convolve(self, win):
        " Convolves each channel with window win."
        from scipy.signal import fftconvolve

        def conv_func(x):
            return np.column_stack([fftconvolve(x[:, i], win)
                                    for i in range(x.shape[1])])

        return self.new_stream(self.vector_map(conv_func))
test_stream.py (project: bark, author: kylerbrown)
def test_convolve():
    win = [.1, 0, 3]
    for data in (data2, data3, data4):
        x = np.column_stack([fftconvolve(data[:, i], win)
                             for i in range(data.shape[1])])
        y = Stream(data, sr=1).convolve(win).call()
        assert eq(x, y)
chx_correlation.py (project: chxanalys, author: yugangzhang)
def _cross_corr(img1, img2=None):
    ''' Compute the cross correlation of one (or two) images.
        Parameters
        ----------
        img1 : np.ndarray
            the image or curve to cross correlate
        img2 : 1d or 2d np.ndarray, optional
            If set, cross correlate img1 against img2.  A shift of img2
            to the right of img1 will lead to a shift of the point of
            highest correlation to the right.
            Default is set to None
    '''
    ndim = img1.ndim

    if img2 is None:
        img2 = img1

    if img1.shape != img2.shape:
        errorstr = "Image shapes don't match. "
        errorstr += "(img1 : {},{}; img2 : {},{})"\
            .format(*img1.shape, *img2.shape)
        raise ValueError(errorstr)

    # need to reverse indices for second image
    # fftconvolve(A,B) = FFT^(-1)(FFT(A)*FFT(B))
    # but need FFT^(-1)(FFT(A(x))*conj(FFT(B(x)))) = FFT^(-1)(A(x)*B(-x))
    reverse_index = tuple(slice(None, None, -1) for i in range(ndim))
    imgc = fftconvolve(img1, img2[reverse_index], mode='same')

    return imgc
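
A small sketch showing how the peak of this convolution-based cross correlation encodes the displacement between two frames; the images and the 5-pixel shift are made up for illustration:

import numpy as np
from scipy.signal import fftconvolve

img1 = np.zeros((64, 64))
img1[30:34, 30:34] = 1.0
img2 = np.roll(img1, shift=5, axis=1)      # a copy of img1 displaced by 5 pixels

imgc = fftconvolve(img1, img2[::-1, ::-1], mode='same')
peak = np.unravel_index(np.argmax(imgc), imgc.shape)
center = (imgc.shape[0] // 2, imgc.shape[1] // 2)
print(center[1] - peak[1])                 # 5: the peak offset from the centre recovers the shift
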
IRantoine.py (project: mirapie, author: Chutlhu)
def smoothLine(data,kernel):
    """helper function parallelized by smooth"""
    return signal.fftconvolve(data,kernel, mode='same')
text_utils.py (project: SynthText, author: ankush-me)
def place_text(self, text_arrs, back_arr, bbs):
        areas = [-np.prod(ta.shape) for ta in text_arrs]
        order = np.argsort(areas)

        locs = [None for i in range(len(text_arrs))]
        out_arr = np.zeros_like(back_arr)
        for i in order:            
            ba = np.clip(back_arr.copy().astype(np.float), 0, 255)
            ta = np.clip(text_arrs[i].copy().astype(np.float), 0, 255)
            ba[ba > 127] = 1e8
            intersect = ssig.fftconvolve(ba,ta[::-1,::-1],mode='valid')
            safemask = intersect < 1e8

            if not np.any(safemask): # no collision-free position:
                #warn("COLLISION!!!")
                return back_arr,locs[:i],bbs[:i],order[:i]

            minloc = np.transpose(np.nonzero(safemask))
            loc = minloc[np.random.choice(minloc.shape[0]),:]
            locs[i] = loc

            # update the bounding-boxes:
            bbs[i] = move_bb(bbs[i],loc[::-1])

            # blit the text onto the canvas
            w,h = text_arrs[i].shape
            out_arr[loc[0]:loc[0]+w,loc[1]:loc[1]+h] += text_arrs[i]

        return out_arr, locs, bbs, order
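
The collision test above hinges on a 'valid'-mode convolution between the occupancy map (occupied pixels pushed to a huge sentinel value) and the flipped text patch. A stripped-down sketch of that idea, with a slightly more forgiving threshold to absorb FFT round-off:

import numpy as np
import scipy.signal as ssig

back = np.zeros((100, 100))
back[40:60, 40:60] = 255.0                # an occupied region

ta = np.ones((10, 10))                    # footprint of the patch to place

ba = back.copy()
ba[ba > 127] = 1e8                        # occupied pixels become prohibitively costly
intersect = ssig.fftconvolve(ba, ta[::-1, ::-1], mode='valid')
safemask = intersect < 1e7                # margin below the 1e8 sentinel absorbs FFT round-off

locs = np.transpose(np.nonzero(safemask))
print(safemask.shape, len(locs))          # candidate top-left corners avoiding the occupied block
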
ssim.py (project: DPED, author: aiff22)
def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    _, height, width, _ = img1.shape

    size = min(filter_size, height, width)
    sigma = size * filter_sigma / filter_size if filter_size else 0

    if filter_size:

        window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
        mu1 = signal.fftconvolve(img1, window, mode='valid')
        mu2 = signal.fftconvolve(img2, window, mode='valid')
        sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
        sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
        sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')

    else:

        mu1, mu2 = img1, img2
        sigma11 = img1 * img1
        sigma22 = img2 * img2
        sigma12 = img1 * img2

    mu11 = mu1 * mu1
    mu22 = mu2 * mu2
    mu12 = mu1 * mu2
    sigma11 -= mu11
    sigma22 -= mu22
    sigma12 -= mu12

    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    v1 = 2.0 * sigma12 + c2
    v2 = sigma11 + sigma22 + c2

    ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
    cs = np.mean(v1 / v2)

    return ssim, cs
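
_FSpecialGauss is the project's Gaussian-window builder (presumably mirroring MATLAB's fspecial('gaussian')). A rough stand-in for it, together with the 'valid'-mode local means used above, might look like this:

import numpy as np
from scipy import signal

def gauss_window(size, sigma):
    # Assumed to behave like _FSpecialGauss: a normalized 2-D Gaussian of the given size.
    ax = np.arange(size) - (size - 1) / 2.0
    g = np.exp(-(ax[:, None]**2 + ax[None, :]**2) / (2.0 * sigma**2))
    return g / g.sum()

img = np.random.default_rng(0).random((1, 64, 64, 1))   # batch of one grayscale image
size, sigma = 11, 1.5
window = np.reshape(gauss_window(size, sigma), (1, size, size, 1))

mu = signal.fftconvolve(img, window, mode='valid')      # local Gaussian-weighted means
print(mu.shape)                                         # (1, 54, 54, 1)
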
dnoise_int.py (project: pysptools, author: ctherien)
def _denoise1d(self, M, window_size, order, deriv, rate):
        try:
            window_size = np.abs(np.int(window_size))
            order = np.abs(np.int(order))
        except ValueError as msg:
            raise ValueError("in SavitzkyGolay.denoise_spectra(), window_size and order have to be of type int")
        if window_size % 2 != 1 or window_size < 1:
            raise TypeError("in SavitzkyGolay.denoise_spectra(), window_size size must be a positive odd number")
        if window_size < order + 2:
            raise TypeError("in SavitzkyGolay.denoise_spectra(), window_size is too small for the polynomials order")

        order_range = range(order+1)
        half_window = (window_size -1) // 2
        # precompute coefficients
        b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
        m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
        # pad the signal at the extremes with
        # values taken from the signal itself
        N, p = M.shape
        dn = np.ones((N,p), dtype=np.float)
        long_signal = np.ndarray(p+2, dtype=np.float)
        for i in range(N):
            y = M[i]
            firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
            lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
            long_signal = np.concatenate((firstvals, y, lastvals))
            dn[i] = fftconvolve(long_signal, m, mode='valid')
        return dn
render.py (project: picasso, author: jungmannlab)
def _fftconvolve(image, blur_width, blur_height):
    kernel_width = 10 * int(_np.round(blur_width)) + 1
    kernel_height = 10 * int(_np.round(blur_height)) + 1
    kernel_y = _signal.gaussian(kernel_height, blur_height)
    kernel_x = _signal.gaussian(kernel_width, blur_width)
    kernel = _np.outer(kernel_y, kernel_x)
    kernel /= kernel.sum()
    return _signal.fftconvolve(image, kernel, mode='same')
filter_sim_routines.py (project: sketchrls, author: LCAV)
def generate_signal(n, p, loops, SNR_dB=100, noise='white', h=None):

    # First generate a random signal
    if noise == 'pink':
        x = noise_pink(n, rows=loops, alpha=1e-10)
    elif noise == 'ar1':
        x = noise_ar1(n, rows=loops)
    else:
        x = noise_white(n, rows=loops)

    # Generate random filters on the sphere
    if h is None:
        h = np.random.randn(loops,p)
        norm = np.linalg.norm(h, axis=1)
        h = (h.T/norm).T

    if h.ndim == 1:
        if h.shape[0] >= p:
            h = np.tile(h[:p], (loops,1))
        else:
            h2 = np.zeros((loops, p))
            for i in xrange(loops):
                h2[i,:h.shape[0]] = h
            h = h2

    # Finally generate the filtered signal
    sigma_noise = 10.**(-SNR_dB/20.)
    d = np.zeros((loops,n+h.shape[1]-1))
    for l in xrange(loops):
        d[l,:] = fftconvolve(x[l], h[l])
        d[l,:] += np.random.randn(n+h.shape[1]-1)*sigma_noise

    return x, h, d
noise.py (project: sketchrls, author: LCAV)
def noise_ar1(n, rows=1, a1=0.9):

    x = noise_white(n, rows=rows)
    for row in x:
        row[:] = fftconvolve(row, np.array([1., a1]), mode='same')
    x = (x.T/np.sqrt(np.mean(x**2, axis=1))).T

    return x
base_dar.py (project: pactools, author: pactools)
def _remove_far_masked_data(self, mask, list_signals):
        """Remove unnecessary data which is masked
        and far (> self.ordar) from the unmasked data.
        """
        if mask is None:
            return list_signals

        selection = ~mask

        # convolution with a delay kernel,
        # so we keep the close points before the selection
        kernel = np.ones(self.ordar * 2 + 1)
        kernel[-self.ordar:] = 0.
        delayed_selection = fftconvolve(selection, kernel[None, :],
                                        mode='same')
        # remove numerical error from fftconvolve
        delayed_selection[np.abs(delayed_selection) < 1e-13] = 0.

        time_selection = delayed_selection.sum(axis=0) != 0
        epoch_selection = delayed_selection.sum(axis=1) != 0

        if not np.any(time_selection) or not np.any(epoch_selection):
            raise ValueError("The mask seems to hide everything.")

        output_signals = []
        for sig in list_signals:
            if sig is not None:
                sig = sig[..., epoch_selection, :]
                sig = sig[..., :, time_selection]
            output_signals.append(sig)

        return output_signals
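
The delay kernel above is deliberately one-sided: its last ordar taps are zeroed so that only samples shortly before the unmasked region are kept. A toy illustration of that asymmetry:

import numpy as np
from scipy.signal import fftconvolve

ordar = 3
selection = np.zeros((1, 20))
selection[0, 10:14] = 1.0                 # the unmasked (kept) samples

kernel = np.ones(ordar * 2 + 1)
kernel[-ordar:] = 0.                      # one-sided: only reach backwards in time
delayed = fftconvolve(selection, kernel[None, :], mode='same')
delayed[np.abs(delayed) < 1e-13] = 0.     # remove numerical error from fftconvolve

print(np.nonzero(delayed[0])[0])          # indices 7..13: the selection plus 3 earlier samples
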
arma.py (project: pactools, author: pactools)
def inverse(self, sigin):
        """Apply the inverse ARMA filter to a signal

        sigin : input signal (ndarray)

        returns the filtered signal(ndarray)

        """
        arpart = np.concatenate((np.ones(1), self.AR_))
        return signal.fftconvolve(sigin, arpart, 'same')
kdl_cl.py (project: jamespy_py3, author: jskDr)
def fd_conv(Img_xy, h2d, mode ='same'):
    #return convolve2d(Img_xy, h2d, mode=mode)
    return fftconvolve(Img_xy, h2d, mode=mode)
kdl.py (project: jamespy_py3, author: jskDr)
def fd_conv(Img_xy, h2d, mode ='same'):
    #return convolve2d(Img_xy, h2d, mode=mode)
    return fftconvolve(Img_xy, h2d, mode=mode)
stressmodels.py (project: pastas, author: pastas)
def simulate(self, p, tindex=None, dt=1):
        """Simulates the head contribution.

        Parameters
        ----------
        p: 1D array
           Parameters used for simulation.
        tindex: pandas.Series, optional
           Time indices to simulate the model.

        Returns
        -------
        pandas.Series
            The simulated head contribution.

        """
        b = self.rfunc.block(p[:-1], dt)
        self.npoints = self.stress[0].index.size
        stress = self.get_stress(p=p)
        h = pd.Series(fftconvolve(stress, b, 'full')[:self.npoints],
                      index=self.stress[0].index, name=self.name)
        if tindex is not None:
            h = h[tindex]
        # see whether it makes a difference to subtract gain * mean_stress
        # h -= self.rfunc.gain(p) * stress.mean()
        return h
stressmodels.py (project: pastas, author: pastas)
def simulate(self, p, tindex=None, dt=1):
        dt = int(dt)
        b = self.rfunc.block(p[:-self.recharge.nparam], dt)  # Block response
        # The recharge calculation needs arrays
        precip_array = np.array(self.stress["prec"])
        evap_array = np.array(self.stress["evap"])
        rseries = self.recharge.simulate(precip_array, evap_array,
                                         p[-self.recharge.nparam:])
        self.npoints = len(rseries)
        h = pd.Series(fftconvolve(rseries, b, 'full')[:self.npoints],
                      index=self.stress["prec"].index, name=self.name)
        if tindex is not None:
            h = h[tindex]
        return h
stressmodels.py (project: pastas, author: pastas)
def simulate(self, p=None, tindex=None, dt=1):
        h = pd.Series(data=0, index=self.stress[0].index, name=self.name)
        for i in self.stress:
            self.npoints = self.stress.index.size
            b = self.rfunc.block(p, self.r[i])  # nparam-1 depending on rfunc
            h += fftconvolve(self.stress[i], b, 'full')[:self.npoints]
        if tindex is not None:
            h = h[tindex]
        return h
window_processing.py (project: Inertial-Orbit-Detection, author: MatrixAI)
def freq_from_autocorr(signal, sampling_rate):

    corr = fftconvolve(signal, signal[::-1], mode='full')
    corr = corr[len(corr)//2:]
    d = np.diff(corr)
    start = find_index_by_true(d > 0)[0]
    peak = np.argmax(corr[start:]) + start
    px, py = parabolic(corr, peak)
    return sampling_rate / px
preview_wav.py (project: ddc, author: chrisdonahue)
def write_preview_wav(wav_fp, note_beats_and_abs_times, wav_fs=11025.0):
    wav_len = int(wav_fs * (note_beats_and_abs_times[-1][1] + 0.05))
    dt = 1.0 / wav_fs

    note_type_to_idx = {}
    idx = 0
    for _, beat, time, note_type in note_beats_and_abs_times:
        if note_type == '0' * len(note_type):
            continue
        if note_type not in note_type_to_idx:
            note_type_to_idx[note_type] = idx
            idx += 1
    num_note_types = len(note_type_to_idx)

    pulse_f = np.zeros((num_note_types, wav_len))

    for _, beat, time, note_type in note_beats_and_abs_times:
        sample = int(time * wav_fs)
        if sample > 0 and sample < wav_len and note_type in note_type_to_idx:
            pulse_f[note_type_to_idx[note_type]][sample] = 1.0

    scale = [440.0, 587.33, 659.25, 783.99]
    freqs = [scale[i % 4] * math.pow(2.0, (i // 4) - 1) for i in xrange(num_note_types)]
    metro_f = np.zeros(wav_len)
    for idx in xrange(num_note_types):
        click_len = 0.05
        click_t = np.arange(0.0, click_len, dt)
        click_atk = 0.02
        click_sus = 0.5
        click_rel = 0.2
        click_env = _linterp(0.0, [(click_atk, 1.0), (click_sus, 1.0), (click_rel, 0.0)], len(click_t))
        click_f = click_env * np.sin(2.0 * np.pi * freqs[idx] * click_t)

        metro_f += fftconvolve(pulse_f[idx], click_f, mode='full')[:wav_len]
        #metro_f += pulse_f[idx][:wav_len]

    _wav_write(wav_fp, wav_fs, metro_f, normalize=True)
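
The preview is built by dropping a unit impulse at each note onset and convolving the impulse train with a short, enveloped sine click. A minimal sketch of that step (file writing and the project's _linterp envelope helper are omitted; the onset times are invented):

import numpy as np
from scipy.signal import fftconvolve

fs = 11025.0
wav_len = int(fs * 2.0)                          # two seconds of audio
pulse = np.zeros(wav_len)
for onset in (0.25, 0.75, 1.25, 1.75):           # note onset times in seconds
    pulse[int(onset * fs)] = 1.0

t = np.arange(0.0, 0.05, 1.0 / fs)               # 50 ms click
click = np.exp(-t / 0.01) * np.sin(2 * np.pi * 440.0 * t)

metro = fftconvolve(pulse, click, mode='full')[:wav_len]
print(metro.shape)
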
process.py (project: sound_field_analysis-py, author: QULab)
def convolve(A, B, FFT=None):
    """ Convolve two arrrays A & B row-wise. One or both can be one-dimensional for SIMO/SISO convolution

    Parameters
    ----------
    A, B: array_like
       Data to perform the convolution on of shape [Nsignals x NSamples]
    FFT: bool, optional
       Selects whether time or frequency domain convolution is applied. Default: on if NSamples > 500 for both

    Returns
    -------
    out: array
       Array containing row-wise, linear convolution of A and B
    """
    A = _np.atleast_2d(A)
    B = _np.atleast_2d(B)

    N_sigA, L_sigA = A.shape
    N_sigB, L_sigB = B.shape

    if FFT is None:
        FFT = L_sigA > 500 and L_sigB > 500

    if (N_sigA != N_sigB) and not (N_sigA == 1 or N_sigB == 1):
        raise ValueError('Number of rows must either match or at least one must be one-dimensional.')

    if N_sigA == 1 and N_sigB != 1:
        A = _np.broadcast_to(A, (N_sigB, L_sigA))
    elif N_sigA != 1 and N_sigB == 1:
        B = _np.broadcast_to(B, (N_sigA, L_sigB))

    out = []

    for IDX, cur_row in enumerate(A):
        if FFT:
            out.append(fftconvolve(cur_row, B[IDX]))
        else:
            out.append(_np.convolve(cur_row, B[IDX]))

    return _np.array(out)
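
A quick, self-contained sketch of the row-wise pattern this helper implements, here for the SIMO case where one impulse response is broadcast against several signals:

import numpy as np
from scipy.signal import fftconvolve

rng = np.random.default_rng(0)
A = rng.standard_normal((3, 600))         # three signals, 600 samples each
B = rng.standard_normal((1, 600))         # one shared impulse response

B = np.broadcast_to(B, (A.shape[0], B.shape[1]))
out = np.array([fftconvolve(a, b) for a, b in zip(A, B)])
print(out.shape)                          # (3, 1199): row-wise linear convolution
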

