Python gaussian() usage examples

smoothing.py (project: PySAT, author: USGS-Astrogeology)
def gaussian(y, window_size=3, sigma=2):
    """
    Apply a gaussian filter to smooth the input vector

    Parameters
    ==========
    y :  array
         The input array

    window_size : int
                  An odd integer describing the size of the filter.

    sigma : float
            The standard deviation of the gaussian filter.
    """
    filt = signal.gaussian(window_size, sigma)
    return Series(signal.convolve(y, filt, mode='same'), index=y.index)
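
A minimal usage sketch of the smoother above. The window is taken from scipy.signal.windows (where newer SciPy releases keep the window functions) and is normalized to unit sum so the smoothed series keeps the input's scale, which the snippet above does not do; the helper name and the data are made up for illustration.

import numpy as np
from pandas import Series
from scipy import signal

def gaussian_smooth(y, window_size=11, sigma=3):
    # Gaussian window, normalized so the output stays on the input's scale
    filt = signal.windows.gaussian(window_size, sigma)
    filt /= filt.sum()
    return Series(signal.convolve(y, filt, mode='same'), index=y.index)

noisy = Series(np.sin(np.linspace(0, 6, 200)) + 0.1 * np.random.randn(200))
smoothed = gaussian_smooth(noisy)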
render.py (project: picasso, author: jungmannlab)
def render(locs, info=None, oversampling=1, viewport=None, blur_method=None, min_blur_width=0):
    if viewport is None:
        try:
            viewport = [(0, 0), (info[0]['Height'], info[0]['Width'])]
        except TypeError:
            raise ValueError('Need info if no viewport is provided.')
    (y_min, x_min), (y_max, x_max) = viewport
    if blur_method is None:
        return render_hist(locs, oversampling, y_min, x_min, y_max, x_max)
    elif blur_method == 'gaussian':
        return render_gaussian(locs, oversampling, y_min, x_min, y_max, x_max, min_blur_width)
    elif blur_method == 'gaussian_iso':
        return render_gaussian_iso(locs, oversampling, y_min, x_min, y_max, x_max, min_blur_width)
    elif blur_method == 'smooth':
        return render_smooth(locs, oversampling, y_min, x_min, y_max, x_max)
    elif blur_method == 'convolve':
        return render_convolve(locs, oversampling, y_min, x_min, y_max, x_max, min_blur_width)
    else:
        raise Exception('blur_method not understood.')
generalmethods.py (project: qudi, author: Ulm-IQO)
def gaussian_smoothing(self, data=None, filter_len=None, filter_sigma=None):
    """ This method convolves the data with a gaussian
     the smoothed data is returned

    @param array data: raw data
    @param int filter_len: length of filter
    @param int filter_sigma: width of gaussian

    @return array: smoothed data

    """
    #Todo: Check for wrong data type
    if filter_len is None:
        if len(data) < 20.:
            filter_len = 5
        elif len(data) >= 100.:
            filter_len = 10
        else:
            filter_len = int(len(data) / 10.) + 1
    if filter_sigma is None:
        filter_sigma = filter_len

    gaus = gaussian(filter_len, filter_sigma)
    return filters.convolve1d(data, gaus / gaus.sum(), mode='mirror')
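
A standalone sketch of the same smoothing, with made-up data: the kernel is divided by its sum so the filter acts as a weighted average, and the 'mirror' boundary mode reflects the signal at the edges to limit boundary artefacts.

import numpy as np
from scipy.ndimage import convolve1d
from scipy.signal.windows import gaussian

data = np.random.poisson(lam=8, size=200).astype(float)  # hypothetical noisy counts
filter_len, filter_sigma = 10, 10
gaus = gaussian(filter_len, filter_sigma)
data_smooth = convolve1d(data, gaus / gaus.sum(), mode='mirror')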
test_signal.py (project: pwtools, author: elcorto)
def test_smooth_1d():
    for edge in ['m', 'c']:
        for N in [20,21]:
            # values in [9.0,11.0]
            x = rand(N) + 10
            mn = 9.0
            mx = 11.0
            for M in range(18,27):
                print "1d", edge, "N=%i, M=%i" %(N,M)
                xsm = smooth(x, gaussian(M,2.0), edge=edge)
                assert len(xsm) == N
                # (N,1) case
                xsm2 = smooth(x[:,None], gaussian(M,2.0)[:,None], edge=edge)
                assert np.allclose(xsm, xsm2[:,0], atol=1e-14, rtol=1e-12)
                # Smoothed signal should not go to zero if edge effects are handled
                # properly. Also assert proper normalization (i.e. smoothed signal
                # is "in the middle" of the noisy original data).
                assert xsm.min() >= mn
                assert xsm.max() <= mx
                assert mn <= xsm[0] <= mx
                assert mn <= xsm[-1] <= mx
            # convolution with delta peak produces same data exactly
            assert np.allclose(smooth(x, np.array([0.0,1,0]), edge=edge),x, atol=1e-14,
                               rtol=1e-12)
test_signal.py (project: pwtools, author: elcorto)
def test_smooth_nd():
    for edge in ['m', 'c']:
        a = rand(20, 2, 3) + 10
        for M in [5, 20, 123]:
            print "nd", edge, "M=%i" %M
            kern = gaussian(M, 2.0)
            asm = smooth(a, kern[:,None,None], axis=0, edge=edge)
            assert asm.shape == a.shape
            for jj in range(asm.shape[1]):
                for kk in range(asm.shape[2]):
                    assert np.allclose(asm[:,jj,kk], smooth(a[:,jj,kk], kern, 
                                                            edge=edge))
                    mn = a[:,jj,kk].min()
                    mx = a[:,jj,kk].max()
                    smn = asm[:,jj,kk].min()
                    smx = asm[:,jj,kk].max()
                    assert smn >= mn, "min: data=%f, smooth=%f" %(mn, smn)
                    assert smx <= mx, "max: data=%f, smooth=%f" %(mx, smx)
common.py (project: ProtScan, author: gianlucacorrado)
def smooth(votes, **params):
    """Compute the convolution with a Gaussian signal."""
    window = params.get('window', 50)
    std = params.get('std', 20)
    profiles = dict()
    window = gaussian(window, std=std)
    for k, vote in votes.items():
        smoothed = convolve(vote, window, mode='same')
        profiles[k] = smoothed
    return profiles
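
Spelled out without the **params indirection, the same smoothing over a hypothetical dict of per-position vote arrays looks like this:

import numpy as np
from scipy.signal import convolve
from scipy.signal.windows import gaussian

votes = {'seq_a': np.random.rand(500), 'seq_b': np.random.rand(300)}  # made-up vote profiles
window = gaussian(50, std=20)
profiles = {k: convolve(v, window, mode='same') for k, v in votes.items()}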
textgridview.py (project: coquery, author: gkunter)
def _get_spectrogram(self, **kwargs):
        NFFT = int(self.sound().framerate * self.windowLength())
        noverlap = kwargs.get("noverlap", int(NFFT / 2))
        data, ybins, xbins, im = self.ax_spectrogram.specgram(
            self.sound().astype(np.int32),
            NFFT=NFFT,
            Fs=self.sound().framerate,
            noverlap=noverlap,
            window=signal.gaussian(M=NFFT, std=noverlap))
        self._extent = [xbins.min(), xbins.max(), ybins.min(), ybins.max()]
        self._spectrogram = self.transform(data)
best_solution_in_the_wuuuuuuurld.py (project: HashCode, author: sbrodehl)
def _gkern2(kernlen=21, nsig=3):
    """Returns a 2D Gaussian kernel array."""

    # create nxn zeros
    inp = np.zeros((kernlen, kernlen))
    # set element at the middle to one, a dirac delta
    inp[kernlen // 2, kernlen // 2] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter mask
    return fi.gaussian_filter(inp, nsig)
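
The same construction written against the top-level scipy.ndimage namespace, with a quick sanity check: smoothing a unit impulse with gaussian_filter yields a (nearly) normalized Gaussian mask.

import numpy as np
from scipy.ndimage import gaussian_filter

def gkern2(kernlen=21, nsig=3):
    # unit impulse in the centre of a kernlen x kernlen grid ...
    inp = np.zeros((kernlen, kernlen))
    inp[kernlen // 2, kernlen // 2] = 1.0
    # ... smoothed with a Gaussian, giving a Gaussian filter mask
    return gaussian_filter(inp, nsig)

kern = gkern2()
print(kern.shape, round(kern.sum(), 6))  # (21, 21) and approximately 1.0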
part1.py (project: MOOCs, author: ankitaggarwal011)
def make_gaussian(k, std):
  '''Create a gaussian kernel.

  Input:

  k - the radius of the kernel.

  std - the standard deviation of the kernel.

  Output:

  output - a numpy array of shape (2k+1, 2k+1) and dtype float.

  If gaussian_1d is a gaussian filter of length 2k+1 in one dimension, 
  kernel[i,j] should be filled with the product of gaussian_1d[i] and 
  gaussian_1d[j].

  Once all the points are filled, the kernel should be scaled so that the sum
  of all cells is equal to one.'''
  kernel = None
  # Insert your code here.----------------------------------------------------
  kernel = np.zeros((2 * k + 1, 2 * k + 1), dtype=float)  # np.float was removed in recent NumPy; use the builtin float
  gaussian_1d = signal.gaussian(2*k+1,std)
  for i in range(gaussian_1d.shape[0]):
    for j in range(gaussian_1d.shape[0]):
      kernel[i,j]=gaussian_1d[i]*gaussian_1d[j]
  kernelsum = kernel.sum()
  kernel = kernel/kernelsum
  #---------------------------------------------------------------------------
  return kernel
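
A quick check of the two properties the docstring asks for, separability and unit sum, using the equivalent window from scipy.signal.windows; the comparison on the last line assumes make_gaussian above is in scope.

import numpy as np
from scipy.signal.windows import gaussian

k, std = 3, 1.5
g1d = gaussian(2 * k + 1, std)
kernel = np.outer(g1d, g1d)           # kernel[i, j] = gaussian_1d[i] * gaussian_1d[j]
kernel /= kernel.sum()                # scale so all cells sum to one
print(np.isclose(kernel.sum(), 1.0))  # True
print(np.allclose(kernel, make_gaussian(k, std)))  # True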
render.py (project: picasso, author: jungmannlab)
def _fftconvolve(image, blur_width, blur_height):
    kernel_width = 10 * int(_np.round(blur_width)) + 1
    kernel_height = 10 * int(_np.round(blur_height)) + 1
    kernel_y = _signal.gaussian(kernel_height, blur_height)
    kernel_x = _signal.gaussian(kernel_width, blur_width)
    kernel = _np.outer(kernel_y, kernel_x)
    kernel /= kernel.sum()
    return _signal.fftconvolve(image, kernel, mode='same')
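
The same separable blur written without the leading-underscore import aliases and applied to a made-up image; the blur sigmas are placeholders.

import numpy as np
from scipy import signal

image = np.random.rand(128, 128)                    # hypothetical rendered image
blur_width, blur_height = 1.5, 2.5                  # blur sigmas in pixels
kernel_width = 10 * int(np.round(blur_width)) + 1   # odd kernel size, about ten sigma wide
kernel_height = 10 * int(np.round(blur_height)) + 1
kernel = np.outer(signal.windows.gaussian(kernel_height, blur_height),
                  signal.windows.gaussian(kernel_width, blur_width))
kernel /= kernel.sum()                              # preserve total intensity
blurred = signal.fftconvolve(image, kernel, mode='same')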
poissonianlikemethods.py (project: qudi, author: Ulm-IQO)
def poisson(self, x, mu):
    """
    Poisson function taken from:
    https://github.com/scipy/scipy/blob/master/scipy/stats/_discrete_distns.py

    For license see documentation/BSDLicense_scipy.md

    Author:  Travis Oliphant  2002-2011 with contributions from
             SciPy Developers 2004-2011
    """
    if len(np.atleast_1d(x)) == 1:
        check_val = x
    else:
        check_val = x[0]

    if check_val > 1e18:
        self.log.warning('The current value in the poissonian distribution '
                         'exceeds 1e18! Due to numerical imprecision a valid '
                         'functional output cannot be guaranteed any more!')

    # According to the central limit theorem, a poissonian distribution
    # approaches a gaussian distribution for large values. Because the
    # numerical precision of evaluating the logarithmized poissonian and
    # exponentiating it back is limited, a cutoff is introduced at 1e12;
    # beyond that value a gaussian distribution is assumed, which is a
    # perfectly valid approximation in that regime.

    if check_val < 1e12:
        return np.exp(xlogy(x, mu) - gammaln(x + 1) - mu)
    else:
        return np.exp(-((x - mu) ** 2) / (2 * mu)) / (np.sqrt(2 * np.pi * mu))
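
A small numerical check of the comment above: for a large mean, the log-gamma form and the gaussian limit agree to well below a percent across a few standard deviations (the grid below is arbitrary).

import numpy as np
from scipy.special import gammaln, xlogy

mu = 1e6
x = np.arange(mu - 3000, mu + 3000, 100.0)                   # roughly +/- 3 sigma around mu

exact = np.exp(xlogy(x, mu) - gammaln(x + 1) - mu)           # logarithmized poissonian
approx = np.exp(-((x - mu) ** 2) / (2 * mu)) / np.sqrt(2 * np.pi * mu)  # gaussian limit

print(np.max(np.abs(exact - approx) / approx))               # small relative error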
poissonianlikemethods.py (project: qudi, author: Ulm-IQO)
def estimate_poissonian(self, x_axis, data, params):
    """ Provide an estimator for initial values of a poissonian function.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param lmfit.Parameters params: object includes parameter dictionary which
                                    can be set

    @return tuple (error, params):

    Explanation of the return parameter:
        int error: error code (0:OK, -1:error)
        Parameters object params: set parameters of initial values
    """

    error = self._check_1D_input(x_axis=x_axis, data=data, params=params)

    # a gaussian filter is appropriate here since it approximates the poisson
    # distribution well
    # gaus = gaussian(10,10)
    # data_smooth = filters.convolve1d(data, gaus/gaus.sum(), mode='mirror')
    data_smooth = self.gaussian_smoothing(data=data, filter_len=10,
                                          filter_sigma=10)

    # set parameters
    mu = x_axis[np.argmax(data_smooth)]
    params['mu'].value = mu
    params['amplitude'].value = data_smooth.max() / self.poisson(mu, mu)

    return error, params
fit_logic_standalone.py (project: qudi, author: Ulm-IQO)
def gaussianlinearoffset_testing_data():

    x = np.linspace(0, 5, 30)
    x_nice=np.linspace(0, 5, 101)

    mod_final,params = qudi_fitting.make_gaussianwithslope_model()

    data=np.loadtxt("./../1D_shllow.csv")
    data_noisy=data[:,1]
    data_fit=data[:,3]
    x=data[:,2]


    update=dict()
    update["slope"]={"min":-np.inf,"max":np.inf}
    update["offset"]={"min":-np.inf,"max":np.inf}
    update["sigma"]={"min":-np.inf,"max":np.inf}
    update["center"]={"min":-np.inf,"max":np.inf}
    update["amplitude"]={"min":-np.inf,"max":np.inf}
    result=qudi_fitting.make_gaussianwithslope_fit(x_axis=x, data=data_noisy, add_params=update)
#
##
#    gaus=gaussian(3,5)
#    qudi_fitting.data_smooth = filters.convolve1d(qudi_fitting.data_noisy, gaus/gaus.sum(),mode='mirror')

    plt.plot(x,data_noisy,label="data")
    plt.plot(x,data_fit,"k",label="old fit")
    plt.plot(x,result.init_fit,'-g',label='init')
    plt.plot(x,result.best_fit,'-r',label='fit')
    plt.legend()
    plt.show()
    print(result.fit_report())
fit_logic_standalone.py (project: qudi, author: Ulm-IQO)
def poissonian_testing():
    start=0
    stop=30
    mu=8
    num_points=1000
    x = np.array(np.linspace(start, stop, num_points))
#            x = np.array(x,dtype=np.int64)
    mod,params = qudi_fitting.make_poissonian_model()
    print('Parameters of the model',mod.param_names)

    p=Parameters()
    p.add('mu',value=mu)
    p.add('amplitude',value=200.)

    data_noisy=(mod.eval(x=x,params=p) *
                np.array((1+0.001*np.random.normal(size=x.shape) *
                p['amplitude'].value ) ) )

    print('all int',all(isinstance(item, (np.int32,int, np.int64)) for item in x))
    print('int',isinstance(x[1], int),float(x[1]).is_integer())
    print(type(x[1]))
    #make the filter an extra function shared and usable for other functions
    gaus=gaussian(10,10)
    data_smooth = filters.convolve1d(data_noisy, gaus/gaus.sum(),mode='mirror')


    result = qudi_fitting.make_poissonian_fit(x, data_noisy)
    print(result.fit_report())

    plt.figure()
    plt.plot(x, data_noisy, '-b', label='noisy data')
    plt.plot(x, data_smooth, '-g', label='smoothed data')
    plt.plot(x,result.init_fit,'-y', label='initial values')
    plt.plot(x,result.best_fit,'-r',linewidth=2.0, label='fit')
    plt.xlabel('counts')
    plt.ylabel('occurrences')
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0.)
    plt.show()
fit_logic_standalone.py (project: qudi, author: Ulm-IQO)
def double_poissonian_testing():
    """ Testing of double poissonian with self created data.
    First version of double poissonian fit."""

    start=100
    stop=300
    num_points=int((stop-start)+1)*100
    x = np.linspace(start, stop, num_points)

    # double poissonian
    mod,params = qudi_fitting.make_multiplepoissonian_model(no_of_functions=2)
    print('Parameters of the model',mod.param_names)
    parameter=Parameters()
    parameter.add('p0_mu',value=200)
    parameter.add('p1_mu',value=240)
    parameter.add('p0_amplitude',value=1)
    parameter.add('p1_amplitude',value=1)
    data_noisy = ( np.array(mod.eval(x=x,params=parameter)) *
                   np.array((1+0.2*np.random.normal(size=x.shape) )*
                   parameter['p1_amplitude'].value) )


    #make the filter an extra function shared and usable for other functions
    gaus=gaussian(10,10)
    data_smooth = filters.convolve1d(data_noisy, gaus/gaus.sum(),mode='mirror')

    result = qudi_fitting.make_doublepoissonian_fit(x, data_noisy)
    print(result.fit_report())

    plt.figure()
    plt.plot(x, data_noisy, '-b', label='noisy data')
    plt.plot(x, data_smooth, '-g', label='smoothed data')
    plt.plot(x,result.init_fit,'-y', label='initial values')
    plt.plot(x,result.best_fit,'-r',linewidth=2.0, label='fit')
    plt.xlabel('counts')
    plt.ylabel('occurrences')
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0.)
    plt.show()
train_test_deploy_new.py (project: FingerNet, author: felixTY)
def orientation(image, stride=8, window=17):
    with K.tf.name_scope('orientation'):
        assert image.get_shape().as_list()[3] == 1, 'Images must be grayscale'
        strides = [1, stride, stride, 1]
        E = np.ones([window, window, 1, 1])
        sobelx = np.reshape(np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=float), [3, 3, 1, 1])
        sobely = np.reshape(np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=float), [3, 3, 1, 1])
        gaussian = np.reshape(gaussian2d((5, 5), 1), [5, 5, 1, 1])
        with K.tf.name_scope('sobel_gradient'):
            Ix = K.tf.nn.conv2d(image, sobelx, strides=[1,1,1,1], padding='SAME', name='sobel_x')
            Iy = K.tf.nn.conv2d(image, sobely, strides=[1,1,1,1], padding='SAME', name='sobel_y')
        with K.tf.name_scope('eltwise_1'):
            Ix2 = K.tf.multiply(Ix, Ix, name='IxIx')
            Iy2 = K.tf.multiply(Iy, Iy, name='IyIy')
            Ixy = K.tf.multiply(Ix, Iy, name='IxIy')
        with K.tf.name_scope('range_sum'):
            Gxx = K.tf.nn.conv2d(Ix2, E, strides=strides, padding='SAME', name='Gxx_sum')
            Gyy = K.tf.nn.conv2d(Iy2, E, strides=strides, padding='SAME', name='Gyy_sum')
            Gxy = K.tf.nn.conv2d(Ixy, E, strides=strides, padding='SAME', name='Gxy_sum')
        with K.tf.name_scope('eltwise_2'):
            Gxx_Gyy = K.tf.subtract(Gxx, Gyy, name='Gxx_Gyy')
            theta = atan2([2*Gxy, Gxx_Gyy]) + np.pi
        # two-dimensional low-pass filter: Gaussian filter here
        with K.tf.name_scope('gaussian_filter'):
            phi_x = K.tf.nn.conv2d(K.tf.cos(theta), gaussian, strides=[1,1,1,1], padding='SAME', name='gaussian_x')
            phi_y = K.tf.nn.conv2d(K.tf.sin(theta), gaussian, strides=[1,1,1,1], padding='SAME', name='gaussian_y')
            theta = atan2([phi_y, phi_x])/2
    return theta
utils.py (project: FingerNet, author: felixTY)
def gaussian2d(shape=(5,5),sigma=0.5):
    """
    2D gaussian mask - should give the same result as MATLAB's
    fspecial('gaussian',[shape],[sigma])
    """
    m,n = [(ss-1.)/2. for ss in shape]
    y,x = np.ogrid[-m:m+1,-n:n+1]
    h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
    h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
    sumh = h.sum()
    if sumh != 0:
        h /= sumh
    return h
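
A usage sketch of the properties the mask should satisfy (this assumes gaussian2d above is in scope).

import numpy as np

h = gaussian2d(shape=(5, 5), sigma=1.0)
print(np.isclose(h.sum(), 1.0))    # True: the mask is normalized
print(h.argmax() == h.size // 2)   # True: the peak sits in the centre
print(np.allclose(h, h.T))         # True: isotropic, hence symmetric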
utils.py (project: FingerNet, author: felixTY)
def gausslabel(length=180, stride=2):
    gaussian_pdf = signal.gaussian(length+1, 3)
    label = np.reshape(np.arange(stride/2, length, stride), [1,1,-1,1])
    y = np.reshape(np.arange(stride/2, length, stride), [1,1,1,-1])
    delta = np.array(np.abs(label - y), dtype=int)
    delta = np.minimum(delta, length-delta) + length//2  # integer offset so the array can be indexed under Python 3
    return gaussian_pdf[delta]
train_test_deploy.py (project: FingerNet, author: felixTY)
def orientation(image, stride=8, window=17):
    with K.tf.name_scope('orientation'):
        assert image.get_shape().as_list()[3] == 1, 'Images must be grayscale'
        strides = [1, stride, stride, 1]
        E = np.ones([window, window, 1, 1])
        sobelx = np.reshape(np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=float), [3, 3, 1, 1])
        sobely = np.reshape(np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=float), [3, 3, 1, 1])
        gaussian = np.reshape(gaussian2d((5, 5), 1), [5, 5, 1, 1])
        with K.tf.name_scope('sobel_gradient'):
            Ix = K.tf.nn.conv2d(image, sobelx, strides=[1,1,1,1], padding='SAME', name='sobel_x')
            Iy = K.tf.nn.conv2d(image, sobely, strides=[1,1,1,1], padding='SAME', name='sobel_y')
        with K.tf.name_scope('eltwise_1'):
            Ix2 = K.tf.multiply(Ix, Ix, name='IxIx')
            Iy2 = K.tf.multiply(Iy, Iy, name='IyIy')
            Ixy = K.tf.multiply(Ix, Iy, name='IxIy')
        with K.tf.name_scope('range_sum'):
            Gxx = K.tf.nn.conv2d(Ix2, E, strides=strides, padding='SAME', name='Gxx_sum')
            Gyy = K.tf.nn.conv2d(Iy2, E, strides=strides, padding='SAME', name='Gyy_sum')
            Gxy = K.tf.nn.conv2d(Ixy, E, strides=strides, padding='SAME', name='Gxy_sum')
        with K.tf.name_scope('eltwise_2'):
            Gxx_Gyy = K.tf.subtract(Gxx, Gyy, name='Gxx_Gyy')
            theta = atan2([2*Gxy, Gxx_Gyy]) + np.pi
        # two-dimensional low-pass filter: Gaussian filter here
        with K.tf.name_scope('gaussian_filter'):
            phi_x = K.tf.nn.conv2d(K.tf.cos(theta), gaussian, strides=[1,1,1,1], padding='SAME', name='gaussian_x')
            phi_y = K.tf.nn.conv2d(K.tf.sin(theta), gaussian, strides=[1,1,1,1], padding='SAME', name='gaussian_y')
            theta = atan2([phi_y, phi_x])/2
    return theta
filters.py (project: flight-data-processor, author: junzis)
def kernel(self, series, sigma=3):
        # fix the weight of data
        # http://www.nehalemlabs.net/prototype/blog/2014/04/12/
        #    how-to-fix-scipys-interpolating-spline-default-behavior/
        series = np.asarray(series)
        b = gaussian(25, sigma)
        averages = filters.convolve1d(series, b/b.sum())
        variances = filters.convolve1d(np.power(series-averages, 2), b/b.sum())
        variances[variances == 0] = 1
        return averages, variances
filters.py (project: flight-data-processor, author: junzis)
def filter(self, X, Y):
        X, Y = self.sortxy(X, Y)

        # using gaussian kernel to get a better variances
        avg, var = self.kernel(Y)
        spl = UnivariateSpline(X, Y, k=self.k, w=1/np.sqrt(var))

        if self.interpolate:
            xmax = X[-1]
            Xfull = np.arange(xmax)
            Yfull = spl(Xfull)
            return Xfull, Yfull
        else:
            Y1 = spl(X)
            return X, Y1
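
A condensed, self-contained sketch of the same idea: estimate a local mean and variance with a Gaussian kernel and pass the inverse standard deviation to UnivariateSpline as weights, so noisier regions are smoothed more strongly. All data below is made up.

import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.ndimage import convolve1d
from scipy.signal.windows import gaussian

x = np.linspace(0, 10, 200)
y = np.sin(x) + np.random.normal(scale=0.1 + 0.2 * (x > 5), size=x.size)  # noisier tail

b = gaussian(25, 3)
avg = convolve1d(y, b / b.sum())
var = convolve1d((y - avg) ** 2, b / b.sum())
var[var == 0] = 1                       # avoid division by zero in the weights

spl = UnivariateSpline(x, y, k=3, w=1 / np.sqrt(var))
y_smooth = spl(x)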
signal.py (project: pwtools, author: elcorto)
def lorentz(M, std=1.0, sym=True):
    """Lorentz window (same as Cauchy function). Function skeleton stolen from
    scipy.signal.gaussian().

    The Lorentz function is

    .. math:: 

        L(x) = \\frac{\Gamma}{(x-x_0)^2 + \Gamma^2}

    Here :math:`x_0 = 0` and `std` = :math:`\Gamma`.
    Some definitions use :math:`1/2\,\Gamma` instead of :math:`\Gamma`, but
    without 1/2 we get comparable peak width to Gaussians when using this
    window in convolutions, thus ``scipy.signal.gaussian(M, std=5)`` is similar
    to ``lorentz(M, std=5)``.

    Parameters
    ----------
    M : int
        number of points
    std : float 
        spread parameter :math:`\Gamma`
    sym : bool

    Returns
    -------
    w : (M,)
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1,dtype=float)
    odd = M % 2
    if not sym and not odd:
        M = M+1
    n = np.arange(0, M) - (M - 1.0) / 2.0
    w = std / (n**2.0 + std**2.0)
    w /= w.max()
    if not sym and not odd:
        w = w[:-1]
    return w
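
A short comparison backing the docstring's claim that std plays a comparable role in both windows (gaussian taken from scipy.signal.windows; lorentz above is assumed to be in scope).

import numpy as np
from scipy.signal.windows import gaussian

M, std = 101, 5.0
w_lor = lorentz(M, std=std)
w_gau = gaussian(M, std=std)

print(w_lor.max(), w_gau.max())   # both peak at 1.0 in the centre
print(w_lor[0] > w_gau[0])        # True: the Lorentzian has much heavier tails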
layers.py (project: mctest-model, author: Maluuba)
def build(self):
        self.trainable_weights = [self.alpha]
        if self.use_gaussian_window:
            window = signal.gaussian(self.window_size, std=self.std)
        else:
            window = np.ones(self.window_size, dtype=floatX)
        self.w_gaussian = K.variable(window)
        self.trainable_weights.append(self.w_gaussian)
layers.py (project: mctest-model, author: Maluuba)
def build(self, input_shape):
        self.trainable_weights = [self.alpha]
        if self.use_gaussian_window:
            self.std = 2
            window = signal.gaussian(self.window_size, std=self.std)
            self.w_gaussian = K.variable(window)
layers.py (project: mctest-model, author: Maluuba)
def build(self, input_shape):
        self.trainable_weights = [self.alpha]
        if self.use_gaussian_window:
            window = signal.gaussian(self.window_size, std=self.std)
        else:
            window = np.ones(self.window_size, dtype=floatX)
        self.w_gaussian = K.variable(window)
        self.trainable_weights.append(self.w_gaussian)
graphics.py (project: NeoAnalysis, author: neoanalysis)
def calibrate_eye(self,eye_channel,realign_mark,realign_timebin,eye_medfilt_win=21,eye_gausfilt_sigma=3):
        '''
        Args
            eye_channel (list):
                the first element is the channel name for the horizontal eye position
                the second element is the channel name for the vertical eye position
            realign_mark (string):
                event marker used to align eye positions
            realign_timebin (list):
                a period of time relative to the realign_mark.
                Example: [0,100]
            eye_medfilt_win (int):
                parameter for the median filter to smooth the eye movement trajectory
            eye_gausfilt_sigma (int):
                sigma of the gaussian kernel to smooth the eye movement trajectory
        Return:
            -
        '''
        samp_time = 1000.0/self.sampling_rate[eye_channel[0]]
        # medfilt eye x, y position
        lamb_medfilt = lambda ite:signal.medfilt(ite,eye_medfilt_win)
        self.data_df[eye_channel[0]] = self.data_df[eye_channel[0]].apply(lamb_medfilt)
        self.data_df[eye_channel[1]] = self.data_df[eye_channel[1]].apply(lamb_medfilt)
        # gaussian filt eye x,y position
        lamb_gausfilt = lambda ite:ndimage.filters.gaussian_filter1d(ite,eye_gausfilt_sigma)
        self.data_df[eye_channel[0]] = self.data_df[eye_channel[0]].apply(lamb_gausfilt)
        self.data_df[eye_channel[1]] = self.data_df[eye_channel[1]].apply(lamb_gausfilt)
        # align eye to realign_mark, realign_timebin uses realign_mark as reference
        realign_poinnum = (self.data_df[realign_mark]/samp_time).values
        start_points = realign_poinnum + realign_timebin[0]/samp_time
        points_num = int((realign_timebin[1]-realign_timebin[0])/samp_time)
        for channel in eye_channel:
            align_points = list()
            for idx in self.data_df.index:
                start_point = start_points[idx]
                if ~np.isnan(start_point):
                    start_point = int(start_point)
                    end_point = start_point + points_num
                    align_point = self.data_df[channel].loc[idx][start_point:end_point]
                    align_point = align_point.mean()
                else:
                    align_point = np.nan
                align_points.append(align_point)
            self.data_df[channel] = self.data_df[channel] - align_points

    # find all saccades for all trials
simulateUncertDependencyOnExpTime.py (project: imgProcessor, author: radjkarl)
def _flux(t, n, duration, std, offs=1):
    '''
    returns Gaussian shaped signal fluctuations [events]

    t --> times to calculate events for
    n --> number of events per second
    duration --> event duration per second
    std --> std of event if averaged over time
    offs --> event offset
    '''
    duration *= len(t) / t[-1]
    duration = int(max(duration, 1))

    pos = []
    n *= t[-1]
    pp = np.arange(len(t))
    valid = np.ones_like(t, dtype=bool)
    for _ in range(int(round(n))):
        try:
            ppp = np.random.choice(pp[valid], 1)[0]
            pos.append(ppp)
            valid[max(0, ppp - duration - 1):ppp + duration + 1] = False
        except ValueError:
            break
    sign = np.random.randint(0, 2, len(pos))
    sign[sign == 0] = -1

    out = np.zeros_like(t)

    amps = np.random.normal(loc=0, scale=1, size=len(pos))

    if duration > 2:
        evt = gaussian(duration, duration)
        evt -= evt[0]
    else:
        evt = np.ones(shape=duration)


    for s, p, a in zip(sign, pos, amps):
        pp = duration
        if p + duration > len(out):
            pp = len(out) - p

        out[p:p + pp] = s * a * evt[:pp]

    out /= out.std() / std
    out += offs
    return out
poissonianlikemethods.py (project: qudi, author: Ulm-IQO)
def make_poissonian_model(self, prefix=None):
    """ Create a model of a single poissonian with an offset.

    @param str prefix: optional string, which serves as a prefix for all
                       parameters used in this model. That will prevent
                       name collisions if this model is used in a composite
                       way.

    @return tuple: (object model, object params)

    Explanation of the objects:
        object lmfit.model.CompositeModel model:
            A model the lmfit module will use for that fit. Here a
            poissonian model. Returns an object of the class
            lmfit.model.CompositeModel.

        object lmfit.parameter.Parameters params:
            It is basically an OrderedDict, so a dictionary, with keys
            denoting the parameters as string names and values which are
            lmfit.parameter.Parameter (without s) objects, keeping the
            information about the current value.
    """
    def poisson_function(x, mu):
        """ Function of a poisson distribution.

        @param numpy.array x: 1D array as the independent variable - e.g. occurrences
        @param float mu: expectation value

        @return: poisson function: in order to use it as a model
        """
        return self.poisson(x, mu)

    amplitude_model, params = self.make_amplitude_model(prefix=prefix)

    if not isinstance(prefix, str) and prefix is not None:

        self.log.error('The passed prefix <{0}> of type {1} is not a string and '
                       'cannot be used as a prefix and will be ignored for now. '
                       'Correct that!'.format(prefix, type(prefix)))

        poissonian_model = Model(poisson_function, independent_vars='x')

    else:

        poissonian_model = Model(poisson_function, independent_vars='x',
                                 prefix=prefix)

    poissonian_ampl_model = amplitude_model * poissonian_model
    params = poissonian_ampl_model.make_params()

    return poissonian_ampl_model, params
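
A standalone sketch of the same composition with lmfit, using a plain constant in place of qudi's amplitude model; the function names and the synthetic data are illustrative only.

import numpy as np
from lmfit import Model
from scipy.special import gammaln, xlogy

def poisson(x, mu):
    # continuous poissonian via the log-gamma function, as in the method above
    return np.exp(xlogy(x, mu) - gammaln(x + 1) - mu)

def constant(x, amplitude=1.0):
    # stand-in for the amplitude model: a flat scaling factor
    return amplitude * np.ones_like(x)

model = Model(constant, independent_vars=['x']) * Model(poisson, independent_vars=['x'])
params = model.make_params(amplitude=100.0, mu=10.0)

rng = np.random.default_rng(0)
x = np.arange(0, 30, dtype=float)
data = 150.0 * poisson(x, 8.0) + rng.normal(scale=1.0, size=x.size)

result = model.fit(data, params, x=x)
print(result.fit_report())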

