Python erf() usage examples (source code)

TDEMDipolarfields.py (source file, project: em_examples, author: geoscixyz)
def H_from_MagneticDipoleWholeSpace(XYZ, srcLoc, sig, t, current=1., length=1., orientation='X', kappa=0., epsr=1.):

    """
        Compute the analytic magnetic field (H) from a magnetic dipole in a wholespace.
        - You have the option of computing H for multiple times at a single receiver location
          or for a single time at multiple receiver locations.

        :param numpy.array XYZ: receiver locations at which to evaluate H
        :param numpy.array srcLoc: [x,y,z] triplet defining the location of the magnetic dipole source
        :param float sig: value specifying the conductivity (S/m) of the wholespace
        :param numpy.array t: array of times at which to measure
        :param float current: size of the injected current (A), default is 1.0 A
        :param float length: length of the dipole (m), default is 1.0 m
        :param str orientation: orientation of dipole: 'X', 'Y', or 'Z'
        :param float kappa: magnetic susceptibility value (unitless), default is 0.
        :param float epsr: relative permittivity value (unitless), default is 1.0
        :rtype: numpy.array
        :return: Hx, Hy, Hz: arrays containing all 3 components of H evaluated at the specified locations and times.
    """

    mu = mu_0*(1+kappa)
    epsilon = epsilon_0*epsr

    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
    # Check
    if XYZ.shape[0] > 1 and t.shape[0] > 1:
        raise Exception("I/O type error: For multiple field locations only a single time can be specified.")

    dx = XYZ[:,0]-srcLoc[0]
    dy = XYZ[:,1]-srcLoc[1]
    dz = XYZ[:,2]-srcLoc[2]

    r  = np.sqrt( dx**2. + dy**2. + dz**2.)
    theta = np.sqrt((mu*sig)/(4*t))

    front = current * length / (4.* pi  * r**3)
    mid   = 3 * erf(theta*r) - (4/np.sqrt(pi) * (theta)**3 * r**3 + 6/np.sqrt(pi) * theta * r) * np.exp(-(theta)**2 * (r)**2)
    extra = (erf(theta*r) - (4/np.sqrt(pi) * (theta)**3 * r**3 + 2/np.sqrt(pi) * theta * r) * np.exp(-(theta)**2 * (r)**2))

    if orientation.upper() == 'X':
        Hx = front*(dx**2 / r**2)*mid - front*extra
        Hy = front*(dx*dy  / r**2)*mid
        Hz = front*(dx*dz  / r**2)*mid
        return Hx, Hy, Hz

    elif orientation.upper() == 'Y':
        #  x--> y, y--> z, z-->x
        Hy = front*(dy**2 / r**2)*mid - front*extra
        Hz = front*(dy*dz  / r**2)*mid
        Hx = front*(dy*dx  / r**2)*mid
        return Hx, Hy, Hz

    elif orientation.upper() == 'Z':
        # x --> z, y --> x, z --> y
        Hz = front*(dz**2 / r**2)*mid - front*extra
        Hx = front*(dz*dx  / r**2)*mid
        Hy = front*(dz*dy  / r**2)*mid
        return Hx, Hy, Hz
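
For reference, the erf-based transient kernel used above can be exercised on its own. Below is a minimal standalone sketch (not taken from em_examples; the scalar values are arbitrary) that evaluates theta and the mid/extra factors for a single receiver offset:

import numpy as np
from scipy.constants import mu_0
from scipy.special import erf

sig = 1e-2    # wholespace conductivity (S/m), arbitrary example value
t = 1e-4      # time (s), arbitrary example value
r = 100.0     # source-receiver separation (m), arbitrary example value

theta = np.sqrt(mu_0 * sig / (4 * t))   # diffusion parameter, as in the function above
tr = theta * r
gauss = np.exp(-tr ** 2)

# erf-based factors shaping the transient dipole response
mid = 3 * erf(tr) - (4 / np.sqrt(np.pi) * tr ** 3 + 6 / np.sqrt(np.pi) * tr) * gauss
extra = erf(tr) - (4 / np.sqrt(np.pi) * tr ** 3 + 2 / np.sqrt(np.pi) * tr) * gauss
print(mid, extra)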
line.py (source file, project: MOSFiT, author: guillochon)
def process(self, **kwargs):
        """Process module."""
        kwargs = self.prepare_input(self.key('luminosities'), **kwargs)
        self._rest_t_explosion = kwargs[self.key('resttexplosion')]
        self._times = kwargs[self.key('rest_times')]
        self._seds = kwargs[self.key('seds')]
        self._bands = kwargs['all_bands']
        self._band_indices = kwargs['all_band_indices']
        self._sample_wavelengths = kwargs['sample_wavelengths']
        self._frequencies = kwargs['all_frequencies']
        self._luminosities = kwargs[self.key('luminosities')]
        self._line_wavelength = kwargs[self.key('line_wavelength')]
        self._line_width = kwargs[self.key('line_width')]
        self._line_time = kwargs[self.key('line_time')]
        self._line_duration = kwargs[self.key('line_duration')]
        self._line_amplitude = kwargs[self.key('line_amplitude')]
        lw = self._line_wavelength
        ls = self._line_width
        cc = self.C_CONST
        zp1 = 1.0 + kwargs[self.key('redshift')]
        amps = [
            self._line_amplitude * np.exp(-0.5 * (
                (x - self._rest_t_explosion - self._line_time) /
                self._line_duration) ** 2) for x in self._times]

        seds = self._seds
        evaled = False
        for li, lum in enumerate(self._luminosities):
            bi = self._band_indices[li]
            if lum == 0.0:
                if bi >= 0:
                    seds.append(np.zeros_like(self._sample_wavelengths[bi]))
                else:
                    seds.append([0.0])
                continue
            if bi >= 0:
                rest_wavs = (self._sample_wavelengths[bi] *
                             u.Angstrom.cgs.scale / zp1)
            else:
                rest_wavs = [cc / (self._frequencies[li] * zp1)]  # noqa: F841

            amp = lum * amps[li]

            if not evaled:
                sed = ne.evaluate(
                    'amp * exp(-0.5 * ((rest_wavs - lw) / ls) ** 2)')
                evaled = True
            else:
                sed = ne.re_evaluate()

            sed = np.nan_to_num(sed)

            norm = (lum + amp / zp1 * np.sqrt(np.pi / 2.0) * (
                1.0 + erf(lw / (np.sqrt(2.0) * ls)))) / lum

            seds[li] += sed
            seds[li] /= norm

        return {'sample_wavelengths': self._sample_wavelengths,
                self.key('seds'): seds}
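
The erf term in the `norm` factor above comes from the closed-form integral of a Gaussian over the half-line, ∫_0^∞ exp(-(x-μ)²/(2σ²)) dx = σ√(π/2)(1 + erf(μ/(σ√2))). A standalone check of that identity (illustrative only, not MOSFiT code; the centre and width values are made up):

import numpy as np
from scipy.integrate import quad
from scipy.special import erf

mu, s = 3.0, 1.5   # illustrative Gaussian centre and width

closed_form = s * np.sqrt(np.pi / 2.0) * (1.0 + erf(mu / (np.sqrt(2.0) * s)))
numeric, _ = quad(lambda x: np.exp(-0.5 * ((x - mu) / s) ** 2), 0.0, np.inf)
print(closed_form, numeric)   # the two values agree to quad's tolerance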
regtests.py (source file, project: describe, author: SINGROUP)
def test_cdf(self):
        """Test that the implementation of the gaussian value through the
        cumulative distribution function works as expected.
        """
        from scipy.stats import norm
        from scipy.special import erf

        start = -5
        stop = 5
        n_points = 9
        centers = np.array([0])
        sigma = 1

        area_cum = []
        area_pdf = []

        # Calculate errors for dfferent number of points
        for n_points in range(2, 10):

            axis = np.linspace(start, stop, n_points)

            # Calculate with cumulative function
            dx = (stop - start)/(n_points-1)
            x = np.linspace(start-dx/2, stop+dx/2, n_points+1)
            pos = x[np.newaxis, :] - centers[:, np.newaxis]
            y = 1/2*(1 + erf(pos/(sigma*np.sqrt(2))))
            f = np.sum(y, axis=0)
            f_rolled = np.roll(f, -1)
            pdf_cum = (f_rolled - f)[0:-1]/dx

            # Calculate with probability function
            dist2 = axis[np.newaxis, :] - centers[:, np.newaxis]
            dist2 *= dist2
            f = np.sum(np.exp(-dist2/(2*sigma**2)), axis=0)
            f *= 1/math.sqrt(2*sigma**2*math.pi)
            pdf_pdf = f

            true_axis = np.linspace(start, stop, 200)
            pdf_true = norm.pdf(true_axis, centers[0], sigma) # + norm.pdf(true_axis, centers[1], sigma)

            # Calculate differences
            sum_cum = np.sum(0.5*dx*(pdf_cum[:-1]+pdf_cum[1:]))
            sum_pdf = np.sum(0.5*dx*(pdf_pdf[:-1]+pdf_pdf[1:]))
            area_cum.append(sum_cum)
            area_pdf.append(sum_pdf)

            # mpl.plot(axis, pdf_pdf, linestyle=":", linewidth=3, color="r")
            # mpl.plot(axis, pdf_cum, linewidth=1, color="g")
            # mpl.plot(true_axis, pdf_true, linestyle="--", color="b")
            # mpl.show()

        mpl.plot(area_cum, linestyle=":", linewidth=3, color="r")
        mpl.plot(area_pdf, linewidth=1, color="g")
        # mpl.plot(true_axis, pdf_true, linestyle="--", color="b")
        mpl.show()
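
The same comparison can be written without plotting. A small standalone sketch (not part of the describe test suite) of the CDF-based, bin-averaged Gaussian against the point-sampled PDF:

import numpy as np
from scipy.special import erf

def gaussian_bin_average(axis, center=0.0, sigma=1.0):
    """Average a unit-area Gaussian over cells centred on the points of a uniform axis."""
    dx = axis[1] - axis[0]
    edges = np.concatenate(([axis[0] - dx / 2], axis + dx / 2))
    cdf = 0.5 * (1 + erf((edges - center) / (sigma * np.sqrt(2))))
    return np.diff(cdf) / dx

axis = np.linspace(-5, 5, 9)
pdf_point = np.exp(-axis ** 2 / 2) / np.sqrt(2 * np.pi)
print(np.max(np.abs(gaussian_bin_average(axis) - pdf_point)))  # difference shrinks as the grid is refined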
gplvm_vfe_examples.py (source file, project: geepee, author: thangbui)
def run_mnist():
    np.random.seed(42)

    # import dataset
    f = gzip.open('./tmp/data/mnist.pkl.gz', 'rb')
    (x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
    f.close()

    Y = x_train[:100, :]
    labels = t_train[:100]

    Y[Y < 0.5] = -1
    Y[Y > 0.5] = 1

    # inference
    print "inference ..."
    M = 30
    D = 2
    # lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm = vfe.SGPLVM(Y, D, M, lik='Probit')
    # lvm.train(alpha=0.5, no_epochs=10, n_per_mb=100, lrate=0.1, fixed_params=['sn'])
    lvm.optimise(method='L-BFGS-B')
    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1], c=labels)
    plt.plot(zu[:, 0], zu[:, 1], 'ko')

    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 28
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            t = x_mean / np.sqrt(1 + x_var)
            Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
            canvas[(nx - i - 1) * sx:(nx - i) * sx, j *
                   sy:(j + 1) * sy] = Z.reshape(sx, sy)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()

    plt.show()
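
The mapping Z = 0.5 * (1 + erf(t / sqrt(2))) used above is the standard normal CDF, i.e. the probit link applied to the predictive mean scaled by the predictive variance. A quick standalone equivalence check (illustrative, independent of geepee):

import numpy as np
from scipy import special
from scipy.stats import norm

t = np.linspace(-3, 3, 7)
z_erf = 0.5 * (1 + special.erf(t / np.sqrt(2)))
print(np.allclose(z_erf, norm.cdf(t)))   # True: both are the standard normal CDF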
gplvm_aep_examples.py (source file, project: geepee, author: thangbui)
def run_mnist():
    np.random.seed(42)

    # import dataset
    f = gzip.open('./tmp/data/mnist.pkl.gz', 'rb')
    (x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
    f.close()

    Y = x_train[:100, :]
    labels = t_train[:100]

    Y[Y < 0.5] = -1
    Y[Y > 0.5] = 1

    # inference
    print "inference ..."
    M = 30
    D = 2
    # lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
    lvm = aep.SGPLVM(Y, D, M, lik='Probit')
    # lvm.train(alpha=0.5, no_epochs=10, n_per_mb=100, lrate=0.1, fixed_params=['sn'])
    lvm.optimise(method='L-BFGS-B', alpha=0.1)
    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1], c=labels)
    plt.plot(zu[:, 0], zu[:, 1], 'ko')

    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 28
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            t = x_mean / np.sqrt(1 + x_var)
            Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
            canvas[(nx - i - 1) * sx:(nx - i) * sx, j *
                   sy:(j + 1) * sy] = Z.reshape(sx, sy)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()

    plt.show()
gplvm_aep_examples.py (source file, project: geepee, author: thangbui)
def run_xor():
    from operator import xor

    from scipy import special
    # create dataset
    print "generating dataset..."

    n = 25
    Y = np.zeros((0, 3))
    for i in [0, 1]:
        for j in [0, 1]:
            a = i * np.ones((n, 1))
            b = j * np.ones((n, 1))
            c = xor(bool(i), bool(j)) * np.ones((n, 1))
            Y_ij = np.hstack((a, b, c))
            Y = np.vstack((Y, Y_ij))

    Y = 2 * Y - 1

    # inference
    print "inference ..."
    M = 10
    D = 2
    lvm = aep.SGPLVM(Y, D, M, lik='Probit')
    lvm.optimise(method='L-BFGS-B', alpha=0.1, maxiter=200)

    # predict given inputs
    mx, vx = lvm.get_posterior_x()
    lims = [-1.5, 1.5]
    x = np.linspace(*lims, num=101)
    y = np.linspace(*lims, num=101)
    X, Y = np.meshgrid(x, y)
    X_ravel = X.ravel()
    Y_ravel = Y.ravel()
    inputs = np.vstack((X_ravel, Y_ravel)).T
    my, vy = lvm.predict_f(inputs)
    t = my / np.sqrt(1 + vy)
    Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
    for d in range(3):
        plt.figure()
        plt.scatter(mx[:, 0], mx[:, 1])
        zu = lvm.sgp_layer.zu
        plt.plot(zu[:, 0], zu[:, 1], 'ko')
        plt.contour(X, Y, np.log(Z[:, d] + 1e-16).reshape(X.shape))
        plt.xlim(*lims)
        plt.ylim(*lims)

    # Y_test = np.array([[1, -1, 1], [-1, 1, 1], [-1, -1, -1], [1, 1, -1]])
    # # impute missing data
    # for k in range(3):
    #   Y_test_k = Y_test
    #   missing_mask = np.ones_like(Y_test_k)
    #   missing_mask[:, k] = 0
    #   my_pred, vy_pred = lvm.impute_missing(
    #       Y_test_k, missing_mask,
    #       alpha=0.1, no_iters=100, add_noise=False)

    #   print k, my_pred, vy_pred, Y_test_k

    plt.show()
synthetic.py (source file, project: multi-diffusion, author: chemical-diffusion)
def create_diffusion_profiles(diff_matrix, x_points, exchange_vectors,
                              noise_level=0.02, seed=0):
    """
    Compute theoretical concentration profiles, given the diffusion matrix and
    exchange directions.

    Parameters
    ----------

    diff_matrix : tuple
        tuple of eigenvalues and eigenvectors

    x_points : list of arrays
        points at which profiles are measured. There are as many profiles as
        exchange vectors.

    exchange_vectors : array
        array where each column encodes the species that are exchanged in the
        diffusion experiment. There are as many columns as diffusion
        experiments.

    noise_level : float
        Gaussian noise can be added to the theoretical profiles, in order to
        simulate experimental noise.
    """
    gen = np.random.RandomState(seed)
    diags, P = diff_matrix
    P = np.matrix(P)
    exchanges = exchange_vectors[:-1]
    n_comps = exchanges.shape[0]
    if n_comps != P.shape[0]:
        raise ValueError("Exchange vectors must be given in the full basis")
    concentration_profiles = []
    for i_exp, x_prof in enumerate(x_points):
        orig = P.I * exchanges[:, i_exp][:, None] / 2.
        profs = np.empty((n_comps, len(x_prof)))
        for i in range(n_comps):
            profs[i] = orig[i] * erf(x_prof / np.sqrt(4 * diags[i]))
        profiles = np.array(P * np.matrix(profs))
        profiles = np.vstack((profiles, - profiles.sum(axis=0)))
        concentration_profiles.append(np.array(profiles) + 
                                noise_level * gen.randn(*profiles.shape))
    return concentration_profiles
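
Each eigen-profile above is the classic erf solution of a one-dimensional diffusion couple. A standalone sketch for a single component, with the diffusion time written explicitly (values are arbitrary and not tied to the multi-diffusion package):

import numpy as np
from scipy.special import erf

D = 1e-13     # diffusion coefficient (m^2/s), assumed for illustration
t = 3600.0    # annealing time (s), assumed for illustration
dc = 1.0      # total concentration step across the couple
x = np.linspace(-1e-4, 1e-4, 201)   # distance from the interface (m)

# c(x, t) = (dc / 2) * erf(x / sqrt(4 * D * t))
c = 0.5 * dc * erf(x / np.sqrt(4 * D * t))
print(c[0], c[len(c) // 2], c[-1])   # ~ -dc/2, 0, +dc/2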
cdf.py (source file, project: opcsim, author: dhhagan)
def nt(n, gm, gsd, dmin=None, dmax=10.):
    """Evaluate the total number of particles between two diameters.

    The CDF of a lognormal distribution is calculated using equation
    8.39 from Seinfeld and Pandis.

    Mathematically, it is represented as:

    .. math::

        N_t(D_p)=\\int_{D_{min}}^{D_{max}}n_N(D_p^*)dD_p^*=\\frac{N_t}{2}+\\frac{N_t}{2}\\mathrm{erf}\\Big(\\frac{\\ln(D_p/D_{pg})}{\\sqrt{2}\\,\\ln\\sigma_g}\\Big) \\;\\;(cm^{-3})

    Parameters
    ----------
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.
    dmin : float
        The minimum particle diameter in microns. Default is None, which is treated as 0 :math:`\mu m`.
    dmax : float
        The maximum particle diameter in microns. Default value is 10 :math:`\mu m`.

    Returns
    -------
    N | float
        Returns the total number of particles between dmin and dmax in units of
        [:math:`particles*cm^{-3}`]

    See Also
    --------
    opcsim.equations.pdf.dn_ddp
    opcsim.equations.pdf.dn_dlndp
    opcsim.equations.pdf.dn_dlogdp

    Examples
    --------

    Evaluate the number of particles in a simple distribution between 0 and
    2.5 :math:`\mu m`:

    >>> d = opcsim.AerosolDistribution()
    >>> d.add_mode(1e3, 100, 1.5, "mode 1")
    >>> n = opcsim.equations.cdf.nt(1e3, 0.1, 1.5, dmax=2.5)

    """
    res = (n/2.) * (1 + erf((np.log(dmax/gm)) / (np.sqrt(2) * np.log(gsd))))

    if dmin is not None and dmin > 0.0:
        res -= nt(n, gm, gsd, dmin=None, dmax=dmin)

    return res
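
As a sanity check, the closed form in `nt` can be compared against direct numerical integration of the lognormal number distribution. A standalone sketch (it does not call opcsim) using the same parameters as the docstring example:

import numpy as np
from scipy.integrate import quad
from scipy.special import erf

n, gm, gsd = 1e3, 0.1, 1.5   # number concentration (#/cc), geometric mean (um), GSD
dmax = 2.5                   # upper diameter limit (um)

def dn_ddp(dp):
    """Lognormal number distribution n_N(D_p)."""
    return (n / (np.sqrt(2 * np.pi) * dp * np.log(gsd)) *
            np.exp(-np.log(dp / gm) ** 2 / (2 * np.log(gsd) ** 2)))

closed = (n / 2.) * (1 + erf(np.log(dmax / gm) / (np.sqrt(2) * np.log(gsd))))
numeric, _ = quad(dn_ddp, 1e-6, dmax)   # lower limit avoids D_p = 0; the omitted tail is negligible
print(closed, numeric)                  # both are ~ the number of particles below dmax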

