Examples of Python's log1p() from open-source projects

Source file: _utilities.py (project: q2-coordinates, author: nbokulich)
def plot_basemap(latitude, longitude, image, color_palette=None):
    # define basemap tiler and color palette
    cmap, tiler = get_map_params(image, color_palette)

    # find min/max coordinates to set extent
    lat_0, lat_1, lon_0, lon_1 = get_max_extent(latitude, longitude)

    # choose tile resolution automatically: log1p compresses the coordinate
    # span, so the zoom level degrades gradually as the span grows
    max_span = max((abs(lat_1) - abs(lat_0)), (abs(lon_1) - abs(lon_0)))
    res = int(-1.4 * math.log1p(max_span) + 13)

    # instantiate axes in the tiler's projection
    ax = plt.axes(projection=tiler.crs)
    # define extents of any plotted data
    ax.set_extent((lon_0, lon_1, lat_0, lat_1), ccrs.Geodetic())
    # add terrain background at the chosen resolution
    ax.add_image(tiler, res)

    return ax, cmap
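The resolution heuristic above is this snippet's use of log1p: it compresses the map span logarithmically, so doubling the extent costs only a fraction of a zoom level. A minimal standalone sketch (the -1.4 and 13 constants come from the snippet; the sample spans are arbitrary):

import math

def zoom_for_span(max_span):
    # Same heuristic as plot_basemap above.
    return int(-1.4 * math.log1p(max_span) + 13)

for span in (0.01, 0.1, 1, 10, 100):   # degrees
    print(span, zoom_for_span(span))   # zoom falls from 12 to 6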
Source file: utils.py (project: sparks, author: ImpactHorizon)
# (excerpt: assumes numpy as np, scipy.signal.argrelextrema,
#  scipy.ndimage.gaussian_filter, and math.log1p are imported by the module)
def detect_peaks(hist, count=2):
    hist_copy = hist  # NB: an alias, not a copy; `hist` itself is never mutated
    peaks = len(argrelextrema(hist_copy, np.greater, mode="wrap")[0])
    sigma = log1p(peaks)
    print(peaks, sigma)
    while peaks > count:
        new_hist = gaussian_filter(hist_copy, sigma=sigma)
        peaks = len(argrelextrema(new_hist, np.greater, mode="wrap")[0])
        if peaks < count:
            # smoothed past the target: discard this pass, retry more gently
            peaks = count + 1
            sigma = sigma * 0.5
            continue
        hist_copy = new_hist
        sigma = log1p(peaks)
    print(peaks, sigma)
    return argrelextrema(hist_copy, np.greater, mode="wrap")[0]
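A hedged usage sketch for detect_peaks (the synthetic bimodal histogram, bin count, and seed are all assumptions; requires numpy and scipy, and assumes the function above is in scope):

import numpy as np

rng = np.random.default_rng(0)
samples = np.concatenate([rng.normal(40, 5, 5000), rng.normal(160, 8, 5000)])
hist, _ = np.histogram(samples, bins=256)
peaks = detect_peaks(hist.astype(float), count=2)
print(peaks)  # indices of the two dominant modes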
Source file: geomath.py (project: qgis-shapetools-plugin, author: NationalSecurityAgency)
def log1p(x):
    """log(1 + x) accurate for small x (missing from python 2.5.2)"""

    if sys.version_info > (2, 6):
        return math.log1p(x)

    y = 1 + x
    z = y - 1
    # Here's the explanation for this magic: y = 1 + z, exactly, and z
    # approx x, thus log(y)/z (which is nearly constant near z = 0) returns
    # a good approximation to the true log(1 + x)/x.  The multiplication x *
    # (log(y)/z) introduces little additional error.
    return x if z == 0 else x * math.log(y) / z
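A quick demonstration of the precision problem the fallback works around: for tiny x, the sum 1 + x rounds away most of x before math.log ever sees it, while log1p keeps full precision (x chosen arbitrarily):

import math

x = 1e-12
naive = math.log(1 + x)   # 1 + x rounds; the low-order bits of x are lost
exact = math.log1p(x)
print(naive, exact, abs(naive - exact) / exact)  # relative error near 1e-4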
Source file: geomath.py (project: qgis-shapetools-plugin, author: NationalSecurityAgency)
def atanh(x):
    """atanh(x) (missing from python 2.5.2)"""

    if sys.version_info > (2, 6):
        return math.atanh(x)

    y = abs(x)                  # Enforce odd parity
    y = math.log1p(2 * y/(1 - y))/2
    return -y if x < 0 else y
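The fallback rests on the identity atanh(y) = log1p(2y / (1 - y)) / 2, which follows from atanh(y) = ln((1 + y)/(1 - y)) / 2. A minimal sanity check against math.atanh (test values arbitrary):

import math

for y in (0.1, 0.5, 0.999):
    via_log1p = math.log1p(2 * y / (1 - y)) / 2
    print(y, via_log1p, math.isclose(via_log1p, math.atanh(y)))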
Source file: utils.py (project: lopocs, author: Oslandia)
def compute_scale_for_cesium(coordmin, coordmax):
    '''
    Cesium quantized positions need to be in uint16
    This function computes the best scale to apply to coordinates
    to fit the range [0, 65535]
    '''
    max_int = np.iinfo(np.uint16).max
    delta = abs(coordmax - coordmin)
    # NB: log1p(a) / log1p(10) is log base 11 of (1 + a), not log10(a); it
    # only approximates the base-10 order of magnitude of max_int / delta
    scale = 10 ** -(math.floor(math.log1p(max_int / delta) / math.log1p(10)))
    return scale
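A hedged usage sketch (the coordinate bounds are made up; assumes the function above is in scope along with math and numpy):

import numpy as np

coordmin, coordmax = 430_000.0, 440_000.0   # e.g. projected metres
scale = compute_scale_for_cesium(coordmin, coordmax)
quantized_range = (coordmax - coordmin) / scale
print(scale, quantized_range <= np.iinfo(np.uint16).max)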
Source file: math.py (project: p2pool-bch, author: amarian12)
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1
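geometric draws from a geometric distribution by inverse-transform sampling: with U uniform on (0, 1), floor(log(1 - U) / log(1 - p)) + 1 is geometric with success probability p, and log1p(-p) evaluates log(1 - p) accurately even for small p. A minimal check of the sample mean against the theoretical 1/p (seed and sample size arbitrary; assumes the function above is in scope):

import random

random.seed(42)
p = 0.05
samples = [geometric(p) for _ in range(100_000)]
print(sum(samples) / len(samples), 1 / p)   # both should be close to 20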
Source file: math.py (project: p2pool-unitus, author: amarian12): identical `geometric` function to the p2pool-bch entry above.
Source file: test_math.py (project: zippy, author: securesystemslab)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Source file: math.py (project: p2pool-dgb-sha256, author: ilsawa): identical `geometric` function to the p2pool-bch entry above.
Source file: test_math.py (project: oil, author: oilshell)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n = 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Source file: test_math.py (project: python2-tracer, author: extremecoders-re): identical testLog1p test to the oil entry above.
Source file: math.py (project: p2pool-ltc, author: ilsawa): identical `geometric` function to the p2pool-bch entry above.
Source file: math.py (project: p2pool-bsty, author: amarian12): identical `geometric` function to the p2pool-bch entry above.
Source file: test_math.py (project: web_ctp, author: molebot): identical short testLog1p test to the zippy entry above.
Source file: branch_support.py (project: mandos, author: carolinetomo)
def logsum(l):
    # Streaming log-sum-exp: keeps x = log(sum of exp of the values seen so far).
    x = l[0]
    for i in l[1:]:
        if i > x:
            x, i = i, x  # keep x as the larger value so exp(i - x) <= 1
        # exp(i - x) can now only underflow to 0.0 (and log1p(0) == 0), so the
        # original bare try/except that silently dropped large terms is gone
        x += math.log1p(math.exp(i - x))
    return x
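A quick check of logsum against the direct computation, for inputs small enough that exp cannot overflow (values arbitrary; assumes the function above is in scope):

import math

logs = [math.log(v) for v in (1.0, 2.5, 0.3)]
print(logsum(logs), math.log(1.0 + 2.5 + 0.3))   # both ~1.335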
Source file: math.py (project: p2pool-cann, author: ilsawa): identical `geometric` function to the p2pool-bch entry above.
Source file: cts.py (project: tensorflow-rl, author: steveKapturowski)
def log_add(log_x, log_y):
    """Given log x and log y, returns log(x + y)."""
    # Swap variables so log_y is larger.
    if log_x > log_y:
        log_x, log_y = log_y, log_x

    # Use the log(1 + e^p) trick to compute this efficiently
    # If the difference is large enough, this is effectively log y.
    delta = log_y - log_x
    return math.log1p(math.exp(delta)) + log_x if delta <= 50.0 else log_y
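log_add is the two-term log-sum-exp: after the swap, log(x + y) = log_x + log1p(exp(log_y - log_x)), and the 50.0 cutoff skips the exp once the smaller term is far below double-precision resolution relative to the larger. A minimal check (values arbitrary; assumes the function above is in scope):

import math

print(log_add(math.log(3.0), math.log(4.0)), math.log(7.0))   # should match
print(log_add(-1000.0, 0.0))   # gap > 50: returns the larger term, 0.0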
Source file: calculator.py (project: tichu-tournament, author: aragos)
def _log_rps(self, rps):
        if rps > 0:
            return math.log1p(rps)
        else: 
            return -math.log1p(-rps)
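_log_rps applies a signed log1p transform: it is odd (f(-x) = -f(x)) and monotonic, so it compresses magnitudes while preserving sign and ordering. A standalone sketch of the same transform without the enclosing class (sample values arbitrary):

import math

def signed_log1p(x):
    # Same branch structure as _log_rps above, minus `self`.
    return math.log1p(x) if x > 0 else -math.log1p(-x)

for v in (-10.0, -0.5, 0.0, 0.5, 10.0):
    print(v, signed_log1p(v))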
Source file: sugar.py (project: hyper-engine, author: maxim5)
def log1p(node): return merge([node], math.log1p)
Source file: test_math.py (project: pefile.pypy, author: cloudtracer): identical testLog1p test to the oil entry above.
Source file: cmath.py (project: ouroboros, author: pybee)
def atanh(x):
    _atanh_special = [
        [complex(-0.0, -pi/2), complex(-0.0, -pi/2), complex(-0.0, -pi/2),
            complex(-0.0, pi/2), complex(-0.0, pi/2), complex(-0.0, pi/2),
            complex(-0.0, float("nan"))],
        [complex(-0.0, -pi/2), None, None, None, None, complex(-0.0, pi/2),
            nan+nanj],
        [complex(-0.0, -pi/2), None, None, None, None, complex(-0.0, pi/2),
            complex(-0.0, float("nan"))],
        [-1j*pi/2, None, None, None, None, 1j*pi/2, nanj],
        [-1j*pi/2, None, None, None, None, 1j*pi/2, nan+nanj],
        [-1j*pi/2, -1j*pi/2, -1j*pi/2, 1j*pi/2, 1j*pi/2, 1j*pi/2,  nanj],
        [-1j*pi/2, nan+nanj, nan+nanj, nan+nanj, nan+nanj, 1j*pi/2, nan+nanj]
    ]

    z = _make_complex(x)

    if not isfinite(z):
        return _atanh_special[_special_type(z.real)][_special_type(z.imag)]

    if z.real < 0:
        return -atanh(-z)

    ay = abs(z.imag)
    if z.real > _SQRT_LARGE_DOUBLE or ay > _SQRT_LARGE_DOUBLE:
        hypot = math.hypot(z.real/2, z.imag/2)
        return complex(z.real/4/hypot/hypot, -math.copysign(pi/2, -z.imag))
    if z.real == 1 and ay < _SQRT_DBL_MIN:
        if ay == 0:
            raise ValueError
        return complex(-math.log(math.sqrt(ay)/math.sqrt(math.hypot(ay, 2))),
                       math.copysign(math.atan2(2, -ay)/2, z.imag))
    return complex(math.log1p(4*z.real/((1-z.real)*(1-z.real) + ay*ay))/4,
                   -math.atan2(-2*z.imag, (1-z.real)*(1+z.real) - ay*ay)/2)
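The final branch above computes the real part as log1p(4x / ((1-x)**2 + y**2)) / 4, an accuracy-preserving rewrite of Re(atanh z) = log(((1+x)**2 + y**2) / ((1-x)**2 + y**2)) / 4 that stays precise near z = 0. A standalone check against the C-accelerated cmath.atanh (test points arbitrary, chosen inside the branch's 0 <= Re z < 1 region):

import cmath
import math

def re_atanh(z):
    # log1p-based real part, as in the branch above.
    x, ay = z.real, abs(z.imag)
    return math.log1p(4 * x / ((1 - x) * (1 - x) + ay * ay)) / 4

for z in (0.3 + 0.4j, 1e-8 + 1e-8j):
    print(re_atanh(z), cmath.atanh(z).real)   # pairs should agree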
Source file: test_math.py (project: ouroboros, author: pybee): identical short testLog1p test to the zippy entry above.
Source file: test_math.py (project: ndk-python, author: gittor): identical testLog1p test to the oil entry above.
Source file: test_math.py (project: kbe_server, author: xiaohaoppy): identical short testLog1p test to the zippy entry above.
Source file: tfidfmodel.py (project: paragraph2vec, author: thunlp)
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass
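The docstring above names math.log1p as a drop-in `wlocal` weighting: raw term frequencies are damped sublinearly before being multiplied by IDF. A hedged sketch (assumes a current gensim install; the toy two-document corpus is made up):

import math
from gensim.corpora import Dictionary
from gensim.models import TfidfModel

docs = [["cat", "cat", "cat", "dog"], ["dog", "fish"]]
dictionary = Dictionary(docs)
corpus = [dictionary.doc2bow(d) for d in docs]

# wlocal=math.log1p: the thrice-repeated "cat" contributes log(1 + 3), not 3
model = TfidfModel(corpus, id2word=dictionary, wlocal=math.log1p)
print(model[corpus[0]])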
Source file: tfidfmodel.py (project: topical_word_embeddings, author: thunlp): three entries, each repeating verbatim the same `__init__` as the paragraph2vec entry above.
Source file: tfidfmodel.py (project: nonce2vec, author: minimalparts): identical `__init__` to the paragraph2vec entry above.
Source file: EPAIRED.py (project: ESPSS, author: rcalinjageman)
def nct(tval, delta, df):
  #This is the non-central t-distribution function
  #This is a direct translation from Visual Basic to Python of the nct implementation by Geoff Cumming for ESCI

  #Noncentral t function, after Excel worksheet calculation by Geoff Robinson
  #The three parameters are:
  #   tval -- our t value
  #   delta -- noncentrality parameter
  #   df -- degrees of freedom

  #Dim dfmo As Integer     'for df-1
  #Dim tosdf As Double     'for tval over square root of df
  #Dim sep As Double       'for separation of points
  #Dim sepa As Double      'for temp calc of points
  #Dim consta As Double    'for constant
  #Dim ctr As Integer      'loop counter

  dfmo = df - 1.0
  tosdf = tval / math.sqrt(df)
  sep = (math.sqrt(df) + 7) / 100
  # log1p(1) == log(2), so consta carries the constant factor 2**((2 - df) / 2)
  consta = math.exp( (2 - df) * 0.5 * math.log1p(1) - math.lgamma(df/2) ) * sep /3

  #now do first term in cross product summation, with df=0 special
  if dfmo > 0:
    nctvalue = stdnormdist(0 * tosdf - delta) * 0 ** dfmo * math.exp(-0.5* 0 * 0)
  else:  
    nctvalue = stdnormdist(0 * tosdf - delta) * math.exp(-0.5 * 0 * 0)

  #now add in second term, with multiplier 4
  nctvalue = nctvalue + 4 * stdnormdist(sep*tosdf - delta) * sep ** dfmo * math.exp(-0.5*sep*sep)    

  #now add the paired interior terms (NB: range(1, 49) runs 48 iterations,
  #i.e. 96 terms, one pair short of the 98 the original comment promised)
  for ctr in range(1,49):
      sepa = 2 * ctr * sep
      nctvalue = nctvalue + 2 * stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)
      sepa = sepa + sep
      nctvalue = nctvalue + 4 * stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)

  #add in last term
  sepa = sepa + sep
  nctvalue = nctvalue + stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)

  #multiply by the constant and we've finished
  nctvalue = nctvalue * consta

  return nctvalue
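The snippet depends on an undefined helper stdnormdist, presumably the standard normal CDF supplied elsewhere in the module. A hedged stand-in via math.erf, which makes nct callable (the helper's definition and the sample call are assumptions):

import math

def stdnormdist(x):
    # Standard normal CDF via the error function.
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

print(nct(2.0, 1.0, 10))   # noncentral t value for tval=2, delta=1, df=10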

