Python float_() usage examples (source code)

Source file: extmath.py (project: mpnum, author: dseuss)
def _standard_normal(shape, randstate=np.random, dtype=np.float_):
    """Generates a standard normal numpy array of given shape and dtype, i.e.
    this function is equivalent to `randstate.randn(*shape)` for real dtype and
    `randstate.randn(*shape) + 1.j * randstate.randn(*shape)` for complex dtype.

    :param tuple shape: Shape of array to be returned
    :param randstate: An instance of :class:`numpy.random.RandomState` (default is
        ``np.random``)
    :param dtype: ``np.float_`` (default) or ``np.complex_``

    Returns
    -------

    A: An array of given shape and dtype with standard normal entries

    """
    if dtype == np.float_:
        return randstate.randn(*shape)
    elif dtype == np.complex_:
        return randstate.randn(*shape) + 1.j * randstate.randn(*shape)
    else:
        raise ValueError('{} is not a valid dtype.'.format(dtype))
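
A minimal usage sketch of the helper above; seeding via `np.random.RandomState` is just an illustrative assumption:

import numpy as np

rgen = np.random.RandomState(seed=0)
real_arr = _standard_normal((2, 3), randstate=rgen, dtype=np.float_)
cplx_arr = _standard_normal((2, 3), randstate=rgen, dtype=np.complex_)
assert real_arr.shape == cplx_arr.shape == (2, 3)
assert real_arr.dtype == np.float_ and cplx_arr.dtype == np.complex_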
Source file: mparray_test.py (project: mpnum, author: dseuss)
def test_operations_typesafety(nr_sites, local_dim, rank, rgen):
    # create a real MPA
    mpo1 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
                              randstate=rgen, dtype=np.float_)
    mpo2 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
                              randstate=rgen, dtype=np.complex_)

    assert mpo1.dtype == np.float_
    assert mpo2.dtype == np.complex_

    assert (mpo1 + mpo1).dtype == np.float_
    assert (mpo1 + mpo2).dtype == np.complex_
    assert (mpo2 + mpo1).dtype == np.complex_

    assert mp.sumup((mpo1, mpo1)).dtype == np.float_
    assert mp.sumup((mpo1, mpo2)).dtype == np.complex_
    assert mp.sumup((mpo2, mpo1)).dtype == np.complex_

    assert (mpo1 - mpo1).dtype == np.float_
    assert (mpo1 - mpo2).dtype == np.complex_
    assert (mpo2 - mpo1).dtype == np.complex_

    mpo1 += mpo2
    assert mpo1.dtype == np.complex_
Source file: _kdtree.py (project: hienoi, author: christophercrouzet)
def __init__(self, data, bucket_size=128):
        if bucket_size < 1:
            raise ValueError("A minimum bucket size of 1 is expected.")

        self._data = data
        self._n, self._k = self._data.shape
        self._nodes = None
        self._buckets = []
        self._bucket_size = bucket_size

        self._node_dtype = numpy.dtype([
            ('size', numpy.intp),
            ('bucket', numpy.intp),
            ('lower_bounds', (numpy.float_, self._k)),
            ('upper_bounds', (numpy.float_, self._k)),
        ])
        self._neighbour_dtype = numpy.dtype([
            ('squared_distance', numpy.float_),
            ('index', numpy.intp),
        ])

        self._build()
Source file: _kdtree.py (project: hienoi, author: christophercrouzet)
def search(self, point, count, radius, sort):
        """Retrieve the neighbours to a point."""
        if count is None:
            count = self._n
        elif count < 1:
            return numpy.empty(0, dtype=self._neighbour_dtype)

        if radius is None:
            radius = numpy.inf
        elif radius < 0.0:
            return numpy.empty(0, dtype=self._neighbour_dtype)

        point = numpy.asarray(point, dtype=numpy.float_)
        if count >= self._n:
            return self._search_all_within_radius(point, radius, sort)
        else:
            return self._search_k_nearests(point, count, radius, sort)
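
A hypothetical usage sketch: the enclosing class name is not shown in this snippet, so `KDTree` below is an assumption; the records returned use the `_neighbour_dtype` structured dtype defined in `__init__`:

import numpy

points = numpy.random.rand(1000, 3)      # 1000 points in 3-D
tree = KDTree(points, bucket_size=64)    # class name assumed
nearest = tree.search([0.5, 0.5, 0.5], count=8, radius=None, sort=True)
for record in nearest:                   # each record is (squared_distance, index)
    print(record['index'], record['squared_distance'])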
Source file: test_indexing.py (project: radar, author: amoose136)
def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

        # Regression, it needs to fall through integer and fancy indexing
        # cases, so need the with statement to ignore the non-integer error.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            a = np.array([1.])
            assert_(isinstance(a[0.], np.float_))

            a = np.array([np.array(1)], dtype=object)
            assert_(isinstance(a[0.], np.ndarray))
Source file: testutils.py (project: radar, author: amoose136)
def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
    """
    Returns true if all components of a and b are equal to given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must also be.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
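
A minimal usage sketch; `approx` here matches the helper shipped as `numpy.ma.testutils.approx`, which is assumed to be importable:

import numpy as np
from numpy.ma.testutils import approx

a = np.ma.array([1.0, 2.0, 3.0], mask=[0, 0, 1])
b = np.ma.array([1.0, 2.0000001, 999.0], mask=[0, 0, 1])
# With fill_value=True (the default) the masked third entries compare equal.
print(approx(a, b).all())   # True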
Source file: jsonme.py (project: cobrame, author: SBRG)
def _fix_type(value):
    """convert possible types to str, float, and bool"""
    # Because numpy floats can not be pickled to json
    if isinstance(value, string_types):
        return str(value)
    if isinstance(value, float_):
        return float(value)
    if isinstance(value, bool_):
        return bool(value)
    if isinstance(value, set):
        return list(value)
    if isinstance(value, Basic):
        return str(value)
    if hasattr(value, 'id'):
        return str(value.id)
    # if value is None:
    #     return ''
    return value
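
A hypothetical usage sketch; `_fix_type` relies on names imported elsewhere in the module (`string_types` from six, `float_`/`bool_` from numpy, `Basic` from sympy), so those imports are assumed:

import numpy as np

print(_fix_type(np.float64(1.5)))   # 1.5 as a plain Python float, JSON-safe
print(_fix_type(np.bool_(True)))    # True as a plain Python bool
print(_fix_type({'b', 'a'}))        # converted to a list, since sets are not JSON-serializable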
Source file: json.py (project: incubator-airflow-old, author: apache)
def default(self, obj):
        # convert dates and numpy objects in a json serializable format
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        elif isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        elif type(obj) in (np.int_, np.intc, np.intp, np.int8, np.int16,
                           np.int32, np.int64, np.uint8, np.uint16,
                           np.uint32, np.uint64):
            return int(obj)
        elif type(obj) in (np.bool_,):
            return bool(obj)
        elif type(obj) in (np.float_, np.float16, np.float32, np.float64,
                           np.complex_, np.complex64, np.complex128):
            return float(obj)

        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)
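
The `default` method above belongs to a `json.JSONEncoder` subclass whose name is not shown; a minimal sketch of wiring it up (the class name `NumpyAwareEncoder` and the payload are hypothetical, and `default = default` simply rebinds the module-level function shown above as a method):

import json
from datetime import datetime
import numpy as np

class NumpyAwareEncoder(json.JSONEncoder):
    default = default   # reuse the function defined above

payload = {'when': datetime(2017, 1, 1), 'count': np.int64(3), 'score': np.float32(0.5)}
print(json.dumps(payload, cls=NumpyAwareEncoder))
# {"when": "2017-01-01T00:00:00Z", "count": 3, "score": 0.5}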
Source file: discontinuities.py (project: simupy, author: sixpearls)
def event_bounds_expressions(self, event_bounds_exp):
        if hasattr(self, 'output_equations'):
            assert len(event_bounds_exp)+1 == self.output_equations.shape[0]
        if hasattr(self, 'output_equations_functions'):
            assert len(event_bounds_exp)+1 == \
                self.output_equations_functions.size
        if hasattr(self, 'state_equations'):
            assert len(event_bounds_exp)+1 == self.state_equations.shape[0]
        if hasattr(self, 'state_equations_functions'):
            assert len(event_bounds_exp)+1 == \
                self.state_equations_functions.size
        self._event_bounds_expressions = event_bounds_exp
        self.event_bounds = np.array(
            [sp.N(bound, subs=self.constants_values)
             for bound in event_bounds_exp],
            dtype=np.float_
        )
Source file: test_indexing.py (project: krpcScripts, author: jwvanderbeck)
def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

        # Regression, it needs to fall through integer and fancy indexing
        # cases, so need the with statement to ignore the non-integer error.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            a = np.array([1.])
            assert_(isinstance(a[0.], np.float_))

            a = np.array([np.array(1)], dtype=object)
            assert_(isinstance(a[0.], np.ndarray))
Source file: testutils.py (project: krpcScripts, author: jwvanderbeck)
def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
    """
    Returns true if all components of a and b are equal to given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must also be.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
Source file: pyxpose.py (project: pyxpose, author: PetitPrince)
def find_a_dominant_color(image):
    # K-mean clustering to find the k most dominant color, from:
    # http://stackoverflow.com/questions/3241929/python-find-dominant-most-common-color-in-an-image
    n_clusters = 5

    # Get image into a workable form
    im = image.copy()
    im = im.resize((150, 150))      # optional, to reduce time
    ar = scipy.misc.fromimage(im)
    im_shape = ar.shape
    ar = ar.reshape(scipy.product(im_shape[:2]), im_shape[2])
    ar = np.float_(ar)

    # Compute clusters
    codes, dist = scipy.cluster.vq.kmeans(ar, n_clusters)
    vecs, dist = scipy.cluster.vq.vq(ar, codes)         # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))    # count occurrences

    # Get the indexes of the most frequent, 2nd most frequent, 3rd, ...
    sorted_idxs = np.argsort(counts)[::-1]   # argsort is ascending, so reverse for descending counts

    # Get the color
    peak = codes[sorted_idxs[1]] # get second most frequent color

    return [int(i) for i in peak.tolist()] # list comprehension to quickly cast everything to int
Source file: test_base.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def test_empty_fancy(self):
        empty_farr = np.array([], dtype=np.float_)
        empty_iarr = np.array([], dtype=np.int_)
        empty_barr = np.array([], dtype=np.bool_)

        # pd.DatetimeIndex is excluded, because it overrides getitem and should
        # be tested separately.
        for idx in [self.strIndex, self.intIndex, self.floatIndex]:
            empty_idx = idx.__class__([])

            self.assertTrue(idx[[]].identical(empty_idx))
            self.assertTrue(idx[empty_iarr].identical(empty_idx))
            self.assertTrue(idx[empty_barr].identical(empty_idx))

            # np.ndarray only accepts ndarray of int & bool dtypes, so should
            # Index.
            self.assertRaises(IndexError, idx.__getitem__, empty_farr)
Source file: test_constructors.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def test_fromValue(self):

        nans = Series(np.NaN, index=self.ts.index)
        self.assertEqual(nans.dtype, np.float_)
        self.assertEqual(len(nans), len(self.ts))

        strings = Series('foo', index=self.ts.index)
        self.assertEqual(strings.dtype, np.object_)
        self.assertEqual(len(strings), len(self.ts))

        d = datetime.now()
        dates = Series(d, index=self.ts.index)
        self.assertEqual(dates.dtype, 'M8[ns]')
        self.assertEqual(len(dates), len(self.ts))

        # GH12336
        # Test construction of categorical series from value
        categorical = Series(0, index=self.ts.index, dtype="category")
        expected = Series(0, index=self.ts.index).astype("category")
        self.assertEqual(categorical.dtype, 'category')
        self.assertEqual(len(categorical), len(self.ts))
        tm.assert_series_equal(categorical, expected)
Source file: test_indexing.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

        # Regression, it needs to fall through integer and fancy indexing
        # cases, so need the with statement to ignore the non-integer error.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            a = np.array([1.])
            assert_(isinstance(a[0.], np.float_))

            a = np.array([np.array(1)], dtype=object)
            assert_(isinstance(a[0.], np.ndarray))
Source file: testutils.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def almost(a, b, decimal=6, fill_value=True):
    """
    Returns True if a and b are equal up to decimal places.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
    return d.ravel()
Source file: test_indexing.py (project: aws-lambda-numpy, author: vitolimandibhrata)
def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

        # Regression, it needs to fall through integer and fancy indexing
        # cases, so need the with statement to ignore the non-integer error.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            a = np.array([1.])
            assert_(isinstance(a[0.], np.float_))

            a = np.array([np.array(1)], dtype=object)
            assert_(isinstance(a[0.], np.ndarray))
Source file: testutils.py (project: aws-lambda-numpy, author: vitolimandibhrata)
def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
    """
    Returns true if all components of a and b are equal to given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must also be.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
Source file: test_indexing.py (project: lambda-numba, author: rlhotovy)
def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

        # Regression, it needs to fall through integer and fancy indexing
        # cases, so need the with statement to ignore the non-integer error.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            a = np.array([1.])
            assert_(isinstance(a[0.], np.float_))

            a = np.array([np.array(1)], dtype=object)
            assert_(isinstance(a[0.], np.ndarray))
Source file: testutils.py (project: lambda-numba, author: rlhotovy)
def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
    """
    Returns true if all components of a and b are equal to given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must also be.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
Source file: statistics.py (project: PyME, author: vikramsunkara)
def kl_divergence(p, q):
    """
    Returns KL-divergence of distribution q from distribution p.

    The Kullback-Leibler (KL) divergence is defined as

    .. math::

           \\textrm{KL-divergence}(p, q) :=
           \\sum_{x} p(x) \\log{} \\frac{p(x)}{q(x)}

    Warning: this function uses numpy's scalar floating point types to
    perform the evaluation. Therefore, the result may be non-finite.
    For example, if the state x has non-zero probability for distribution p,
    but zero probability for distribution q, then the result will be
    non-finite.
    """
    accum = 0.0
    for x in p:
        p_x = numpy.float_(p[x])
        if p_x != 0.0:
            q_x = numpy.float_(q.get(x, 0.0))
            accum += p_x * numpy.log(p_x / q_x)
    return accum
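
A minimal usage sketch; `p` and `q` are mappings from states to probabilities, as the loop above expects:

import numpy

p = {0: 0.5, 1: 0.5}
q = {0: 0.9, 1: 0.1}
print(kl_divergence(p, p))   # 0.0
print(kl_divergence(p, q))   # ~0.5108 (natural logarithm)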
Source file: json.py (project: airflow, author: apache-airflow)
def default(self, obj):
        # convert dates and numpy objects in a json serializable format
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        elif isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        elif type(obj) in [np.int_, np.intc, np.intp, np.int8, np.int16,
                           np.int32, np.int64, np.uint8, np.uint16,
                           np.uint32, np.uint64]:
            return int(obj)
        elif type(obj) in [np.bool_]:
            return bool(obj)
        elif type(obj) in [np.float_, np.float16, np.float32, np.float64,
                           np.complex_, np.complex64, np.complex128]:
            return float(obj)

        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)
Source file: testutils.py (project: deliver, author: orchestor)
def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
    """
    Returns true if all components of a and b are equal to given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must also be.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
Source file: testutils.py (project: deliver, author: orchestor)
def almost(a, b, decimal=6, fill_value=True):
    """
    Returns True if a and b are equal up to decimal places.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
    return d.ravel()
Source file: stats.py (project: BigBrotherBot-For-UrT43, author: ptitbigorneau)
def achisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for array of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups (NOT RIGHT??)

    Usage:   achisquare(f_obs, f_exp=None)   f_obs = array of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """

    k = len(f_obs)
    if f_exp is None:
        f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs),N.float_)
    f_exp = f_exp.astype(N.float_)
    chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
    return chisq, achisqprob(chisq, k-1)
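
A minimal usage sketch; `achisquare` also calls `achisqprob` for the p-value, so the surrounding stats module (which imports numpy as N) is assumed to be loaded as a whole:

import numpy as N

f_obs = N.array([18, 22, 20], N.float_)
chisq, p = achisquare(f_obs)   # expected frequencies default to a uniform 20 per cell
print(chisq)                   # 0.4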
Source file: stats.py (project: BigBrotherBot-For-UrT43, author: ptitbigorneau)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
    """
    Adds the values in the passed array, squares that sum, and returns the
    result.  Dimension can equal None (ravel array first), an integer (the
    dimension over which to operate), or a sequence (operate over multiple
    dimensions).  If keepdims=1, the returned array will have the same
    NUMBER of dimensions as the original.

    Usage:   asquare_of_sums(inarray, dimension=None, keepdims=0)
    Returns: the square of the sum over dim(s) in dimension
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    s = asum(inarray,dimension,keepdims)
    if type(s) == N.ndarray:
        return s.astype(N.float_)*s
    else:
        return float(s)*s
Source file: stats.py (project: BigBrotherBot-For-UrT43, author: ptitbigorneau)
def arankdata(inarray):
    """
    Ranks the data in inarray, dealing with ties appropritely.  Assumes
    a 1D inarray.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   arankdata(inarray)
    Returns: array of length equal to inarray, containing rank scores
    """
    n = len(inarray)
    svec, ivec = ashellsort(inarray)
    sumranks = 0
    dupcount = 0
    newarray = N.zeros(n,N.float_)
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        if i == n-1 or svec[i] != svec[i+1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1,i+1):
                newarray[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newarray
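
A minimal usage sketch; `arankdata` depends on `ashellsort` from the same stats module, which is assumed to be available:

import numpy as N

scores = N.array([40., 10., 30., 10.])
print(arankdata(scores))   # [ 4.   1.5  3.   1.5] -- tied values share their average rank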
Source file: anywords_traindata.py (project: gearbot, author: g34r)
def load_data(path, seq_length):
    with open(path) as file:
        content = file.read().strip()
        key = sorted(list(set(content)))

        dataX = []
        dataY = []

        for i in range(0, len(content) - seq_length, 1):
            seq_in = content[i:i+seq_length]
            seq_out = content[i+seq_length]
            dataX.append(encode_vals(seq_in, key))
            dataY.append(encode(seq_out, key))

        X = np.reshape(dataX, (len(dataX), seq_length, len(key)))
        X = np.float_(X)
        Y = np.asarray(dataY)

        return (X, Y, key)
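
A hypothetical usage sketch; the corpus path is made up, and `load_data` depends on the module's own `encode_vals`/`encode` helpers for the character encoding:

seq_length = 40
X, Y, key = load_data('corpus.txt', seq_length)
# X: (num_sequences, seq_length, vocab_size) as float, Y: encoded next-character targets,
# key: the sorted character vocabulary.
print(X.shape, Y.shape, len(key))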
Source file: type.py (project: lim, author: limix)
def npy2py_type(npy_type):
    int_types = [
        np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64,
        np.uint8, np.uint16, np.uint32, np.uint64
    ]

    float_types = [np.float_, np.float16, np.float32, np.float64]

    bytes_types = [np.str_, np.string_]

    if npy_type in int_types:
        return int
    if npy_type in float_types:
        return float
    if npy_type in bytes_types:
        return bytes

    if hasattr(npy_type, 'char'):
        if npy_type.char in ['S', 'a']:
            return bytes
        raise TypeError

    return npy_type
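
A minimal usage sketch; it assumes a NumPy version that still provides the `np.float_` and `np.string_` aliases (removed in NumPy 2.0):

import numpy as np

print(npy2py_type(np.float32))       # <class 'float'>
print(npy2py_type(np.int64))         # <class 'int'>
print(npy2py_type(np.dtype('S8')))   # <class 'bytes'>, via the .char fallback
print(npy2py_type(str))              # unknown types fall through and are returned unchanged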
Source file: average.py (project: capriqorn, author: bio-phys)
def apply_rescaling(self, frm_in, frm_out, n_avg, virtual_param):
        """Apply rescaling and averaging operations."""
        frm_out.i = frm_in.i
        # --- perform averaging on histograms
        val = np.float_(self.factor) / np.float_(n_avg)
        # --- rescale distance histograms
        if frm_out.has_key(base.loc_histograms):
            X = frm_out.get_data(base.loc_histograms)
            dict_util.scale_values(X, val)
        # --- multiref: rescale shell XX histogram
        if (virtual_param is not None and self.geometry == 'MultiReferenceStructure'):
            if frm_out.has_key(base.loc_shell_Hxx):
                X = frm_out.get_data(base.loc_shell_Hxx)
                dict_util.scale_values(X, val)
        # --- rescale length histograms
        if frm_out.has_key(base.loc_len_histograms):
            X = frm_out.get_data(base.loc_len_histograms)
            dict_util.scale_values(X, val)
        # ---
        frm_out.put_data('log', frm_in.get_data('log'))
        frm_out.put_meta(self.get_meta(n_avg=n_avg))

