Example source code for Python's numpy.atleast_1d()

utils.py (project: pyGAM, author: dswah)
def make_2d(array):
    """
    tiny tool to expand 1D arrays to 2D

    Parameters
    ----------
    array : array-like

    Returns
    -------
    np.array with ndim = 2
    """
    array = np.asarray(array)
    if array.ndim < 2:
        msg = 'Expected 2D input data array, but found {}D. '\
              'Expanding to 2D.'.format(array.ndim)
        warnings.warn(msg)
        array = np.atleast_1d(array)[:,None]
    return array
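A minimal usage sketch (illustrative only, not part of pyGAM; it assumes numpy is imported as np, as utils.py itself requires):

import numpy as np

make_2d([1.0, 2.0, 3.0])   # warns and returns an array of shape (3, 1)
make_2d(np.eye(2))         # already 2D, returned unchanged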
utils.py (project: pyGAM, author: dswah)
def round_to_n_decimal_places(array, n=3):
    """
    tool to round a float (or numpy array) to n decimal places.

    n=3 by default

    Parameters
    ----------
    array : np.array
    n : int. number of decimal places to keep

    Returns
    -------
    array : rounded np.array
    """
    # scalar floats that print in scientific notation are returned unchanged
    if issubclass(array.__class__, float) and '%.e'%array == str(array):
        return array # do nothing

    shape = np.shape(array)
    out = ((np.atleast_1d(array) * 10**n).round().astype('int') / (10.**n))
    return out.reshape(shape)
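For illustration (not from the pyGAM sources), rounding preserves the input shape:

import numpy as np

round_to_n_decimal_places(np.array([3.14159, 2.71828]))        # -> array([3.142, 2.718])
round_to_n_decimal_places(np.array([3.14159, 2.71828]), n=1)   # -> array([3.1, 2.7])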
utils.py (project: pyGAM, author: dswah)
def ylogydu(y, u):
    """
    tool to compute y * log(y / u), returning 0 where y == 0 (the limit as y -> 0)

    Parameters
    ----------
    y : array-like of len(n)
    u : array-like of len(n)

    Returns
    -------
    np.array len(n)
    """
    mask = (np.atleast_1d(y)!=0.)
    out = np.zeros_like(u)
    out[mask] = y[mask] * np.log(y[mask] / u[mask])
    return out
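An illustrative call (not from pyGAM's tests): entries where y == 0 contribute 0 rather than nan:

import numpy as np

y = np.array([0.0, 1.0, 2.0])
u = np.array([1.0, 1.0, 1.0])
ylogydu(y, u)   # -> array([0., 0., 1.38629436]), i.e. [0, 1*log(1/1), 2*log(2/1)]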
extract_features.py (project: DeepFormants, author: MLSpeech)
def atal(x, order, num_coefs):
    """Compute num_coefs LPC-derived cepstral coefficients from an order-th order LPC analysis of the signal x."""
    x = np.atleast_1d(x)
    n = x.size
    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    a, e, kk = lpc(x, order)
    c = np.zeros(num_coefs)
    c[0] = a[0]
    for m in range(1, order+1):
        c[m] = - a[m]
        for k in range(1, m):
            c[m] += (float(k)/float(m)-1)*a[k]*c[m-k]
    for m in range(order+1, num_coefs):
        for k in range(1, order+1):
            c[m] += (float(k)/float(m)-1)*a[k]*c[m-k]
    return c
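Both m-loops apply the same recursion, c[m] = -a[m] + sum over k of (k/m - 1) * a[k] * c[m-k], with c[m] starting from 0 once m exceeds order. A stand-alone sketch of that recursion, using hypothetical LPC coefficients a instead of the lpc() call above:

import numpy as np

a = np.array([1.0, -0.9, 0.2])   # hypothetical LPC coefficients, a[0] = 1 by convention
order = len(a) - 1
num_coefs = 5

c = np.zeros(num_coefs)
c[0] = a[0]
for m in range(1, num_coefs):
    c[m] = -a[m] if m <= order else 0.0
    for k in range(1, min(m, order + 1)):
        c[m] += (float(k) / float(m) - 1) * a[k] * c[m - k]
# c now holds the first num_coefs cepstral coefficients implied by a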
eigenfunctions.py (project: pyinduct, author: pyinduct)
def return_real_part(to_return):
    """
    Check that the imaginary part of to_return vanishes
    and return the real part.
    :param to_return: number, or list/array of numbers, to check
    :return: the real part of to_return (a scalar if only one value remains)
    """
    if not isinstance(to_return, (Number, list, np.ndarray)):
        raise TypeError
    if isinstance(to_return, (list, np.ndarray)):
        if not all([isinstance(num, Number) for num in to_return]):
            raise TypeError

    maybe_real = np.atleast_1d(np.real_if_close(to_return))

    if maybe_real.dtype == 'complex':
        raise ValueError("Something goes wrong, imaginary part does not vanish")
    else:
        if maybe_real.shape == (1,):
            maybe_real = maybe_real[0]
        return maybe_real
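A few illustrative calls (assuming numpy as np; not taken from the pyinduct test suite):

import numpy as np

return_real_part(1 + 1e-14j)                  # -> 1.0, the negligible imaginary part is dropped
return_real_part(np.array([2 + 0j, 3 + 0j]))  # -> array([2., 3.])
return_real_part(1 + 1j)                      # raises ValueError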
simulation.py (project: pyinduct, author: pyinduct)
def __init__(self, bounds=None, num=None, step=None, points=None):
        if points is not None:
            # points are given, easy one
            self._values = np.atleast_1d(points)
            self._limits = (points.min(), points.max())
            self._num = points.size
            # TODO check for evenly spaced entries
            # for now just use provided information
            self._step = step
        elif bounds and num:
            self._limits = bounds
            self._num = num
            self._values, self._step = np.linspace(bounds[0], bounds[1], num, retstep=True)
            if step is not None and not np.isclose(self._step, step):
                raise ValueError("could not satisfy both redundant requirements for num and step!")
        elif bounds and step:
            self._limits = bounds
            # calculate number of needed points but save correct step size
            self._num = int((bounds[1] - bounds[0]) / step + 1.5)
            self._values, self._step = np.linspace(bounds[0], bounds[1], self._num, retstep=True)
            if np.abs(step - self._step) > 1e-1:
                warnings.warn("desired step-size {} doesn't fit to given interval,"
                              " changing to {}".format(step, self._step))
        else:
            raise ValueError("not enough arguments provided!")
trajectory.py (project: pyinduct, author: pyinduct)
def power_series(z, t, C, spatial_der_order=0):
    """Evaluate a power series in z with time-varying coefficients C (or its spatial
    derivative of order spatial_der_order) on the grid spanned by z and t."""
    if not all([isinstance(item, (Number, np.ndarray)) for item in [z, t]]):
        raise TypeError
    z = np.atleast_1d(z)
    t = np.atleast_1d(t)
    if not all([len(item.shape) == 1 for item in [z, t]]):
        raise ValueError

    x = np.nan*np.zeros((len(t), len(z)))
    for i in range(len(z)):
        sum_x = np.zeros(t.shape[0])
        for j in range(len(C)-spatial_der_order):
            sum_x += C[j+spatial_der_order][0, :]*z[i]**j/sm.factorial(j)
        x[:, i] = sum_x

    if any([dim == 1 for dim in x.shape]):
        x = x.flatten()

    return x
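An illustrative evaluation (not from pyinduct's tests; it assumes numpy as np and whatever factorial provider trajectory.py binds to sm): with each C[j] of shape (1, len(t)), the result has one row per time and one column per position.

import numpy as np

z = np.array([0.0, 0.5, 1.0])
t = np.array([0.0, 1.0])
# two coefficients, constant over time: x(z, t) = 1 + 2*z
C = [np.ones((1, len(t))), 2 * np.ones((1, len(t)))]
power_series(z, t, C)   # -> array([[1., 2., 3.], [1., 2., 3.]])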
core.py (project: pyinduct, author: pyinduct)
def _check_domain(self, value):
        """
        checks if value fits into domain

        :param value: point(s) where function shall be evaluated
        :raises: ValueError if value not in domain
        """
        in_domain = False
        value = np.atleast_1d(value)
        for interval in self.domain:
            if all(value >= interval[0]) and all(value <= interval[1]):
                in_domain = True
                break

        if not in_domain:
            raise ValueError("Function evaluated outside its domain!")
test__numdiff.py (project: ip-nonlinear-solver, author: antonior92)
def test_scalar_vector(self):
        x0 = 0.5
        jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
                                       method='2-point',
                                       as_linear_operator=True)
        jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
                                       as_linear_operator=True)
        jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
                                       method='cs',
                                       as_linear_operator=True)
        jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
        np.random.seed(1)
        for i in range(10):
            p = np.random.uniform(-10, 10, size=(1,))
            assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
                            rtol=1e-5)
            assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
                            rtol=5e-6)
            assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
                            rtol=5e-6)
test__numdiff.py (project: ip-nonlinear-solver, author: antonior92)
def test_vector_scalar(self):
        x0 = np.array([100.0, -0.5])
        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='2-point',
                                       as_linear_operator=True)
        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
                                       as_linear_operator=True)
        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='cs',
                                       as_linear_operator=True)
        jac_true = self.jac_vector_scalar(x0)
        np.random.seed(1)
        for i in range(10):
            p = np.random.uniform(-10, 10, size=x0.shape)
            assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
                            rtol=1e-5)
            assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
                            rtol=5e-6)
            assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
                            rtol=1e-7)
gcs2.py (project: pi_gcs, author: lbusoni)
def _getterChannels(self, channels, gcsFunction, valueArrayClass):
        chArray= np.atleast_1d(channels)
        value= valueArrayClass([0] * len(chArray))
        gcsFunction.argtypes= [c_int, CIntArray, valueArrayClass, c_int]
        self._convertErrorToException(
            gcsFunction(self._id,
                        CIntArray(chArray),
                        value,
                        len(chArray)))
        return value.toNumpyArray()
gcs2.py (project: pi_gcs, author: lbusoni)
def _setterChannels(self, channels, value, gcsFunction, valueArrayClass):
        valueArray= np.atleast_1d(value)
        assert len(channels) == len(valueArray)
        gcsFunction.argtypes= [c_int, CIntArray, valueArrayClass, c_int]
        self._convertErrorToException(
            gcsFunction(self._id,
                        CIntArray(channels),
                        valueArrayClass(valueArray),
                        len(channels)))
gcs2.py (project: pi_gcs, author: lbusoni)
def _setterAxes(self, axesString, value, gcsFunction, valueArrayClass):
        nCh= len(axesString.split())
        valueArray= np.atleast_1d(value)
        assert nCh == len(valueArray)
        gcsFunction.argtypes= [c_int, c_char_p, valueArrayClass]
        self._convertErrorToException(
            gcsFunction(self._id, axesString, valueArrayClass(valueArray)))
gcs2.py (project: pi_gcs, author: lbusoni)
def setServoControlMode(self, axesString, controlMode):
        self._setterAxes(
            axesString,
            np.atleast_1d(controlMode).astype('int'),
            self._lib.PI_SVO,
            CIntArray)
fake_gcs2.py (project: pi_gcs, author: lbusoni)
def _arrayToDict(self, dicto, keys, values):
        valueArray= np.atleast_1d(values)
        assert len(keys) == len(valueArray), \
            "%d %d" % (len(keys), len(valueArray))
        for i in range(len(keys)):
            dicto[keys[i]]= valueArray[i]
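Stripped of the surrounding class, the loop simply zips keys with values, and np.atleast_1d lets a scalar value be paired with a single key. A small illustration (the key name is hypothetical):

import numpy as np

dicto = {}
keys = ['offset']
values = np.atleast_1d(3.2)    # a bare scalar becomes array([3.2])
for i in range(len(keys)):
    dicto[keys[i]] = values[i]
# dicto == {'offset': 3.2}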
mcmc_sampler.py (project: bnn-analysis, author: myshkov)
def __init__(self, loss_fn=None, initial_position=None, test_model=None, batch_size=None, burn_in=0,
                 step_sizes=.0001, step_probabilities=1., **kwargs):
        """
        Creates a new MCMC_sampler object.

        :param loss_fn: Target loss function without regularisation terms
        :param initial_position: Initial network weights as a 2-d array of shape [number of chains, number of weights]
        :param test_model: The model used on the test data. Default=None
        :param batch_size: Batch size used for stochastic sampling methods. Default=None
        :param burn_in: Number of burn-in samples. Default=0
        :param step_sizes: Step size or a list of step sizes. Default=.0001
        :param step_probabilities: Probabilities to choose a step from step_sizes, must sum to 1. Default=1
        """

        super().__init__(**kwargs)
        self.loss_fn = loss_fn
        self.test_model = test_model

        self.initial_position = np.asarray(initial_position, dtype=np.float32)
        self.position_shape = self.initial_position.shape
        self.position_size = self.initial_position.shape[1]  # total number of parameters of one network

        # data and parameter shapes
        self.chains_num = self.initial_position.shape[0]  # number of chains to run in parallel
        self.batch_size = batch_size if batch_size is not None else self.train_size
        self.batch_x_shape = (self.batch_size, self.input_dim)
        self.batch_y_shape = (self.batch_size, self.output_dim)

        # common parameters
        self.step_sizes = np.atleast_1d(np.asarray(step_sizes, dtype=np.float32))
        self.step_probabilities = np.atleast_1d(np.asarray(step_probabilities, dtype=np.float32))
        self.burn_in = burn_in
        self.step_multiplier = np.ones(shape=(self.chains_num,), dtype=np.float32)

        # monitor acceptance rate for reporting
        self.avg_acceptance_rate = np.ones(shape=(self.chains_num,), dtype=np.float32)
        self.avg_acceptance_rate_lambda = 0.99
        self._has_burned_in = False
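The np.atleast_1d calls are what allow step_sizes and step_probabilities to be given either as scalars (the defaults) or as lists; for illustration:

import numpy as np

np.atleast_1d(np.asarray(.0001, dtype=np.float32))          # scalar -> array([1.e-04], dtype=float32)
np.atleast_1d(np.asarray([.001, .0001], dtype=np.float32))  # already 1D, shape (2,) unchanged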
transformations.py (project: pybot, author: spillai)
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. eucledian norm, of ndarray along axis.

    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> v = numpy.random.rand(6, 5, 3)
    >>> n = vector_norm(v, axis=-1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
    True
    >>> n = vector_norm(v, axis=1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> v = numpy.random.rand(5, 4, 3)
    >>> n = numpy.empty((5, 3), dtype=numpy.float64)
    >>> vector_norm(v, axis=1, out=n)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1.0])
    1.0

    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if data.ndim == 1:
            return math.sqrt(numpy.dot(data, data))
        data *= data
        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
        numpy.sqrt(out, out)
        return out
    else:
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out)
transformations.py (project: Neural-Networks-for-Inverse-Kinematics, author: paramrajpura)
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> v = numpy.random.rand(6, 5, 3)
    >>> n = vector_norm(v, axis=-1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
    True
    >>> n = vector_norm(v, axis=1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> v = numpy.random.rand(5, 4, 3)
    >>> n = numpy.empty((5, 3))
    >>> vector_norm(v, axis=1, out=n)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1])
    1.0

    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if data.ndim == 1:
            return math.sqrt(numpy.dot(data, data))
        data *= data
        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
        numpy.sqrt(out, out)
        return out
    else:
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out)
position_hand.py (project: SLP-Annotator, author: PhonologicalCorpusTools)
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> v = numpy.random.rand(6, 5, 3)
    >>> n = vector_norm(v, axis=-1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
    True
    >>> n = vector_norm(v, axis=1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> v = numpy.random.rand(5, 4, 3)
    >>> n = numpy.empty((5, 3))
    >>> vector_norm(v, axis=1, out=n)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1])
    1.0

    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if data.ndim == 1:
            return math.sqrt(numpy.dot(data, data))
        data *= data
        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
        numpy.sqrt(out, out)
        return out
    else:
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out)
controls.py (project: mbin, author: fanglab)
def chunk_control_matrices( self, control_ipds_fn, control_ipds_N_fn, control_kmers_fn ):
        """

        """
        kmers       = np.atleast_1d(np.loadtxt(control_kmers_fn, dtype="str"))
        fns         = [control_ipds_fn, control_ipds_N_fn]
        n_chunks    = 99
        chunksize   = int(math.ceil(len(kmers) / float(n_chunks)))  # true division so the chunk size rounds up
        cols_chunks = list(chunks( range(len(kmers)), chunksize ))
        args        = []
        for i,cols_chunk in enumerate(cols_chunks):
            cut_CMDs = []
            for fn in fns:
                cut_cols = "%s-%s" % ((cols_chunk[0]+1), (cols_chunk[-1]+1))
                in_fn    = fn
                out_fn   = fn+".sub.%s" % i
                cut_CMD  = "cut -d$\'\\t\' -f%s %s > %s" % (cut_cols, in_fn, out_fn)
                cut_CMDs.append(cut_CMD)
            args.append( (i, cut_CMDs, kmers, cols_chunk, n_chunks, self.opts.min_motif_count) )

        results = mbin.launch_pool(self.opts.procs, process_contig_chunk, args)

        logging.info("Combining motifs from all chunks of control data...")
        not_found     = 0
        control_means = {}
        for i,result in enumerate(results):
            not_found += result[1]
            for motif in result[0].keys():
                control_means[motif] = result[0][motif]
        logging.info("Done.")

        return control_means,not_found
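The chunks() helper used above is not shown in this snippet; a typical implementation (an assumption, not necessarily the one mbin ships) yields successive fixed-size slices:

def chunks(l, n):
    """Yield successive n-sized chunks from the sequence l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]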

