Python atleast_1d() example source code
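The snippets below are collected from open-source projects and show how numpy.atleast_1d() is used in practice to normalize scalars and array-likes into arrays with at least one dimension. As a minimal, self-contained refresher (plain NumPy, no project code assumed):

import numpy as np

print(np.atleast_1d(3.0))            # scalar -> array([3.])
print(np.atleast_1d([1, 2, 3]))      # 1-D input is returned unchanged
print(np.atleast_1d(np.eye(2)))      # inputs with ndim >= 1 pass through as-is
print(np.atleast_1d(1, [2, 3]))      # multiple arguments -> one array per input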

array_stream.py (project: npstreams, author: LaurentRDC)
def array_stream(func):
    """ 
    Decorates streaming functions to make sure that the stream
    is a stream of ndarrays. Objects that are not arrays are transformed 
    into arrays. If the stream is in fact a single ndarray, this ndarray 
    is repackaged into a sequence of length 1.

    The first argument of the decorated function is assumed to be an iterable of
    arrays, or an iterable of objects that can be cast to arrays.
    """
    @wraps(func)    # thanks functools
    def decorated(arrays, *args, **kwargs):
        if isinstance(arrays, ndarray):
            arrays = (arrays,)
        return func(map(atleast_1d, arrays), *args, **kwargs)
    return decorated
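For context, here is a minimal self-contained sketch of the same pattern (the function name stream_total and the sample inputs are illustrative, not part of npstreams): a decorated function can be called with a single ndarray or with a sequence of scalars, and always iterates over ndarrays of ndim >= 1.

from functools import wraps
import numpy as np

def array_stream(func):
    # same idea as above, written against the numpy namespace directly
    @wraps(func)
    def decorated(arrays, *args, **kwargs):
        if isinstance(arrays, np.ndarray):
            arrays = (arrays,)
        return func(map(np.atleast_1d, arrays), *args, **kwargs)
    return decorated

@array_stream
def stream_total(arrays):
    # `arrays` is always an iterator of ndarrays with ndim >= 1
    return sum(arr.sum() for arr in arrays)

print(stream_total(np.arange(4)))   # a single ndarray is wrapped in a length-1 sequence -> 6
print(stream_total([1, 2, 3]))      # scalars are promoted to 1-element arrays -> 6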
discrete_action_predictor.py (project: BlueWhale, author: caffe2)
def predict(self, states):
        """ Returns values for each state
        :param states: states as a feature -> value dict
        """
        previous_workspace = workspace.CurrentWorkspace()
        workspace.SwitchWorkspace(self._workspace_id)
        for input_blob in states:
            workspace.FeedBlob(
                input_blob,
                np.atleast_1d(states[input_blob]).astype(np.float32)
            )
        workspace.RunNet(self._net)
        result = {
            output: workspace.FetchBlob(output)
            for output in self._output_blobs
        }
        workspace.SwitchWorkspace(previous_workspace)
        return result
invResults.py (project: BISIP, author: clberube)
def print_resul(sol):
#==============================================================================
    # Print the results
    pm, model, filename = sol.pm, sol.model, sol.filename
    print('\n\nInversion success!')
    print('Name of file:', filename)
    print('Model used:', model)
    try:
        pm.pop("cond_std")
        pm.pop("tau_i_std")
        pm.pop("m_i_std")
    except KeyError:
        pass
    e_keys = sorted([s for s in list(pm.keys()) if "_std" in s])

    v_keys = [e.replace("_std", "") for e in e_keys]
    labels = ["{:<8}".format(x+":") for x in v_keys]
    np.set_printoptions(formatter={'float': lambda x: format(x, '6.3E')})
    for l, v, e in zip(labels, v_keys, e_keys):
        if "noise" not in l:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]), np.char.mod('(%.2f%%)',abs(100*pm[e]/pm[v])))
        else:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]))
test_linalg.py (project: radar, author: amoose136)
def do(self, a, b):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev, axis=-1))
        assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

        s = np.atleast_1d(s)
        ld = np.atleast_1d(ld)
        m = (s != 0)
        assert_almost_equal(np.abs(s[m]), 1)
        assert_equal(ld[~m], -inf)
hetr.py (project: ngraph, author: NervanaSystems)
def mlsl_gather_send(self, gather_send_id, x_nparr):
        gather_send_op = self.gather_send_nodes[gather_send_id]

        # todo: get real root_idx
        root_idx = 0

        # np.atleast_1d is used in cases when we need to reduce to a scalar value
        x_nparr = np.atleast_1d(x_nparr)
        if self.process_idx == root_idx:
            # todo: remove that workaround for non-symmetric case
            gather_send_op.arr = x_nparr
        else:
            send_buf = self.as_buffer(x_nparr)
            send_count = x_nparr.size
            recv_buf = None
            if gather_send_op.use_reduce:
                req = self.distribution.reduce(send_buf, send_buf, send_count,
                                               mlsl.DataType.FLOAT, mlsl.ReductionType.SUM,
                                               root_idx, mlsl.GroupType.DATA)
            else:
                req = self.distribution.gather(send_buf, send_count, recv_buf,
                                               mlsl.DataType.FLOAT, root_idx,
                                               mlsl.GroupType.DATA)
            self.mlsl_obj.wait(req)
rw.py (project: nanopores, author: mitschabaude)
def NLS_annealing(F, xi, yi, p, N=100, n=10, sigma=5.,factor=0.5):
    # N = size of population in one iteration
    # n = number of iterations
    # sigma = initial (multiplicative) standard deviation
    # factor = factor to reduce sigma per iteration
    print "initial", p
    p = np.atleast_1d(p)
    dim = len(p)
    # make initial sigma act like multiplication by sigma^(+-1)
    sigma = np.log(sigma)*np.ones(dim)

    for k in range(n):
        # create new population by adding multiplicative gaussian noise
        P = p[None, :] * np.exp(np.random.randn(N, dim) * sigma[None, :])
        # compute mean square loss on population
        f = np.mean((F(xi[None, :], P) - yi)**2, 1)
        # replace p by new best guess
        p = P[np.argmin(f), :]
        # update sigma
        sigma *= factor
        print "parameters:", p
    print "minimum", min(f)

    return tuple(p)
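A toy usage sketch (the exponential model, data, and parameter names are illustrative, and it assumes NLS_annealing above is available with numpy imported as np): the model F(x, p) must broadcast over a population of parameter rows, i.e. accept x of shape (1, M) and p of shape (N, dim) and return an (N, M) array.

import numpy as np

def F(x, p):
    # p[:, 0] is the amplitude, p[:, 1] the decay rate; broadcasts to (N, M)
    return p[:, 0:1] * np.exp(-p[:, 1:2] * x)

rng = np.random.default_rng(0)
xi = np.linspace(0.0, 1.0, 50)
yi = 2.0 * np.exp(-3.0 * xi) + 0.01 * rng.standard_normal(xi.size)

a, b = NLS_annealing(F, xi, yi, p=(1.0, 1.0))   # should land near (2, 3)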
pac.py (project: tensorpac, author: EtienneCmb)
def _idcheck(self, idpac):
        """Check the idpac parameter."""
        idpac = np.atleast_1d(idpac)
        self._csuro = True
        if not all([isinstance(k, int) for k in idpac]) and (len(idpac) != 3):
            raise ValueError("idpac must be a tuple/list of 3 integers.")
        else:
            # Ozkurt PAC case :
            if idpac[0] == 4:
                idpac = np.array([4, 0, 0])
                self._csuro = False
            if (idpac[1] == 0) or (idpac[2] == 0):
                self._csuro = False
                idpac = (idpac[0], 0, 0)
        self._idpac = idpac
        self.method, self.surro, self.norm = pacstr(idpac)
generic_simulation_modules.py (project: pymoskito, author: cklb)
def __init__(self, settings):
        Solver.__init__(self, settings)

        # setup solver
        if hasattr(self._model, "jacobian"):
            self._solver = ode(self._model.state_function,
                               jac=self._model.jacobian)
        else:
            self._solver = ode(self._model.state_function)

        self._solver.set_integrator(self._settings["Mode"],
                                    method=self._settings["Method"],
                                    rtol=self._settings["rTol"],
                                    atol=self._settings["aTol"],
                                    max_step=self._settings["step size"]
                                    )
        self._solver.set_initial_value(np.atleast_1d(self._model.initial_state),
                                       t=self._settings["start time"])
simulation_core.py (project: pymoskito, author: cklb)
def _calc_module(self, module_name):
        """ Calculates the output of a simulation module
        """
        if module_name in self._simulation_modules.keys():
            if self._counter[module_name] == \
                    self._simulation_modules[module_name].tick_divider:
                self._current_outputs[module_name] = np.atleast_1d(
                    self._simulation_modules[module_name].calc_output(
                        self._input_vector))
                self._counter[module_name] = 1
            else:
                self._counter[module_name] += 1

            # update input vector
            self._input_vector.update(
                {module_name: self._current_outputs[module_name]})
replay.py (project: nelpy, author: nelpy)
def score_hmm_logprob(bst, hmm, normalize=False):
    """Score events in a BinnedSpikeTrainArray by computing the log
    probability under the model.

    Parameters
    ----------
    bst : BinnedSpikeTrainArray
    hmm : PoissonHMM
    normalize : bool, optional. Default is False.
        If True, log probabilities will be normalized by their sequence
        lengths.
    Returns
    -------
    logprob : array of size (n_events,)
        Log probabilities, one for each event in bst.
    """

    logprob = np.atleast_1d(hmm.score(bst))
    if normalize:
        logprob = np.atleast_1d(logprob) / bst.lengths

    return logprob
core_utils.py (project: ESPEI, author: PhasesResearchLab)
def get_samples(desired_data):
    all_samples = []
    for data in desired_data:
        temperatures = np.atleast_1d(data['conditions']['T'])
        num_configs = np.array(data['solver'].get('sublattice_configurations'), dtype=np.object).shape[0]
        site_fractions = data['solver'].get('sublattice_occupancies', [[1]] * num_configs)
        site_fraction_product = [reduce(operator.mul, list(itertools.chain(*[np.atleast_1d(f) for f in fracs])), 1)
                                 for fracs in site_fractions]
        # TODO: Subtle sorting bug here, if the interactions aren't already in sorted order...
        interaction_product = []
        for fracs in site_fractions:
            interaction_product.append(float(reduce(operator.mul,
                                                    [f[0] - f[1] for f in fracs if isinstance(f, list) and len(f) == 2],
                                                    1)))
        if len(interaction_product) == 0:
            interaction_product = [0]
        comp_features = zip(site_fraction_product, interaction_product)
        all_samples.extend(list(itertools.product(temperatures, comp_features)))
    return all_samples
count.py (project: SVclone, author: mcmero)
def get_loc_reads(bp,bamf,max_dp):
    loc = '%s:%d:%d' % (bp['chrom'], max(0,bp['start']), bp['end'])
    loc_reads = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
    err_code = 0
    try:
        iter_loc = bamf.fetch(region=loc,until_eof=True)
        for x in iter_loc:
            read = read_to_array(x,bamf)
            if len(np.atleast_1d(read))>0:
                loc_reads = np.append(loc_reads,read)
            if len(loc_reads) > max_dp:
                print('Read depth too high at %s' % loc)
                err_code = 1
                return np.empty(0), err_code
        loc_reads = np.sort(loc_reads,axis=0,order=['query_name','ref_start'])
        loc_reads = np.unique(loc_reads) #remove duplicates
        return loc_reads, err_code
    except ValueError:
        print('Fetching reads failed for loc: %s' % loc)
        err_code = 2
        return np.empty(0), err_code
principal_component_analysis.py (project: ML-From-Scratch, author: eriklindernoren)
def transform(self, X, n_components):
        """ Fit the dataset to the number of principal components specified in the 
        constructor and return the transformed dataset """
        covariance_matrix = calculate_covariance_matrix(X)

        # Where (eigenvector[:,0] corresponds to eigenvalue[0])
        eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix)

        # Sort the eigenvalues and corresponding eigenvectors from largest
        # to smallest eigenvalue and select the first n_components
        idx = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[idx][:n_components]
        eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :n_components]

        # Project the data onto principal components
        X_transformed = X.dot(eigenvectors)

        return X_transformed
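The same steps can be exercised standalone; in this sketch np.cov stands in for the project's calculate_covariance_matrix helper and the data is random.

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 5))

covariance_matrix = np.cov(X, rowvar=False)                 # stand-in for calculate_covariance_matrix
eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix)

idx = eigenvalues.argsort()[::-1]                           # sort by decreasing eigenvalue
eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :2]   # keep the first 2 components

X_transformed = X.dot(eigenvectors)
print(X_transformed.shape)                                  # (200, 2)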
linear_discriminant_analysis.py (project: ML-From-Scratch, author: eriklindernoren)
def fit(self, X, y):
        # Separate data by class
        X1 = X[y == 0]
        X2 = X[y == 1]

        # Calculate the covariance matrices of the two datasets
        cov1 = calculate_covariance_matrix(X1)
        cov2 = calculate_covariance_matrix(X2)
        cov_tot = cov1 + cov2

        # Calculate the mean of the two datasets
        mean1 = X1.mean(0)
        mean2 = X2.mean(0)
        mean_diff = np.atleast_1d(mean1 - mean2)

        # Determine the vector which when X is projected onto it best separates the
        # data by class. w = (mean1 - mean2) / (cov1 + cov2)
        self.w = np.linalg.pinv(cov_tot).dot(mean_diff)
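A standalone sketch of the same computation on synthetic two-class data, again with np.cov standing in for the project's calculate_covariance_matrix helper:

import numpy as np

rng = np.random.default_rng(0)
X1 = rng.standard_normal((50, 3)) + 1.0              # class 0
X2 = rng.standard_normal((50, 3)) - 1.0              # class 1

cov_tot = np.cov(X1, rowvar=False) + np.cov(X2, rowvar=False)
mean_diff = np.atleast_1d(X1.mean(0) - X2.mean(0))

w = np.linalg.pinv(cov_tot).dot(mean_diff)           # projection direction
print(X1.dot(w).mean() > X2.dot(w).mean())           # True: the classes separate along w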
Likelihood.py (project: BayesVP, author: cameronliang)
def __call__(self,alpha):
        """
        Posterior distribution

        Returns
        ---------
        lnprob: float
            Natural log of posterior probability
        """

        lp = self.lnprior(alpha)

        if np.isinf(lp):
            return -np.inf
        else:
            return np.atleast_1d(lp + self.lnlike(alpha))[0]
Utilities.py (project: BayesVP, author: cameronliang)
def convolve_lsf(flux,lsf):
    if len(flux) < len(np.atleast_1d(lsf)):
        # Add padding to make sure to return the same length in flux.
        padding = np.ones(len(lsf)-len(flux)+1)
        flux = np.hstack([padding,flux])

        conv_flux = 1-np.convolve(1-flux,lsf,mode='same') /np.sum(lsf)
        return conv_flux[len(padding):]

    else:
        # convolve 1-flux to remove edge effects without using padding
        return 1-np.convolve(1-flux,lsf,mode='same') /np.sum(lsf)
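A quick usage sketch (the toy flux and Gaussian kernel are illustrative; it assumes numpy is imported as np alongside convolve_lsf above):

import numpy as np

flux = np.ones(100)
flux[45:55] = 0.5                                     # a synthetic absorption feature
lsf = np.exp(-0.5 * (np.arange(-5, 6) / 2.0) ** 2)    # 11-pixel Gaussian line-spread function

smoothed = convolve_lsf(flux, lsf)
print(smoothed.shape == flux.shape)                   # True: output length matches the input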

###############################################################################
# Convergence 
###############################################################################
utils.py (project: graynet, author: raamana)
def check_subjects(subjects_info):
    "Ensure subjects are provided and their data exist."

    if isinstance(subjects_info, str):
        if not pexists(subjects_info):
            raise IOError('path to subject list does not exist: {}'.format(subjects_info))
        subjects_list = np.genfromtxt(subjects_info, dtype=str)
    elif isinstance(subjects_info, collections.Iterable):
        if len(subjects_info) < 1:
            raise ValueError('Empty subject list.')
        subjects_list = subjects_info
    else:
        raise ValueError('Invalid value provided for subject list. \n '
                         'Must be a list of paths, or path to file containing list of paths, one for each subject.')

    subject_id_list = np.atleast_1d(subjects_list)
    num_subjects = subject_id_list.size
    if num_subjects < 1:
        raise ValueError('Input subject list is empty.')

    num_digits_id_size = len(str(num_subjects))
    max_id_width = max(map(len, subject_id_list))

    return subject_id_list, num_subjects, max_id_width, num_digits_id_size
colorize3_poisson.py (project: SynthText, author: ankush-me)
def __init__(self,alpha,color):

        # alpha for the whole image:
        assert alpha.ndim==2
        self.alpha = alpha
        [n,m] = alpha.shape[:2]

        color=np.atleast_1d(np.array(color)).astype('uint8')
        # color for the image:
        if color.ndim==1: # constant color for whole layer
            ncol = color.size
            if ncol == 1 : #grayscale layer
                self.color = color * np.ones((n,m,3),'uint8')
            if ncol == 3 : 
                self.color = np.ones((n,m,3),'uint8') * color[None,None,:]
        elif color.ndim==2: # grayscale image
            self.color = np.repeat(color[:,:,None],repeats=3,axis=2).copy().astype('uint8')
        elif color.ndim==3: #rgb image
            self.color = color.copy().astype('uint8')
        else:
            print(color.shape)
            raise Exception("color datatype not understood")
geolib.py (project: pygeotools, author: dshean)
def cT_helper(x, y, z, in_srs, out_srs):
    """Helper function that wraps osr CoordinatTransformation
    """
    x, y, z = np.atleast_1d(x), np.atleast_1d(y), np.atleast_1d(z)
    #Handle cases where z is 0 - probably a better way to use broadcasting for this
    if x.shape[0] != z.shape[0]:
        #Watch out for masked array input here
        orig_z = z[0]
        z = np.zeros_like(x)
        z[:] = orig_z
    orig_shape = x.shape
    cT = osr.CoordinateTransformation(in_srs, out_srs)
    #x2, y2, z2 = zip(*[cT.TransformPoint(*xyz) for xyz in zip(x, y, z)])
    x2, y2, z2 = list(zip(*[cT.TransformPoint(*xyz) for xyz in zip(np.ravel(x),np.ravel(y),np.ravel(z))]))
    if len(x2) == 1:
        x2, y2, z2 = x2[0], y2[0], z2[0] 
    else:
        x2 = np.array(x2).reshape(orig_shape)
        y2 = np.array(y2).reshape(orig_shape)
        z2 = np.array(z2).reshape(orig_shape)
    return x2, y2, z2
bulb_sine.py (project: poke_semantics, author: apilaskowski)
def imscatter(x, y, image, ax=None, zoom=1):
    if ax is None:
        ax = plt.gca()
    try:
        image = plt.imread(image)
    except TypeError:
        # Likely already an array...
        pass
    im = OffsetImage(image, zoom=zoom)
    x, y = np.atleast_1d(x, y)
    artists = []
    for x0, y0 in zip(x, y):
        ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False)
        artists.append(ax.add_artist(ab))
    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()
    return artists
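A usage sketch, assuming imscatter above is defined in the same module as these imports (the thumbnail file name is illustrative):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox

plt.imsave('thumb.png', np.random.rand(8, 8))         # write a tiny placeholder image
imscatter([0.1, 0.5, 0.9], [0.2, 0.8, 0.4], 'thumb.png', zoom=5)
plt.show()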
example1d.py (project: pyGPGO, author: hawk31)
def plotGPGO(gpgo, param):
    param_value = list(param.values())[0][1]
    x_test = np.linspace(param_value[0], param_value[1], 1000).reshape((1000, 1))
    hat = gpgo.GP.predict(x_test, return_std=True)
    y_hat, y_std = hat[0], np.sqrt(hat[1])
    l, u = y_hat - 1.96 * y_std, y_hat + 1.96 * y_std
    fig = plt.figure()
    r = fig.add_subplot(2, 1, 1)
    r.set_title('Fitted Gaussian process')
    plt.fill_between(x_test.flatten(), l, u, alpha=0.2)
    plt.plot(x_test.flatten(), y_hat, color='red', label='Posterior mean')
    plt.legend(loc=0)
    a = np.array([-gpgo._acqWrapper(np.atleast_1d(x)) for x in x_test]).flatten()
    r = fig.add_subplot(2, 1, 2)
    r.set_title('Acquisition function')
    plt.plot(x_test, a, color='green')
    gpgo._optimizeAcq(method='L-BFGS-B', n_start=1000)
    plt.axvline(x=gpgo.best, color='black', label='Found optima')
    plt.legend(loc=0)
    plt.tight_layout()
    plt.savefig(os.path.join(os.getcwd(), 'mthesis_text/figures/chapter3/sine/{}.pdf'.format(i)))
    plt.show()
support_feature_extraction.py (project: laughter, author: ganesh-srinivas)
def _hz_to_mel(frequencies, htk=False):
    frequencies = np.atleast_1d(frequencies)

    if htk:
        return 2595.0 * np.log10(1.0 + frequencies / 700.0)

    # Fill in the linear part
    f_min = 0.0
    f_sp = 200.0 / 3

    mels = (frequencies - f_min) / f_sp

    # Fill in the log-scale part

    min_log_hz = 1000.0                         # beginning of log region (Hz)
    min_log_mel = (min_log_hz - f_min) / f_sp   # same (Mels)
    logstep = np.log(6.4) / 27.0                # step size for log region

    log_t = (frequencies >= min_log_hz)
    mels[log_t] = min_log_mel + np.log(frequencies[log_t]/min_log_hz) / logstep

    return mels
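This helper follows the Slaney-style Hz-to-mel mapping used by librosa (linear below 1 kHz, logarithmic above). A quick check, assuming numpy is imported as np alongside the function above:

print(_hz_to_mel(440.0))                     # scalar input is promoted to a 1-element array
print(_hz_to_mel([440.0, 1000.0, 8000.0]))   # the log-scale branch applies at and above 1 kHz
print(_hz_to_mel(440.0, htk=True))           # HTK formula instead of the Slaney mapping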
HelperFunctions.py (project: gullikson-scripts, author: kgullikson88)
def integral(x, y, I, k=10):
    """
    Integrate y = f(x) for x = 0 to a such that the integral = I
    I can be an array.

    Returns the values a that are found.
    """
    I = np.atleast_1d(I)

    f = UnivariateSpline(x, y, s=k)

    # Integrate as a function of x
    F = f.antiderivative()
    Y = F(x)

    a = []
    for intval in I:
        F2 = UnivariateSpline(x, Y/Y[-1] - intval, s=0)
        a.append(F2.roots())

    return np.hstack(a)
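A toy usage sketch (the flat test function is illustrative; integral above needs numpy and scipy's UnivariateSpline in scope): with f(x) = 1 on [0, 10], the normalized integral reaches 0.25 and 0.5 at x = 2.5 and x = 5.

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0.0, 10.0, 200)
y = np.ones_like(x)

a = integral(x, y, I=[0.25, 0.5], k=0)   # k is passed to the spline as the smoothing factor s
print(a)                                 # approximately [2.5, 5.0]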
lake_model.py (project: QuantEcon.lectures.code, author: QuantEcon)
def simulate_stock_path(self, X0, T):
        r"""
        Simulates the sequence of employment and unemployment stocks

        Parameters
        ------------
        X0 : array 
            Contains initial values (E0, U0)
        T : int
            Number of periods to simulate

        Returns
        --------- 
        X : iterator 
            Contains sequence of employment and unemployment stocks
        """

        X = np.atleast_1d(X0)  # Recast as array just in case
        for t in range(T):
            yield X
            X = self.A @ X
lake_model.py (project: QuantEcon.lectures.code, author: QuantEcon)
def simulate_rate_path(self, x0, T):
        r"""
        Simulates the sequence of employment and unemployment rates.

        Parameters
        ------------
        x0 : array 
            Contains initial values (e0,u0)
        T : int
            Number of periods to simulate

        Returns
        ---------
        x : iterator 
            Contains sequence of employment and unemployment rates

        """
        x = np.atleast_1d(x0)  # Recast as array just in case
        for t in range(T):
            yield x
            x = self.A_hat @ x
tsne.py (project: CNN_Visualization, author: albioTQ)
def imagesPlot(images, positions, zoom=0.25):
    fig, ax = plt.subplots()

    for num in range(len(images)):

        x = positions[num, 0]
        y = positions[num, 1]
        image = images[num]

        im = OffsetImage(image, zoom=zoom)
        x, y = np.atleast_1d(x, y)

        for x0, y0 in zip(x, y):
            ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False)
            ax.add_artist(ab)

        ax.update_datalim(np.column_stack([x, y]))
        ax.autoscale()

    plt.show()

