Python nan_to_num() example source code
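Before the project examples, a minimal sketch of what np.nan_to_num() itself does. By default it maps NaN to 0.0 and clamps infinities to the largest finite floats; the nan=/posinf=/neginf= keywords (NumPy >= 1.17) let you pick the replacements:

import numpy as np

x = np.array([1.0, np.nan, np.inf, -np.inf])

# default: NaN -> 0.0, +inf -> largest finite float, -inf -> most negative finite float
print(np.nan_to_num(x))
# values: 1.0, 0.0, 1.7976931348623157e+308, -1.7976931348623157e+308

# explicit replacement values (NumPy 1.17+)
print(np.nan_to_num(x, nan=0.0, posinf=1e6, neginf=-1e6))
# values: 1.0, 0.0, 1000000.0, -1000000.0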

Source file: test_transforms.py (project: zipline-chinese, author: zhanghan1990)
import operator

import numpy as np
from numpy.testing import assert_allclose  # imports this snippet relies on


def test_vwap(context, data):
        """
        Tests the vwap transform by manually keeping track of the prices
        and volumes in a naive way and asserting that our hand-rolled vwap is
        the same
        """
        mins = sum(context.mins_for_days[-context.days:])
        for sid in data:
            prices = context.price_bars[sid][-mins:]
            vols = context.vol_bars[sid][-mins:]
            manual_vwap = sum(
                map(operator.mul, np.nan_to_num(np.array(prices)), vols),
            ) / sum(vols)

            assert_allclose(
                data[sid].vwap(context.days),
                manual_vwap,
            )
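The same NaN-safe VWAP can be written vectorized; a minimal sketch with made-up bars. Note it mirrors the test's convention: a missing price contributes zero to the numerator while its volume still counts in the denominator:

import numpy as np

prices = np.array([10.0, np.nan, 10.2, 10.1])  # NaN marks a missing bar
vols = np.array([100, 50, 200, 150])

vwap = np.sum(np.nan_to_num(prices) * vols) / np.sum(vols)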
Source file: metrics.py (project: bnn-analysis, author: myshkov)
import numpy as np
import scipy.stats as sc  # assuming sc is scipy.stats (gaussian_kde, entropy)

# MAX_GRID_POINTS, MAX_KL and _cached_p_pdf are module-level names in the
# original file


def kl_divergence(p_samples, q_samples):
    # estimate densities
    # p_samples = np.nan_to_num(p_samples)
    # q_samples = np.nan_to_num(q_samples)

    if isinstance(p_samples, tuple):
        idx, p_samples = p_samples

        if idx not in _cached_p_pdf:
            _cached_p_pdf[idx] = sc.gaussian_kde(p_samples)

        p_pdf = _cached_p_pdf[idx]
    else:
        p_pdf = sc.gaussian_kde(p_samples)

    q_pdf = sc.gaussian_kde(q_samples)

    # joint support
    left = min(min(p_samples), min(q_samples))
    right = max(max(p_samples), max(q_samples))

    p_samples_num = p_samples.shape[0]
    q_samples_num = q_samples.shape[0]

    # quantise
    lin = np.linspace(left, right, min(max(p_samples_num, q_samples_num), MAX_GRID_POINTS))
    p = p_pdf.pdf(lin)
    q = q_pdf.pdf(lin)

    # KL
    kl = min(sc.entropy(p, q), MAX_KL)

    return kl
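A self-contained way to exercise this estimator. The module-level names (sc as scipy.stats, MAX_GRID_POINTS, MAX_KL, _cached_p_pdf) are assumptions reconstructed from how the function uses them:

import numpy as np
import scipy.stats as sc

MAX_GRID_POINTS = 1000  # assumed module constants
MAX_KL = 100.0
_cached_p_pdf = {}

rng = np.random.default_rng(0)
p_samples = rng.normal(0.0, 1.0, size=500)
q_samples = rng.normal(0.5, 1.2, size=500)

print(kl_divergence(p_samples, q_samples))  # small positive value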
Source file: metrics.py (project: GraphTime, author: GlooperLabs)
import numpy as np

# precision, recall and get_edges are defined elsewhere in the same module


def f_score(Theta_true, Theta_est, beta=1, eps=1e-6, per_ts=False):
    """Compute the F score in the same manner as `precision` and `recall`;
    see those two functions for the meaning of the `eps` and `per_ts`
    arguments.

    Parameters
    ----------
    Theta_true : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    Theta_est : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    beta : float (default 1)
        beta value of the F score to be computed
    eps : float
    per_ts : bool
        whether to compute average or per timestep recall

    Returns
    -------
    ndarray or float
        per-timestep F scores if `per_ts`, otherwise a single averaged value
    """
    prec = precision(Theta_true, Theta_est, eps, per_ts=True)
    rec = recall(Theta_true, Theta_est, eps, per_ts=True)
    with np.errstate(divide='ignore', invalid='ignore'):
        nom = (1 + beta**2) * prec * rec
        den = beta**2 * prec + rec
        f = np.nan_to_num(np.true_divide(nom, den))
    return f if per_ts else np.sum(f) / len(Theta_true)
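The nan_to_num() guard matters at timesteps where precision and recall are both 0, which makes the F ratio 0/0; the pattern in isolation:

import numpy as np

prec = np.array([0.8, 0.0, 0.5])
rec = np.array([0.4, 0.0, 1.0])
beta = 1

with np.errstate(divide='ignore', invalid='ignore'):
    nom = (1 + beta**2) * prec * rec
    den = beta**2 * prec + rec
    f = np.nan_to_num(np.true_divide(nom, den))  # 0/0 -> NaN -> 0.0
print(f)  # [0.53333333 0.         0.66666667]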
Source file: metrics.py (project: GraphTime, author: GlooperLabs)
def global_f_score(Theta_true, Theta_est, beta=1, eps=1e-6):
    """In line with `global_precision` and `global_recall`, compute the
    global f score given true and estimated graphical structures. The
    f score has the only parameter beta.

    Parameters
    ----------
    Theta_true : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    Theta_est : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    beta : float (default 1)
        beta value of the F score to be computed
    eps : float

    Returns
    -------
    float
        f-beta score
    """
    assert Theta_est.shape == Theta_true.shape
    d = Theta_true.shape[1]
    n = len(Theta_est)
    tps = fps = fns = tns = 0
    for i in range(n):
        est_edges = set(get_edges(Theta_est[i], eps))
        gt_edges = set(get_edges(Theta_true[i], eps))
        n_joint = len(est_edges.intersection(gt_edges))
        tps += n_joint
        fps += len(est_edges) - n_joint
        fns += len(gt_edges) - n_joint
        tns += d**2 - d - tps - fps - fns  # accumulated but never used below
    nom = (1 + beta**2) * tps
    denom = nom + beta**2 * fns + fps
    with np.errstate(divide='ignore', invalid='ignore'):
        f = np.nan_to_num(np.true_divide(nom, denom))
        return f
Source file: pynufft_gpu.py (project: pynufft, author: jyhmiinlin)
import numpy


def nufft_alpha_kb_fit(N, J, K):
    '''
    Find the parameters alpha and beta of the scaling factor st['sn'].
    Note: when J = 1, alpha is hardwired to [1, 0, 0, ...] (a uniform
    scaling factor). kaiser_bessel() and kaiser_bessel_ft() are helpers
    from the same module.
    '''
    beta = 1
    Nmid = (N - 1.0) / 2.0
    if N > 40:
        L = 13
    else:
        L = int(numpy.ceil(N / 3))  # keep L integral so the reshapes below work

    nlist = numpy.arange(0, N) * 1.0 - Nmid
    (kb_a, kb_m) = kaiser_bessel('string', J, 'best', 0, K / N)
    if J > 1:
        sn_kaiser = 1 / kaiser_bessel_ft(nlist / K, J, kb_a, kb_m, 1.0)
    elif J == 1:  # The case when samples are on regular grids
        sn_kaiser = numpy.ones((1, N), dtype=dtype)  # dtype is a module-level default in the original file
    gam = 2 * numpy.pi / K
    X_ant = beta * gam * nlist.reshape((N, 1), order='F')
    X_post = numpy.arange(0, L + 1)
    X_post = X_post.reshape((1, L + 1), order='F')
    X = numpy.dot(X_ant, X_post)  # [N,L]
    X = numpy.cos(X)
    sn_kaiser = sn_kaiser.reshape((N, 1), order='F').conj()
    X = numpy.array(X, dtype=dtype)
    sn_kaiser = numpy.array(sn_kaiser, dtype=dtype)
    coef = numpy.linalg.lstsq(numpy.nan_to_num(X), numpy.nan_to_num(sn_kaiser), rcond=None)[0]  # (X \ sn_kaiser.H)
    alphas = coef
    if J > 1:
        alphas[0] = alphas[0]
        alphas[1:] = alphas[1:] / 2.0
    elif J == 1:  # cases on grids
        alphas[0] = 1.0
        alphas[1:] = 0.0
    alphas = numpy.real(alphas)
    return (alphas, beta)
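The nan_to_num() wrapping around lstsq() guards against NaNs produced by the 1/kaiser_bessel_ft() division; the pattern in isolation, with made-up data (NaNs in the inputs would otherwise make the underlying SVD fail or go NaN):

import numpy as np

X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, np.nan]])  # one corrupted entry
y = np.array([1.0, 2.0, 3.0])

coef = np.linalg.lstsq(np.nan_to_num(X), np.nan_to_num(y), rcond=None)[0]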
Source file: tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_eeg(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming the signal is recorded at 100 Hz
    """
    # use_sfreq, fft and stats are module-level names in the original file
    if signals.ndim == 1: signals = np.expand_dims(signals, 0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],9),dtype='float32')
    # relative power features for the frequency bands
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    spindle = np.sum(np.abs(w[:,np.arange(12*nsamp/sfreq,14*nsamp/sfreq, dtype=int)]),axis=1)
    sum_abs_pow = delta + theta + alpha + beta + gamma + spindle
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = spindle /sum_abs_pow
    feats[:,6] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))        # kurtosis
    feats[:,7] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,8] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    if np.any(np.isnan(feats)): print('NaN detected')  # feats == np.nan is always False, since NaN != NaN
    return np.nan_to_num(feats)
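A note on the NaN check: NaN never compares equal to anything, itself included, so an elementwise == np.nan test cannot detect NaNs; np.isnan() is the reliable test:

import numpy as np

print(np.nan == np.nan)                            # False
print(np.any(np.array([1.0, np.nan]) == np.nan))   # False, misses the NaN
print(np.any(np.isnan(np.array([1.0, np.nan]))))   # True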
Source file: tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_wavelet(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming the signal is recorded at 100 Hz
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],8),dtype='float32')
    # relative power features for the frequency bands
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))        # kurtosis
    feats[:,6] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,7] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    if np.any(np.isnan(feats)): print('NaN detected')  # feats == np.nan is always False, since NaN != NaN

    return np.nan_to_num(feats)
Source file: tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_eog(signals):
    """
    calculate the EOG features
    :param signals: 1D or 2D signals
    """

    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real   
    feats = np.zeros((signals.shape[0],15),dtype='float32')
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    feats[:,6] = np.sqrt(np.max(signals, axis=1))    #PAV
    feats[:,7] = np.sqrt(np.abs(np.min(signals, axis=1)))   #VAV   
    feats[:,8] = np.argmax(signals, axis=1)/nsamp #PAP
    feats[:,9] = np.argmin(signals, axis=1)/nsamp #VAP
    feats[:,10] = np.sqrt(np.sum(np.abs(signals), axis=1)/ np.mean(np.sum(np.abs(signals), axis=1))) # AUC
    feats[:,11] = np.sum(((np.roll(np.sign(signals), 1,axis=1) - np.sign(signals)) != 0).astype(int),axis=1)/nsamp #TVC
    feats[:,12] = np.log10(np.std(signals, axis=1)) #STD/VAR
    feats[:,13] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))       # kurtosis
    feats[:,14] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    if np.any(np.isnan(feats)): print('NaN detected')  # feats == np.nan is always False, since NaN != NaN
    return np.nan_to_num(feats)
Source file: tools.py (project: AutoSleepScorerDev, author: skjerns)
def feat_emg(signals):
    """
    calculate the EMG median as defined by Leangkvist (2012),
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real   
    feats = np.zeros((signals.shape[0],13),dtype='float32')
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    emg = np.sum(np.abs(w[:,np.arange(12.5*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)
    feats[:,6] = emg / np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)  # ratio of high freq to total motor
    feats[:,7] = np.median(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)    # median freq
    feats[:,8] = np.mean(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)   #  mean freq
    feats[:,9] = np.std(signals, axis=1)    #  std 
    feats[:,10] = np.mean(signals,axis=1)
    feats[:,11] = np.log10(stats.kurtosis(signals,fisher=False,axis=1) )
    feats[:,12] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    if np.any(np.isnan(feats)): print('NaN detected')  # feats == np.nan is always False, since NaN != NaN

    return np.nan_to_num(feats)
Source file: sts.py (project: SentEval, author: facebookresearch)
def do_prepare(self, params, prepare):
        if 'similarity' in params:
            self.similarity = params.similarity
        else:  # Default similarity is cosine
            self.similarity = lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))
        return prepare(params, self.samples)
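The cosine helper is not shown in the snippet; a minimal sketch assuming it returns plain cosine similarity. The inner nan_to_num() calls clean the embeddings, the outer one maps a 0/0 similarity from an all-zero vector to 0.0:

import numpy as np

def cosine(u, v):
    # assumed helper: cosine similarity, 0/0 when either vector is all zeros
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

s1 = np.array([1.0, np.nan, 2.0])
s2 = np.zeros(3)

with np.errstate(invalid='ignore'):
    sim = np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))  # 0.0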
Source file: solar_corr.py (project: solar-correlation-map, author: Zapf-Consulting)
def transform_to_correlation_dist(data):
    y_corr = np.corrcoef(data.T)
    # we just need the magnitude of the correlation and don't care whether it's positive or not
    abs_corr = np.abs(y_corr)
    return np.nan_to_num(abs_corr)
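np.corrcoef() yields NaN wherever a zero-variance (constant) variable is involved, which is exactly what the nan_to_num() guard handles; a quick demo:

import numpy as np

data = np.array([[1.0, 2.0, 5.0],
                 [2.0, 2.0, 6.0],
                 [3.0, 2.0, 7.0]])  # second column is constant

corr = np.corrcoef(data.T)  # NaN row/column for the constant variable (NumPy warns)
print(np.nan_to_num(np.abs(corr)))  # those entries become 0.0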
Source file: multiblackbody.py (project: MOSFiT, author: guillochon)
def process(self, **kwargs):
        """Process module."""
        raise RuntimeError('`MultiBlackbody` is not yet functional.')
        # everything below the raise is unreachable until the module is finished
        kwargs = self.prepare_input(self.key('luminosities'), **kwargs)
        self._luminosities = kwargs[self.key('luminosities')]
        self._bands = kwargs['all_bands']
        self._band_indices = kwargs['all_band_indices']
        self._areas = kwargs[self.key('areas')]
        self._temperature_phots = kwargs[self.key('temperaturephots')]
        xc = self.X_CONST  # noqa: F841
        fc = self.FLUX_CONST  # noqa: F841
        temperature_phot = self._temperature_phot
        zp1 = 1.0 + kwargs[self.key('redshift')]
        seds = []
        for li, lum in enumerate(self._luminosities):
            cur_band = self._bands[li]  # noqa: F841
            bi = self._band_indices[li]
            rest_freqs = [x * zp1  # noqa: F841
                          for x in self._sample_frequencies[bi]]
            wav_arr = np.array(self._sample_wavelengths[bi])  # noqa: F841
            radius_phot = self._radius_phot[li]  # noqa: F841
            temperature_phot = self._temperature_phot[li]  # noqa: F841

            if li == 0:
                sed = ne.evaluate(
                    'fc * radius_phot**2 * rest_freqs**3 / '
                    '(exp(xc * rest_freqs / temperature_phot) - 1.0)')
            else:
                sed = ne.re_evaluate()

            sed = np.nan_to_num(sed)

            seds.append(list(sed))

        seds = self.add_to_existing_seds(seds, **kwargs)

        return {'sample_wavelengths': self._sample_wavelengths,
                self.key('seds'): seds}
Source file: SurfaceFollow.py (project: Modeling-Cloth, author: the3dadvantage)
def rotate_around_axis(coords, Q, origin='empty'):
    '''Uses a standard quaternion to rotate a vector. Q is a
    4-component quaternion (w, x, y, z); coords is the 3d location of
    the point, or an N x 3 array of vectors. Works with Q as a tuple
    or as a np array of shape (4,).'''
    if origin == 'empty':    
        vcV = np.cross(Q[1:], coords)
        RV = np.nan_to_num(coords + vcV * (2*Q[0]) + np.cross(Q[1:],vcV)*2)
    else:
        coords -= origin
        vcV = np.cross(Q[1:],coords)
        RV = (np.nan_to_num(coords + vcV * (2*Q[0]) + np.cross(Q[1:],vcV)*2)) + origin       
        coords += origin #undo in-place offset
    return RV
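A quick sanity check of the formula: a unit quaternion Q = (cos(theta/2), sin(theta/2) * axis) rotating (1, 0, 0) by 90 degrees about the z-axis should give (0, 1, 0):

import numpy as np

theta = np.pi / 2
Q = np.array([np.cos(theta / 2), 0.0, 0.0, np.sin(theta / 2)])  # (w, x, y, z)

print(rotate_around_axis(np.array([1.0, 0.0, 0.0]), Q))
# -> approximately [0. 1. 0.]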
Source file: SurfaceFollow.py (project: Modeling-Cloth, author: the3dadvantage)
def barycentric_generate(hits, tris):
    '''Create scalars to be used by points and triangles'''
    # where the hit lands on the two tri vecs
    tv = tris[:, 1] - tris[:, 0]
    hv = hits - tris[:, 0]
    d1a = np.einsum('ij, ij->i', hv, tv)
    d1b = np.einsum('ij, ij->i', tv, tv)
    scalar1 = np.nan_to_num(d1a / d1b)

    t2v = tris[:, 2] - tris[:, 0]
    d2a = np.einsum('ij, ij->i', hv, t2v)
    d2b = np.einsum('ij, ij->i', t2v, t2v)
    scalar2 = np.nan_to_num(d2a / d2b)

    # closest point on edge segment between the two points created above
    cp1 = tv * np.expand_dims(scalar1, axis=1)
    cp2 = t2v * np.expand_dims(scalar2, axis=1)
    cpvec = cp2 - cp1
    cp1_at = tris[:,0] + cp1
    hcp = hits - cp1_at # this is the cp3 described above; not sure what it's for yet
    dhcp = np.einsum('ij, ij->i', hcp, cpvec)
    d3b = np.einsum('ij, ij->i', cpvec, cpvec)
    hcp_scalar = np.nan_to_num(dhcp / d3b)
    hcp_vec = cpvec * np.expand_dims(hcp_scalar, axis=1)    

    # base of tri on edge between first two points
    d3 = np.einsum('ij, ij->i', -cp1, cpvec)
    scalar3 = np.nan_to_num(d3 / d3b)
    base_cp_vec = cpvec * np.expand_dims(scalar3, axis=1)
    base_on_span = cp1_at + base_cp_vec

    # where the point occurs on the edge between the base of the triangle
    #   and the closest point of the base of the triangle on cpvec
    base_vec = base_on_span - tris[:,0]
    dba = np.einsum('ij, ij->i', hv, base_vec)
    dbb = np.einsum('ij, ij->i', base_vec, base_vec)
    scalar_final = np.nan_to_num(dba / dbb)
    p_on_bv = base_vec * np.expand_dims(scalar_final, axis=1)
    perp = (p_on_bv) - (cp1 + base_cp_vec)
    return scalar1, scalar2, hcp_scalar, scalar3, scalar_final
Source file: SurfaceFollow.py (project: Modeling-Cloth, author: the3dadvantage)
def project_points(points, tri_coords):
    '''Using this to get the points off the surface
    Takes the average length of two vecs off triangles
    and applies it to the length of the normals.
    This way the normal scales with the mesh and with
    changes to the individual triangle vectors'''
    t0 = tri_coords[:, 0]
    t1 = tri_coords[:, 1]
    t2 = tri_coords[:, 2]
    tv1 = t1 - t0
    tv2 = t2 - t0
    cross = np.cross(tv1, tv2)

    # get the average length of the two vectors and apply it to the cross product
    sq = np.sqrt(np.einsum('ij,ij->i', cross, cross))
    x1 = np.einsum('ij,ij->i', tv1, tv1)
    x2 = np.einsum('ij,ij->i', tv2, tv2)
    av_root = np.sqrt((x1 + x2) / 2)
    cr_root = (cross / np.expand_dims(sq, axis=1)) * np.expand_dims(av_root, axis=1)    

    v1 = points - t0
    v1_dots = np.einsum('ij,ij->i', cr_root, v1)
    n_dots = np.einsum('ij,ij->i', cr_root, cr_root)
    scale = np.nan_to_num(v1_dots / n_dots)
    offset = cr_root * np.expand_dims(scale, axis=1)
    drop = points - offset # The drop is used by the barycentric generator as points in the triangles
    return drop, scale
Source file: do_adjust.py (project: Large-scale-bundle-adjustment-in-scipy, author: bachmmmar)
def rotate(points, rot_vecs):
    """Rotate points by given rotation vectors.

    Rodrigues' rotation formula is used.
    """
    theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]
    with np.errstate(invalid='ignore'):
        v = rot_vecs / theta
        v = np.nan_to_num(v)
    dot = np.sum(points * v, axis=1)[:, np.newaxis]
    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)

    return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v
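Sanity check for the Rodrigues formula: the rotation vector (0, 0, pi/2) is a 90 degree turn about z, so (1, 0, 0) should map to (0, 1, 0):

import numpy as np

points = np.array([[1.0, 0.0, 0.0]])
rot_vecs = np.array([[0.0, 0.0, np.pi / 2]])

print(rotate(points, rot_vecs))  # -> approximately [[0. 1. 0.]]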
Source file: myequation.py (project: decision_tree, author: leeamen)
def Entropy(vector_class):
  m = np.sum(vector_class)
  if m <= 0.1: return 0.0

  vec = 1.0 / m * np.array(vector_class)
  try:
    # nan_to_num clamps log2(0) = -inf to a large finite negative number,
    # so zero-probability terms contribute exactly 0 to the dot product
    e = -1.0 * np.dot(vec, np.nan_to_num(np.log2(vec)))
  except RuntimeWarning:
    # only reachable if RuntimeWarnings are escalated to errors
    pass
  return e
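Quick checks of the function (NumPy emits a harmless divide warning for log2(0)):

import numpy as np

print(Entropy([2, 2]))  # 1.0 bit: two equally likely classes
print(Entropy([4, 0]))  # -0.0, i.e. zero: a pure node contributes no entropy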

Source file: metrics.py (project: introspective, author: numeristical)
def inverse_binary_entropy_nats(entropy_val, num_iter=3):
    guess = (np.arcsin((entropy_val/np.log(2))**(1/.645)))/np.pi
    for i in range(num_iter):
        guess = guess + np.nan_to_num((entropy_val-binary_entropy_nats(guess))/binary_entropy_nats_prime(guess))
    return guess
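binary_entropy_nats() and its derivative are not shown; a self-contained sketch using the standard definitions H(p) = -p*ln(p) - (1-p)*ln(1-p) and H'(p) = ln((1-p)/p), which is what the Newton update above needs:

import numpy as np

def binary_entropy_nats(p):
    # assumed implementation: binary entropy in nats
    return -p * np.log(p) - (1 - p) * np.log(1 - p)

def binary_entropy_nats_prime(p):
    # derivative of the above
    return np.log((1 - p) / p)

p = inverse_binary_entropy_nats(0.5)  # which p in (0, 0.5] has entropy 0.5 nats?
print(p, binary_entropy_nats(p))      # entropy round-trips to ~0.5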
Source file: chart.py (project: DeepTrade_keras, author: happynoom)
def moving_extract(self, window=30, open_prices=None, close_prices=None, high_prices=None, low_prices=None,
                       volumes=None, with_label=True, flatten=True):
        self.extract(open_prices=open_prices, close_prices=close_prices, high_prices=high_prices, low_prices=low_prices,
                     volumes=volumes)
        feature_arr = numpy.asarray(self.feature)
        p = 0
        rows = feature_arr.shape[0]
        print("feature dimension: %s" % rows)
        if with_label:
            moving_features = []
            moving_labels = []
            while p + window < feature_arr.shape[1]:
                x = feature_arr[:, p:p + window]
                # y = cmp(close_prices[p + window], close_prices[p + window - 1]) + 1
                p_change = (close_prices[p + window] - close_prices[p + window - 1]) / close_prices[p + window - 1]
                # use percent of change as label
                y = p_change
                if flatten:
                    x = x.flatten("F")
                moving_features.append(numpy.nan_to_num(x))
                moving_labels.append(y)
                p += 1

            return numpy.asarray(moving_features), numpy.asarray(moving_labels)
        else:
            moving_features = []
            while p + window <= feature_arr.shape[1]:
                x = feature_arr[:, p:p + window]
                if flatten:
                    x = x.flatten("F")
                moving_features.append(numpy.nan_to_num(x))
                p += 1
            return moving_features
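The sliding-window part in isolation, with a made-up feature matrix: each step takes a (features x window) slice, cleans NaNs, and flattens it column-major ("F"), matching the unlabeled branch above:

import numpy

feature_arr = numpy.array([[1.0, numpy.nan, 3.0, 4.0],
                           [5.0, 6.0, numpy.nan, 8.0]])
window = 2

moving_features = [numpy.nan_to_num(feature_arr[:, p:p + window]).flatten("F")
                   for p in range(feature_arr.shape[1] - window + 1)]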
Source file: chart.py (project: DeepTrade_keras, author: happynoom)
def feature_distribution(self):
        for k, feature_column in enumerate(self.feature):
            fc = numpy.nan_to_num(feature_column)
            mean = numpy.mean(fc)
            var = numpy.var(fc)
            max_value = numpy.max(fc)
            min_value = numpy.min(fc)
            print("[%s_th feature] mean: %s, var: %s, max: %s, min: %s" % (k, mean, var, max_value, min_value))

