Example source code for Python's isinf()

textrank_fujun.py (project: YelpDataChallenge, author: fujunswufe)
def cosine_similarity_self(A):
    similarity = np.dot(A, A.T)
    square_mag = np.diag(similarity)
    inv_square_mag = 1 / square_mag
    inv_square_mag[np.isinf(inv_square_mag)] = 0
    inv_mag = np.sqrt(inv_square_mag)
    cosine = similarity * inv_mag
    cosine = cosine.T * inv_mag
    return cosine

# document should be a list of sentences
# method = "word2vec", "lda", "tfidf"
# def extraction(document, method="rawText"):
#
#     # graph = build_graph(document, method)  # document is a list of sentences
#
#     calculated_page_rank = networkx.pagerank(graph, weight="weight")
#
#     # most important sentences in descending order of importance
#     sentences = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=False)
#
#     return sentences[0:4]
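
A quick usage sketch for cosine_similarity_self above (assuming numpy is imported as np, as in the snippet): the function returns the pairwise cosine similarity of the rows of A, and rows with zero magnitude produce zeros instead of infinities thanks to the np.isinf mask.

import numpy as np

A = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [0.0, 0.0]])      # the last row has zero magnitude
sim = cosine_similarity_self(A)
print(np.round(sim, 3))
# ones on the diagonal for the non-zero rows; the zero row maps to all zeros
# (NumPy emits a divide-by-zero RuntimeWarning for the zero-magnitude row)
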
util.py (project: table-compositor, author: InvestmentSystems)
def df_type_to_str(i):
    '''
    Convert into simple datatypes from pandas/numpy types
    '''
    if isinstance(i, np.bool_):
        return bool(i)
    if isinstance(i, np.int_):
        return int(i)
    if isinstance(i, float):  # np.float was a deprecated alias of the builtin float
        if np.isnan(i):
            return 'NaN'
        elif np.isinf(i):
            return str(i)
        return float(i)
    if isinstance(i, np.uint):
        return int(i)
    if type(i) == bytes:
        return i.decode('UTF-8')
    if isinstance(i, (tuple, list)):
        return str(i)
    if i is pd.NaT:  # not identified as a float null
        return 'NaN'
    return str(i)
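
A quick usage sketch (assuming numpy as np and pandas as pd, as the snippet does):

import numpy as np
import pandas as pd

print(df_type_to_str(np.float64(np.inf)))   # 'inf'  -> infinities are kept as strings
print(df_type_to_str(np.float64(np.nan)))   # 'NaN'
print(df_type_to_str(np.int_(3)))           # 3
print(df_type_to_str(pd.NaT))               # 'NaN'
print(df_type_to_str(b'abc'))               # 'abc'
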
ColorMapWidget.py (project: NeoAnalysis, author: neoanalysis)
def map(self, data):
        data = data[self.fieldName]
        colors = np.empty((len(data), 4))
        default = np.array(fn.colorTuple(self['Default'])) / 255.
        colors[:] = default

        for v in self.param('Values'):
            mask = data == v.maskValue
            c = np.array(fn.colorTuple(v.value())) / 255.
            colors[mask] = c
        #scaled = np.clip((data-self['Min']) / (self['Max']-self['Min']), 0, 1)
        #cmap = self.value()
        #colors = cmap.map(scaled, mode='float')

        #mask = np.isnan(data) | np.isinf(data)
        #nanColor = self['NaN']
        #nanColor = (nanColor.red()/255., nanColor.green()/255., nanColor.blue()/255., nanColor.alpha()/255.)
        #colors[mask] = nanColor

        return colors
test_util.py (project: ConfigSpace, author: automl)
def _test_get_one_exchange_neighbourhood(self, hp):
        cs = ConfigurationSpace()
        num_neighbors = 0
        if not isinstance(hp, list):
            hp = [hp]
        for hp_ in hp:
            cs.add_hyperparameter(hp_)
            if np.isinf(hp_.get_num_neighbors()):
                num_neighbors += 4
            else:
                num_neighbors += hp_.get_num_neighbors()

        cs.seed(1)
        config = cs.get_default_configuration()
        all_neighbors = []
        for i in range(100):
            neighborhood = get_one_exchange_neighbourhood(config, i)
            for new_config in neighborhood:
                self.assertNotEqual(config, new_config)
                all_neighbors.append(new_config)

        return all_neighbors
seriesanalysis.py (project: histwords, author: williamleif)
def get_series_mean_std_peryear(word_time_series, i_year_words, one_minus=False, start_year=1900, end_year=2000, year_inc=1, exclude_partial_missing=False):
    """
    Return the mean and stderr arrays for the values of the words listed per year in i_year_words, over the specified year range
    """
    means = []
    stderrs = []
    r_word_time_series = {}
    if exclude_partial_missing:
        for word, time_series in word_time_series.iteritems():
            if not np.isnan(np.sum(time_series.values())):
                r_word_time_series[word] = time_series
    else:
        r_word_time_series = word_time_series
    for year in xrange(start_year, end_year + 1, year_inc):
        word_array = np.array([r_word_time_series[word][year] for word in i_year_words[year] 
            if word in r_word_time_series and not np.isnan(r_word_time_series[word][year]) and not np.isinf(r_word_time_series[word][year])])
        if len(word_array) == 0:
            continue
        if one_minus:
            word_array = 1 - word_array
        means.append(word_array.mean())
        stderrs.append(word_array.std())
    return np.array(means), np.array(stderrs)
DNGR.py (project: DNGR-Keras, author: MdAsifKhan)
def PPMI_matrix(M):

    M = scale_sim_mat(M)
    nm_nodes = len(M)

    col_s = np.sum(M, axis=0).reshape(1,nm_nodes)
    row_s = np.sum(M, axis=1).reshape(nm_nodes,1)
    D = np.sum(col_s)
    rowcol_s = np.dot(row_s,col_s)
    PPMI = np.log(np.divide(D*M,rowcol_s))
    PPMI[np.isnan(PPMI)] = 0.0
    PPMI[np.isinf(PPMI)] = 0.0
    PPMI[np.isneginf(PPMI)] = 0.0
    PPMI[PPMI<0] = 0.0

    return PPMI
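
Because scale_sim_mat is defined elsewhere in that project, here is a self-contained sketch of the same PPMI formula, under the assumption that scale_sim_mat simply row-normalizes the matrix (that assumption is mine, not taken from the snippet):

import numpy as np

# toy co-occurrence matrix, row-normalized so each row sums to 1
M = np.array([[0., 2., 1.],
              [2., 0., 0.],
              [1., 0., 0.]])
M = M / M.sum(axis=1, keepdims=True)

nm_nodes = len(M)
col_s = np.sum(M, axis=0).reshape(1, nm_nodes)
row_s = np.sum(M, axis=1).reshape(nm_nodes, 1)
D = np.sum(col_s)
with np.errstate(divide='ignore', invalid='ignore'):
    PPMI = np.log(np.divide(D * M, np.dot(row_s, col_s)))
PPMI[~np.isfinite(PPMI)] = 0.0   # one isfinite mask covers the isnan/isinf/isneginf checks above
PPMI[PPMI < 0] = 0.0
print(np.round(PPMI, 3))
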
test_scalarmath.py (project: radar, author: amoose136)
def test_zero_division(self):
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                a = t(0.0)
                b = t(1.0)
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.nan))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.nan))
                assert_(np.isnan(b/a))
                b = t(0.)
                assert_(np.isnan(b/a))
testMovies.py (project: software-suite-movie-market-analysis, author: 93lorenzo)
def hypothesis(self,x,theta):
        l_theta = []
        for i in range(len(theta)):
            #print theta[i]
            thetaX = x.dot(theta[i])#  wx
            thetaX_exp = np.exp(thetaX)  # exp(wx)
            l_theta.append(thetaX_exp)
        l_theta = np.array(l_theta)
        #print np.shape(l_theta)
        thetaX_exp_sum = np.sum(l_theta)  # sum of exp(wx)
        #print thetaX_exp_sum
        p = l_theta.T / thetaX_exp_sum  # 5xlen(x) predicted results
        if np.isinf(p).any():  # deal with overflow in results.
            inf_idx = np.isinf(p)  # idx where overflow occurs
            val = np.sum(p, 0) / np.sum(inf_idx, 0) * inf_idx  # values to be used to substitution
            p[inf_idx] = val[inf_idx]  # substitute values
        return p.T




#### predict the labels for a set of observations ####
LogisticRegression.py (project: software-suite-movie-market-analysis, author: 93lorenzo)
def hypothesis(self,x,theta):
        l_theta = []
        for i in range(len(theta)):
            thetaX = x.dot(theta[i])    ## wx ##
            thetaX_exp = np.exp(thetaX)  ## exp(wx)##
            if np.isinf(thetaX_exp):
                print "overflow"
                ###prova a mettere una matrice con probabilita tutte uguali in caso di overflow
            l_theta.append(thetaX_exp)
        l_theta = np.array(l_theta)
        thetaX_exp_sum = np.sum(l_theta)  ## sum of exp(wx) ##
        p = l_theta.T / thetaX_exp_sum  ## 5xlen(x) predicted results ##
        #print np.sum(p)
        '''if np.isinf(p).any():  ## deal with overflow in results ##
            inf_idx = np.isinf(p)  ## idx where overflow occurs ##
            val = np.sum(p, 0) / np.sum(inf_idx, 0) * inf_idx ## values to be used to substitution ##
            p[inf_idx] = val[inf_idx]  ## substitute values ##'''
        return p.T

    ## Compute the derivative of the cost function ##
mcmc.py (project: thejoker, author: adrn)
def ln_posterior(mcmc_p, joker_params, data):
    if joker_params._fixed_jitter:
        mcmc_p = list(mcmc_p)
        mcmc_p.insert(5, -np.inf) # HACK: whoa, major hackage!

    p = from_mcmc_params(mcmc_p).reshape(len(mcmc_p))

    lnp = ln_prior(p, joker_params)
    if np.isinf(lnp):
        return lnp

    lnl = ln_likelihood(p, joker_params, data)
    lnprob = lnp + lnl.sum()

    if np.isnan(lnprob):
        return -np.inf

    return lnprob
minibatch2.py (project: faster_rcnn_pytorch, author: longcw)
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 3-of-3*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.

    Returns:
        view_target_data (ndarray): N x 3K blob of regression targets
        view_loss_weights (ndarray): N x 3K blob of loss weights
    """
    view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
    view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
    inds = np.where( (clss > 0) & np.isfinite(viewpoint_data[:,0]) & np.isfinite(viewpoint_data[:,1]) & np.isfinite(viewpoint_data[:,2]) )[0]
    for ind in inds:
        cls = clss[ind]
        start = 3 * cls
        end = start + 3
        view_targets[ind, start:end] = viewpoint_data[ind, :]
        view_loss_weights[ind, start:end] = [1., 1., 1.]

    assert not np.isinf(view_targets).any(), 'viewpoint undefined'
    return view_targets, view_loss_weights
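
A small usage sketch (assuming numpy as np; three classes, one row of three viewpoint targets per RoI):

import numpy as np

clss = np.array([0, 1, 2])                       # class 0 is background
viewpoint_data = np.array([[0.0, 0.0, 0.0],
                           [0.1, 0.2, 0.3],
                           [0.4, np.inf, 0.5]])  # rows with non-finite values are skipped
targets, weights = _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes=3)
print(targets.shape)      # (3, 9): N x 3K
print(targets[1, 3:6])    # [0.1 0.2 0.3], written into the class-1 slot
print(weights[2].sum())   # 0.0, because the class-2 row contained inf
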
similarity.py (project: word2vec_pipeline, author: NIHOPA)
def compute_document_similarity(X):
    '''
    From a matrix of unit distances, computes the cosine similarity
    then changes to the angular distance (for a proper metric).
    '''

    S = cdist(X, X, metric='cosine')
    S -= 1
    S *= -1
    S[S > 1] = 1.0
    S[S < 0] = 0.0

    # Set nan values to zero
    S[np.isnan(S)] = 0

    # Convert to angular distance (a proper metric)
    S = 1 - (np.arccos(S) / np.pi)
    assert(not np.isnan(S).any())
    assert(not np.isinf(S).any())

    return S
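
A brief usage sketch (assuming numpy as np and from scipy.spatial.distance import cdist, which the snippet relies on): rows of X are document vectors, and the result is symmetric, lies in [0, 1], and has ones on the diagonal.

import numpy as np
from scipy.spatial.distance import cdist

np.random.seed(0)
X = np.random.randn(4, 16)            # four documents, 16-dimensional vectors
S = compute_document_similarity(X)
print(S.shape)                        # (4, 4)
print(np.allclose(np.diag(S), 1.0))   # True: a document is at zero angular distance from itself
print(np.allclose(S, S.T))            # True: the measure is symmetric
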
test_likdelta.py (project: bayes-qnet, author: casutton)
def do_test_delta_internal (self, net, nreps, ntasks, pct):
        for ri in range(nreps):
            arrv = net.sample (ntasks)
            obs = arrv.subset_by_task (pct)
            samples = net.slice_resample (obs, 0, 5)
            arrv_from = samples[len(samples)-1]
            print "Computing LIK0"
            lik0 = net.log_prob (arrv_from)
            for e in arrv_from:
                if not e.obs_d:
#                    print "Testing evt ", e
                    dfn = qnet.GGkGibbs(net, arrv_from, e, lik0).dfn()
                    d0 = e.d
                    d_test = [ d0+delta for delta in [ -0.5, -0.1, 0.1, 0.5, 1.0, 1.5, 3.0 ] ]
                    for d1 in d_test:
#                        print "Testing departure ", d1
                        lik_incremental = dfn(d1)
                        if numpy.isinf (lik_incremental): continue # probably right
                        lik_true = self.compute_full_lik (net, arrv_from, e, d1)
                        print "%d %.4f %.4f %.4f %.4f" % (e.eid, d0, d1, lik_incremental, lik_true)
                        if numpy.isinf(lik_true):
                            self.assertTrue (numpy.isinf(lik_incremental))
                        else:
                            self.assertAlmostEquals (lik_true, lik_incremental, 5)
numerics.py (project: vinci, author: Phylliade)
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity since Tensorflow does have problems
        # if we compare `K.abs(x) < np.inf`.
        return .5 * tf.square(x)

    condition = tf.abs(x) < clip_value
    squared_loss = .5 * tf.square(x)
    linear_loss = clip_value * (tf.abs(x) - .5 * clip_value)

    return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
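
A minimal usage sketch, assuming TensorFlow 2 eager execution with numpy as np and tensorflow as tf (as the snippet itself uses); note that passing np.inf as the clip value takes the purely quadratic branch guarded by np.isinf.

import numpy as np
import tensorflow as tf

y_true = tf.constant([0.0, 0.0, 0.0])
y_pred = tf.constant([0.5, 1.0, 3.0])

print(huber_loss(y_true, y_pred, clip_value=1.0).numpy())
# [0.125 0.5   2.5  ]  -> quadratic inside the clip region, linear outside
print(huber_loss(y_true, y_pred, clip_value=np.inf).numpy())
# [0.125 0.5   4.5  ]  -> plain squared error everywhere
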
linalg.py (project: sporco, author: bwohlberg)
def zdivide(x, y):
    """
    Return x/y, with 0 instead of NaN where y is 0.

    Parameters
    ----------
    x : array_like
      Numerator
    y : array_like
      Denominator

    Returns
    -------
    z : ndarray
      Quotient `x`/`y`
    """

    with np.errstate(divide='ignore', invalid='ignore'):
        div = x / y
    div[np.logical_or(np.isnan(div), np.isinf(div))] = 0
    return div
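
A short usage sketch (assuming numpy as np); note that x and y must be arrays, since the result is indexed with a boolean mask:

import numpy as np

x = np.array([1.0, 2.0, 0.0])
y = np.array([0.0, 2.0, 0.0])
print(zdivide(x, y))   # [0. 1. 0.]  -> both 1/0 (inf) and 0/0 (NaN) are mapped to 0
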
minibatch2.py (project: Automatic_Group_Photography_Enhancement, author: Yuliang-Zou)
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 3-of-3*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.

    Returns:
        view_target_data (ndarray): N x 3K blob of regression targets
        view_loss_weights (ndarray): N x 3K blob of loss weights
    """
    view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
    view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
    inds = np.where( (clss > 0) & np.isfinite(viewpoint_data[:,0]) & np.isfinite(viewpoint_data[:,1]) & np.isfinite(viewpoint_data[:,2]) )[0]
    for ind in inds:
        cls = clss[ind]
        start = 3 * cls
        end = start + 3
        view_targets[ind, start:end] = viewpoint_data[ind, :]
        view_loss_weights[ind, start:end] = [1., 1., 1.]

    assert not np.isinf(view_targets).any(), 'viewpoint undefined'
    return view_targets, view_loss_weights
Similarity.py (project: roleo, author: tony-hong)
def sim_inv_mag(M):
    '''
    Compute similarity matrix and the inverse of the magnitude on its diagonal for vectors.
    'M' is a matrix whose rows are the input vectors.
    '''
    # base similarity matrix (all dot products)
    # replace this with A.dot(A.T).todense() for sparse representation
    similarity = np.dot(M, M.T)
    # squared magnitude of preference vectors (number of occurrences)
    square_mag = np.diag(similarity)
    # inverse squared magnitude
    inv_square_mag = 1 / square_mag
    # if it doesn't occur, set its inverse magnitude to zero (instead of inf)
    inv_square_mag[np.isinf(inv_square_mag)] = 0
    # inverse of the magnitude
    inv_mag = np.sqrt(inv_square_mag)

    return similarity, inv_mag
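
The returned pair can be combined into the full cosine-similarity matrix in the same way as cosine_similarity_self earlier on this page; a brief sketch assuming numpy as np:

import numpy as np

M = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [0.0, 0.0]])          # a zero row would otherwise produce inf
similarity, inv_mag = sim_inv_mag(M)
cosine = (similarity * inv_mag).T * inv_mag
print(np.round(cosine, 3))          # ones on the diagonal for the non-zero rows
# (NumPy emits a divide-by-zero RuntimeWarning for the zero-magnitude row)
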
test_operations.py (project: blmath, author: bodylabs)
def test_zero_safe_divide(self):
        from blmath.numerics.operations import zero_safe_divide

        numerator = np.ones((5, 5))
        numerator[3, 3] = 0.

        denominator = np.ones((5, 5))
        denominator[2, 2] = 0.
        denominator[3, 3] = 0.

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            true_divide = np.true_divide(numerator, denominator)
        safe_divide = zero_safe_divide(numerator, denominator)
        self.assertTrue(np.isinf(true_divide[2, 2]))
        self.assertEqual(safe_divide[2, 2], 0.)
        self.assertTrue(np.isnan(true_divide[3, 3]))
        self.assertEqual(safe_divide[3, 3], 0.)
operations.py (project: blmath, author: bodylabs)
def zero_safe_divide(a, b, default_error_value=0.):
    """Element-wise division that accounts for floating point errors.

    Both invalid floating-point (e.g. 0. / 0.) and divide-by-zero errors are
    suppressed. Resulting values (NaN and Inf respectively) are replaced with
    `default_error_value`.

    """
    import numpy as np

    with np.errstate(invalid='ignore', divide='ignore'):
        quotient = np.true_divide(a, b)
        bad_value_indices = np.logical_or(
            np.isnan(quotient), np.isinf(quotient))
        quotient[bad_value_indices] = default_error_value

    return quotient
Likelihood.py (project: BayesVP, author: cameronliang)
def __call__(self,alpha):
        """
        Posterior distribution

        Returns
        ---------
        lnprob: float
            Natural log of posterior probability
        """

        lp = self.lnprior(alpha)

        if np.isinf(lp):
            return -np.inf
        else:
            return np.atleast_1d(lp + self.lnlike(alpha))[0]
test_scalarmath.py (project: krpcScripts, author: jwvanderbeck)
def test_zero_division(self):
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                a = t(0.0)
                b = t(1.0)
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.nan))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.nan))
                assert_(np.isnan(b/a))
                b = t(0.)
                assert_(np.isnan(b/a))
est_rel_entro_HJW.py (project: HJW_KL_divergence_estimator, author: Mathegineer)
def log_mat(x, n, g_coeff, c_1, const):
    with np.errstate(divide='ignore', invalid='ignore'):
        K = g_coeff.shape[0] - 1
        thres = 2 * c_1 * math.log(n) / n
        [T, X] = np.meshgrid(thres, x)
        ratio = np.clip(2*X/T - 1, 0, 1)
        # force MATLAB-esque behavior with NaN, inf
        ratio[T == 0] = 1.0
        ratio[X == 0] = 0.0
        q = np.reshape(np.arange(K), [1, 1, K])
        g = np.tile(np.reshape(g_coeff, [1, 1, K + 1]), [c_1.shape[1], 1])
        g[:, :, 0] = g[:, :, 0] + np.log(thres)
        MLE = np.log(X) + (1-X) / (2*X*n)
        MLE[X == 0] = -np.log(n) - const
        tmp = (n*X[:,:,np.newaxis] - q)/(T[:,:,np.newaxis]*(n - q))
        polyApp = np.sum(np.cumprod(np.dstack([np.ones(T.shape + (1,)), tmp]),
                                    axis=2) * g, axis=2)
        polyFail = np.logical_or(np.isnan(polyApp), np.isinf(polyApp))
        polyApp[polyFail] = MLE[polyFail]
        return ratio*MLE + (1-ratio)*polyApp
minibatch2.py (project: Faster-RCNN_TF, author: smallcorgi)
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 3-of-3*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.

    Returns:
        view_target_data (ndarray): N x 3K blob of regression targets
        view_loss_weights (ndarray): N x 3K blob of loss weights
    """
    view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
    view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
    inds = np.where( (clss > 0) & np.isfinite(viewpoint_data[:,0]) & np.isfinite(viewpoint_data[:,1]) & np.isfinite(viewpoint_data[:,2]) )[0]
    for ind in inds:
        cls = clss[ind]
        start = 3 * cls
        end = start + 3
        view_targets[ind, start:end] = viewpoint_data[ind, :]
        view_loss_weights[ind, start:end] = [1., 1., 1.]

    assert not np.isinf(view_targets).any(), 'viewpoint undefined'
    return view_targets, view_loss_weights
minibatch2.py (project: TFFRCNN, author: InterVideo)
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 3-of-3*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.

    Returns:
        view_target_data (ndarray): N x 3K blob of regression targets
        view_loss_weights (ndarray): N x 3K blob of loss weights
    """
    view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
    view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
    inds = np.where( (clss > 0) & np.isfinite(viewpoint_data[:,0]) & np.isfinite(viewpoint_data[:,1]) & np.isfinite(viewpoint_data[:,2]) )[0]
    for ind in inds:
        cls = clss[ind]
        start = 3 * cls
        end = start + 3
        view_targets[ind, start:end] = viewpoint_data[ind, :]
        view_loss_weights[ind, start:end] = [1., 1., 1.]

    assert not np.isinf(view_targets).any(), 'viewpoint undefined'
    return view_targets, view_loss_weights
voice_enhancement.py (project: pyssp, author: shunsukeaihara)
def compute_by_noise_pow(self, signal, n_pow):
        s_spec = np.fft.fftpack.fft(signal * self._window)
        s_amp = np.absolute(s_spec)
        s_phase = np.angle(s_spec)
        gamma = self._calc_aposteriori_snr(s_amp, n_pow)
        xi = self._calc_apriori_snr(gamma)
        self._prevGamma = gamma
        nu = gamma * xi / (1.0 + xi)
        self._G = (self._gamma15 * np.sqrt(nu) / gamma) * np.exp(-nu / 2.0) *\
                  ((1.0 + nu) * spc.i0(nu / 2.0) + nu * spc.i1(nu / 2.0))
        idx = np.less(s_amp ** 2.0, n_pow)
        self._G[idx] = self._constant
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = xi[idx] / (xi[idx] + 1.0)
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = self._constant
        self._G = np.maximum(self._G, 0.0)
        amp = self._G * s_amp
        amp = np.maximum(amp, 0.0)
        amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
        self._prevAmp = amp
        spec = amp2 * np.exp(s_phase * 1j)
        return np.real(np.fft.fftpack.ifft(spec))
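
The two np.isnan(...) + np.isinf(...) passes above form a fallback chain for the spectral gain; an isolated sketch of that pattern (assuming numpy as np; the 0.1 below merely stands in for self._constant):

import numpy as np

G = np.array([0.8, np.nan, np.inf])
xi = np.array([2.0, 3.0, np.nan])        # a priori SNR per frequency bin
bad = np.isnan(G) + np.isinf(G)          # boolean OR written as addition of boolean arrays
G[bad] = xi[bad] / (xi[bad] + 1.0)       # first fallback: the Wiener gain xi / (1 + xi)
bad = np.isnan(G) + np.isinf(G)          # bins that are still NaN/Inf (here xi itself was NaN)
G[bad] = 0.1                             # second fallback: a small constant
print(G)                                 # [0.8  0.75 0.1 ]
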
voice_enhancement.py (project: pyssp, author: shunsukeaihara)
def compute_by_noise_pow(self, signal, n_pow):
        s_spec = np.fft.fftpack.fft(signal * self._window)
        s_amp = np.absolute(s_spec)
        s_phase = np.angle(s_spec)
        gamma = self._calc_aposteriori_snr(s_amp, n_pow)
        xi = self._calc_apriori_snr(gamma)
        # xi = self._calc_apriori_snr2(gamma,n_pow)
        self._prevGamma = gamma
        nu = gamma * xi / (1.0 + xi)
        self._G = xi / (1.0 + xi) * np.exp(0.5 * spc.exp1(nu))
        idx = np.less(s_amp ** 2.0, n_pow)
        self._G[idx] = self._constant
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = xi[idx] / (xi[idx] + 1.0)
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = self._constant
        self._G = np.maximum(self._G, 0.0)
        amp = self._G * s_amp
        amp = np.maximum(amp, 0.0)
        amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
        self._prevAmp = amp
        spec = amp2 * np.exp(s_phase * 1j)
        return np.real(np.fft.fftpack.ifft(spec))
voice_enhancement.py (project: pyssp, author: shunsukeaihara)
def compute_by_noise_pow(self, signal, n_pow):
        s_spec = np.fft.fftpack.fft(signal * self._window)
        s_amp = np.absolute(s_spec)
        s_phase = np.angle(s_spec)
        gamma = self._calc_aposteriori_snr(s_amp, n_pow)
        # xi = self._calc_apriori_snr2(gamma,n_pow)
        xi = self._calc_apriori_snr(gamma)
        self._prevGamma = gamma
        u = 0.5 - self._mu / (4.0 * np.sqrt(gamma * xi))
        self._G = u + np.sqrt(u ** 2.0 + self._tau / (gamma * 2.0))
        idx = np.less(s_amp ** 2.0, n_pow)
        self._G[idx] = self._constant
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = xi[idx] / (xi[idx] + 1.0)
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = self._constant
        self._G = np.maximum(self._G, 0.0)
        amp = self._G * s_amp
        amp = np.maximum(amp, 0.0)
        amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
        self._prevAmp = amp
        spec = amp2 * np.exp(s_phase * 1j)
        return np.real(np.fft.fftpack.ifft(spec))
features.py (project: CryptoBot, author: AdeelMufti)
def get_power_adjusted_price(books, n=10, power=2):
    '''
    Returns the percent change of an average of order prices weighted by inverse
    distance-weighted volume for each data point in a DataFrame of book data
    '''

    def calc_adjusted_price(book):
        def calc(x):
            return 0 if x.price-book.mid==0 else x.amount*(.5*book.width/(x.price-book.mid))**power
        bid_inv = 1/book.bids.iloc[:n].apply(calc, axis=1)
        ask_inv = 1/book.asks.iloc[:n].apply(calc, axis=1)
        bid_price = book.bids.price.iloc[:n]
        ask_price = book.asks.price.iloc[:n]
        sum_numerator = (bid_price*bid_inv + ask_price*ask_inv).sum()
        sum_denominator = (bid_inv + ask_inv).sum()
        # if np.isnan(sum_numerator) or np.isinf(sum_numerator) or sum_numerator == 0.0 or np.isnan(sum_denominator) or np.isinf(sum_denominator) or sum_denominator == 0.0:
        #     return 0
        quotient = sum_numerator / sum_denominator
        # if quotient < 0.0:
        #     return 0
        return quotient
    adjusted = books.apply(calc_adjusted_price, axis=1)
    return (adjusted/books.mid).apply(log).fillna(0)
self_driving_V1.1.py (project: my_research, author: MorvanZhou)
def get_reward(self, next_state):
        p, v, ID, a = next_state['p'], next_state['v'], int(next_state['ID']), next_state['a']
        p_f, v_f, l_f = next_state['p_l1'], next_state['v_l1'], next_state['l_l1']

        distance = (p_f-l_f) - p
        h = distance / v
        h = 10 if np.isinf(h) else h        # cap headway when v == 0 so the reward stays finite
        #desired_headway = 1

        if h < 1.3 and h >= 1:
            reward = 4*(1.3-h)
        elif h > 0.7 and h < 1:
            reward = 4*(h-0.7)
        elif h >= 1.3:
            reward = -2*(h-1.3)
        else:
            # h<=0.7
            reward = -1*(0.7-h)

        self.cars[ID].reward = reward
        return reward
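
For reference, the headway-to-reward mapping above can be restated as a standalone sketch and checked at a few points; the value 10 corresponds to the np.isinf guard that caps the headway when the following speed v is zero.

def headway_reward(h):
    # piecewise reward, peaking at a headway of 1.0 s and penalising both
    # tailgating (h <= 0.7) and overly large gaps (h >= 1.3)
    if 1.0 <= h < 1.3:
        return 4 * (1.3 - h)
    elif 0.7 < h < 1.0:
        return 4 * (h - 0.7)
    elif h >= 1.3:
        return -2 * (h - 1.3)
    else:
        return -1 * (0.7 - h)

print(round(headway_reward(1.0), 3))    # 1.2   (peak)
print(round(headway_reward(0.5), 3))    # -0.2  (too close)
print(round(headway_reward(10.0), 3))   # -17.4 (the capped headway when v == 0)
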

