Python pdf() usage examples (source code)

test_kde.py (project: ChainConsumer, author: Samreay)
def test_megkde_2d_basic():
    # Draw from normal, fit KDE, see if sampling from kde's pdf recovers norm
    np.random.seed(1)
    data = np.random.multivariate_normal([0, 1], [[1.0, 0.], [0., 0.75 ** 2]], size=10000)
    xs, ys = np.linspace(-4, 4, 50), np.linspace(-4, 4, 50)
    xx, yy = np.meshgrid(xs, ys, indexing='ij')
    samps = np.vstack((xx.flatten(), yy.flatten())).T
    zs = MegKDE(data).evaluate(samps).reshape(xx.shape)
    zs_x = zs.sum(axis=1)
    zs_y = zs.sum(axis=0)
    cs_x = zs_x.cumsum()
    cs_x /= cs_x[-1]
    cs_x[0] = 0
    cs_y = zs_y.cumsum()
    cs_y /= cs_y[-1]
    cs_y[0] = 0
    samps_x = interp1d(cs_x, xs)(np.random.uniform(size=10000))
    samps_y = interp1d(cs_y, ys)(np.random.uniform(size=10000))
    mu_x, std_x = norm.fit(samps_x)
    mu_y, std_y = norm.fit(samps_y)
    assert np.isclose(mu_x, 0, atol=0.1)
    assert np.isclose(std_x, 1.0, atol=0.1)
    assert np.isclose(mu_y, 1, atol=0.1)
    assert np.isclose(std_y, 0.75, atol=0.1)
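
The test recovers the input normal by inverse-transform sampling: tabulate the KDE, build a normalised CDF, then push uniform draws through the inverted CDF. A minimal 1D sketch of the same trick (standalone and hypothetical; only numpy/scipy assumed):

import numpy as np
from scipy.interpolate import interp1d
from scipy.stats import norm

xs = np.linspace(-4, 4, 200)
pdf = norm.pdf(xs)                  # stand-in for a KDE evaluated on a grid
cdf = pdf.cumsum()
cdf /= cdf[-1]                      # normalise so the CDF ends at 1
cdf[0] = 0                          # pin the lower end so interp1d covers [0, 1]
samples = interp1d(cdf, xs)(np.random.uniform(size=10000))
print(samples.mean(), samples.std())  # ~0 and ~1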
test_analysis.py (project: ChainConsumer, author: Samreay)
def test_grid_data(self):
        x, y = np.linspace(-3, 3, 200), np.linspace(-5, 5, 200)
        xx, yy = np.meshgrid(x, y, indexing='ij')
        xs, ys = xx.flatten(), yy.flatten()
        chain = np.vstack((xs, ys)).T
        pdf = (1 / (2 * np.pi)) * np.exp(-0.5 * (xs * xs + ys * ys / 4))
        c = ChainConsumer()
        c.add_chain(chain, parameters=['x', 'y'], weights=pdf, grid=True)
        summary = c.analysis.get_summary()
        x_sum = summary['x']
        y_sum = summary['y']
        expected_x = np.array([-1.0, 0.0, 1.0])
        expected_y = np.array([-2.0, 0.0, 2.0])
        threshold = 0.1
        assert np.all(np.abs(expected_x - x_sum) < threshold)
        assert np.all(np.abs(expected_y - y_sum) < threshold)
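
The expected summaries follow from the weight function: exp(-0.5 * (x**2 + y**2 / 4)) is a bivariate normal with sigma_x = 1 and sigma_y = 2, so the mode and the +/- 1 sigma marginal bounds are [-1, 0, 1] and [-2, 0, 2]. A quick check of the x marginal width from the grid weights (a sketch, numpy only):

import numpy as np

x, y = np.linspace(-3, 3, 200), np.linspace(-5, 5, 200)
xx, yy = np.meshgrid(x, y, indexing='ij')
w = np.exp(-0.5 * (xx ** 2 + yy ** 2 / 4))
wx = w.sum(axis=1)                                  # marginal weight over x
std_x = np.sqrt(np.sum(wx * x ** 2) / np.sum(wx))
print(std_x)                                        # close to 1.0 (grid truncation shaves it slightly)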
test_analysis.py (project: ChainConsumer, author: Samreay)
def test_summary_max_symmetric_2(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.6827
        c.configure(statistics="max_symmetric", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(0, 2, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        xmax = xs[pdf.argmax()]
        cdf_top = skewnorm.cdf(summary[2], 5, 1, 1.5)
        cdf_bottom = skewnorm.cdf(summary[0], 5, 1, 1.5)
        area = cdf_top - cdf_bottom

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(area, summary_area, atol=0.05)
        assert np.isclose(summary[2] - summary[1], summary[1] - summary[0])
test_analysis.py (project: ChainConsumer, author: Samreay)
def test_summary_max_symmetric_3(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.95
        c.configure(statistics="max_symmetric", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(0, 2, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        xmax = xs[pdf.argmax()]
        cdf_top = skewnorm.cdf(summary[2], 5, 1, 1.5)
        cdf_bottom = skewnorm.cdf(summary[0], 5, 1, 1.5)
        area = cdf_top - cdf_bottom

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(area, summary_area, atol=0.05)
        assert np.isclose(summary[2] - summary[1], summary[1] - summary[0])
test_analysis.py (project: ChainConsumer, author: Samreay)
def test_summary_max_shortest_2(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.6827
        c.configure(statistics="max_shortest", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        x2 = interp1d(cdf, xs, bounds_error=False, fill_value=np.inf)(cdf + summary_area)
        dist = x2 - xs
        ind = np.argmin(dist)
        x0 = xs[ind]
        x2 = x2[ind]
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(x0, summary[0], atol=0.05)
        assert np.isclose(x2, summary[2], atol=0.05)
test_analysis.py (project: ChainConsumer, author: Samreay)
def test_summary_max_shortest_3(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.95
        c.configure(statistics="max_shortest", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        x2 = interp1d(cdf, xs, bounds_error=False, fill_value=np.inf)(cdf + summary_area)
        dist = x2 - xs
        ind = np.argmin(dist)
        x0 = xs[ind]
        x2 = x2[ind]
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(x0, summary[0], atol=0.05)
        assert np.isclose(x2, summary[2], atol=0.05)
test_analysis.py (project: ChainConsumer, author: Samreay)
def test_summary_max_central_2(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.6827
        c.configure(statistics="max_central", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        xval = interp1d(cdf, xs)([0.5 - 0.5 * summary_area, 0.5 + 0.5 * summary_area])
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(xval[0], summary[0], atol=0.05)
        assert np.isclose(xval[1], summary[2], atol=0.05)
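
The three statistics modes tested above report the same maximum but choose bounds differently. A hedged sketch computing all three interval types directly from the skew normal used in these tests (interval definitions reconstructed from the assertions, not copied from ChainConsumer):

import numpy as np
from scipy.interpolate import interp1d
from scipy.optimize import brentq
from scipy.stats import skewnorm

area = 0.6827
xs = np.linspace(-1, 5, 10000)
pdf, cdf = skewnorm.pdf(xs, 5, 1, 1.5), skewnorm.cdf(xs, 5, 1, 1.5)
xmax = xs[pdf.argmax()]

# max_central: equal tail mass on each side of the median
lo_c, hi_c = interp1d(cdf, xs)([0.5 - area / 2, 0.5 + area / 2])

# max_shortest: the narrowest interval containing `area`
hi = interp1d(cdf, xs, bounds_error=False, fill_value=np.inf)(cdf + area)
i = np.argmin(hi - xs)
lo_s, hi_s = xs[i], hi[i]

# max_symmetric: bounds equidistant from the mode, containing `area`
f = lambda w: skewnorm.cdf(xmax + w, 5, 1, 1.5) - skewnorm.cdf(xmax - w, 5, 1, 1.5) - area
w = brentq(f, 1e-6, 3.0)
lo_m, hi_m = xmax - w, xmax + w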
pyglmnet.py (project: pyglmnet, author: glm-tools)
def _logL(distr, y, y_hat):
    """The log likelihood."""
    if distr in ['softplus', 'poisson']:
        eps = np.spacing(1)
        logL = np.sum(y * np.log(y_hat + eps) - y_hat)
    elif distr == 'gaussian':
        logL = -0.5 * np.sum((y - y_hat)**2)
    elif distr == 'binomial':
        # analytical formula
        logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))

        # but this prevents underflow
        # z = beta0 + np.dot(X, beta)
        # logL = np.sum(y * z - np.log(1 + np.exp(z)))
    elif distr == 'probit':
        logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))
    elif distr == 'gamma':
        # see
        # https://www.statistics.ma.tum.de/fileadmin/w00bdb/www/czado/lec8.pdf
        nu = 1.  # shape parameter, exponential for now
        logL = np.sum(nu * (-y / y_hat - np.log(y_hat)))
    return logL
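
A hypothetical call, to show the expected shapes: y and y_hat are 1-D arrays of targets and predicted means, and the return value is a single float (larger is better):

import numpy as np

y = np.array([0., 1., 1., 0.])
y_hat = np.array([0.1, 0.8, 0.7, 0.2])   # predicted probabilities for 'binomial'
print(_logL('binomial', y, y_hat))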
makeMotionFiles.py (project: DW-POSSUM, author: marksgraham)
def add_motion_spikes(motion_mat, frequency, severity, TR):
    time = motion_mat[:, 0]
    # Max translation in m; the sqrt(2*pi) factor cancels the normalisation in norm.pdf below
    max_translation = 5 * severity / 1000 * np.sqrt(2 * np.pi)
    max_rotation = np.radians(5 * severity) * np.sqrt(2 * np.pi)  # max rotation in rad
    time_blocks = np.floor(time[-1] / TR).astype(np.int32)
    for i in range(time_blocks):
        if np.random.uniform(0, 1) < frequency:  # decide whether to add a spike
            for j in range(1, 4):
                if np.random.uniform(0, 1) < 1.0 / 6.0:  # spike each axis with probability 1/6
                    motion_mat[:, j] = motion_mat[:, j] \
                        + max_translation * random.uniform(-1, 1) \
                        * norm.pdf(time, loc=(i + 0.5) * TR, scale=TR / 5)
            for j in range(4, 7):
                if np.random.uniform(0, 1) < 1.0 / 6.0:
                    motion_mat[:, j] = motion_mat[:, j] \
                        + max_rotation * random.uniform(-1, 1) \
                        * norm.pdf(time, loc=(i + 0.5 + np.random.uniform(-0.25, 0.25)) * TR, scale=TR / 5)  # jitter the spike centre by up to a quarter TR
    return motion_mat
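
A hedged usage sketch; the real motion matrix comes from earlier in the DW-POSSUM pipeline, so a zero-motion matrix with a time column is assumed here (column 0 is time, 1-3 translations, 4-6 rotations):

import numpy as np

TR = 3.0
time = np.arange(0.0, 300.0, 0.1)
motion_mat = np.zeros((time.size, 7))
motion_mat[:, 0] = time
motion_mat = add_motion_spikes(motion_mat, frequency=0.2, severity=1.0, TR=TR)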
CBlackModel.py (project: MarkovModels, author: pmontalb)
def calculate_vega(forward, strike, implied_sigma, annuity, days_to_maturity):
    """ This function returns Vega, which is the same for both payer/receiver
    :param forward:
    :param strike:
    :param implied_sigma:
    :param annuity:
    :param days_to_maturity:
    :return:
    """
    from scipy.stats import norm
    from math import sqrt
    d1 = calculate_d1(forward, strike, implied_sigma, days_to_maturity)
    year_fraction = float(days_to_maturity) / 365
    vega = annuity * forward * sqrt(year_fraction) * norm.pdf(d1)

    return vega
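
calculate_d1 lives elsewhere in the module; a sketch under the usual Black-76 convention (an assumption about its definition, not the project's verbatim code):

from math import log, sqrt

def calculate_d1(forward, strike, implied_sigma, days_to_maturity):
    # Assumed Black-76 d1 with zero drift on the forward
    year_fraction = float(days_to_maturity) / 365
    return (log(forward / strike) + 0.5 * implied_sigma ** 2 * year_fraction) \
        / (implied_sigma * sqrt(year_fraction))

# At the money, d1 reduces to 0.5 * implied_sigma * sqrt(t)
print(calculate_vega(forward=0.02, strike=0.02, implied_sigma=0.2,
                     annuity=4.5, days_to_maturity=365))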
gaussian_mixture_loss.py (project: wide-deep-cnn, author: DaniUPC)
def manual_loss(self, logits, targets, K):
        """ Computes the Gaussian Mixture Loss out of the graph computation """
        mixs, sigmas, means = logits[:, 0:K], logits[:, K:2*K], logits[:, 2*K:]
        mixs = np.exp(mixs) / np.sum(np.exp(mixs), axis=1, keepdims=True)  # row-wise softmax
        sigmas = np.exp(sigmas)
        # Compute over all instances
        logexps = []
        for i in range(self.batch_size):
            sumexp = np.sum(
                [
                    mixs[i, k] *
                    norm.pdf(targets[i], means[i, k], sigmas[i, k])
                    for k in range(K)
                ]
            )
            logexps.append(np.log(sumexp))
        return -np.mean(logexps)
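
The same negative log-likelihood written without the class, as a quick sanity check (hypothetical K = 2 mixture over a batch of 3 targets; norm.pdf broadcasts across components):

import numpy as np
from scipy.stats import norm

mixs = np.array([[0.6, 0.4], [0.5, 0.5], [0.3, 0.7]])   # already softmaxed
means = np.array([[0.0, 1.0], [0.5, 1.5], [1.0, 2.0]])
sigmas = np.ones_like(means)
targets = np.array([0.2, 1.0, 1.8])
lik = np.sum(mixs * norm.pdf(targets[:, None], means, sigmas), axis=1)
print(-np.mean(np.log(lik)))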
acquisition.py (project: pyGPGO, author: hawk31)
def ExpectedImprovement(self, tau, mean, std):
        """
        Expected Improvement acquisition function.

        Parameters
        ----------
        tau: float
            Best observed function evaluation.
        mean: float
            Point mean of the posterior process.
        std: float
            Point std of the posterior process.

        Returns
        -------
        float
            Expected improvement.
        """
        z = (mean - tau - self.eps) / (std + self.eps)
        return (mean - tau) * norm.cdf(z) + std * norm.pdf(z)
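
Plugging in scalars shows the balance between exploitation (mean - tau) and exploration (std); a standalone check of the same formula, with eps as a hypothetical jitter:

import numpy as np
from scipy.stats import norm

tau, mean, std, eps = 1.0, 1.2, 0.3, 1e-8
z = (mean - tau - eps) / (std + eps)
print((mean - tau) * norm.cdf(z) + std * norm.pdf(z))   # ~0.25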
acquisition.py (project: pyGPGO, author: hawk31)
def tExpectedImprovement(self, tau, mean, std, nu=3.0):
        """
        Expected Improvement acquisition function. Only to be used with `tStudentProcess` surrogate.

        Parameters
        ----------
        tau: float
            Best observed function evaluation.
        mean: float
            Point mean of the posterior process.
        std: float
            Point std of the posterior process.

        Returns
        -------
        float
            Expected improvement.
        """
        gamma = (mean - tau - self.eps) / (std + self.eps)
        return gamma * std * t.cdf(gamma, df=nu) + std * (1 + (gamma ** 2 - 1)/(nu - 1)) * t.pdf(gamma, df=nu)
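
As nu grows the Student-t correction vanishes and the Gaussian EI above is recovered; a quick numerical check (values hypothetical):

import numpy as np
from scipy.stats import norm, t

gamma, std = 0.667, 0.3
for nu in (3.0, 30.0, 300.0):
    ei_t = gamma * std * t.cdf(gamma, df=nu) \
        + std * (1 + (gamma ** 2 - 1) / (nu - 1)) * t.pdf(gamma, df=nu)
    print(nu, ei_t)
print('gaussian limit:', std * (gamma * norm.cdf(gamma) + norm.pdf(gamma)))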
tune_hyperparms_regression.py (project: Gaussian_process, author: happyjin)
def EI(params, means, stand_devi, parms_done, y, n_iterations, k):
    """
    Expected Improvement acquisition function
    :param params: test data
    :param means: GP posterior mean
    :param stand_devi: standard deviation
    :param parms_done: training data
    :param y: training targets
    :return: next point that need to pick up
    """
    s = 0.0005  # small value
    max_mean = np.max(y)

    f_max = max_mean + s
    z = (means - f_max) / stand_devi
    EI_vector = (means - f_max) * norm.cdf(z) + stand_devi * norm.pdf(z)
    max_index = np.where(EI_vector == np.max(EI_vector))
    next_point = params[max_index]
    if k == n_iterations-1:
        plt.subplot(2, 1, 2)
        plt.plot(params, EI_vector, label='EI')
        plt.legend(loc=3)
    return next_point
minibatch_discrimination.py (project: the-neural-perspective, author: GokuMohandas)
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
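
plot_data_and_D expects a TensorFlow FLAGS-style object plus a trained session/model; a minimal stand-in for the p_data half alone (hypothetical values, no session needed):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from types import SimpleNamespace

FLAGS = SimpleNamespace(mu=4.0, sigma=0.5, num_points=1000, batch_size=100)
X = np.linspace(int(FLAGS.mu - 3.0 * FLAGS.sigma),
                int(FLAGS.mu + 3.0 * FLAGS.sigma), FLAGS.num_points)
plt.plot(X, norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma), label='p_data')
plt.legend()
plt.show()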
gan.py (project: the-neural-perspective, author: GokuMohandas)
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
feature_matching.py (project: the-neural-perspective, author: GokuMohandas)
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='D')

    plt.legend()
    plt.show()
metrics.py (project: PyCS, author: COSMOGRAIL)
def combigauss(subtds, subtderrs, truetds, lensmodelsigma = 0.0):
    """
    Give me submission delays and error bars, as well as the corresponding true delays, in form of numpy arrays.
    I compute the mean and sigma of the combined posterior on the fractional time delay distance error.
    """

    from scipy.stats import norm

    subtdoffs = subtds - truetds
    centers = subtdoffs/truetds
    sigmas = subtderrs/np.fabs(truetds)

    # We convolve with the lensmodelsigma:
    sigmas = np.sqrt(sigmas**2 + lensmodelsigma**2)

    sigma_combi = 1.0 / np.sqrt(np.sum(1.0 / (sigmas**2)))
    center_combi = sigma_combi**2 * np.sum( centers/sigmas**2 )

    probazero = norm.pdf(0.0, center_combi, sigma_combi)

    return (center_combi, sigma_combi, probazero)

    # To plot this you could do:
    #plt.plot(x, norm.pdf(x, center_combi, sigma_combi), ls="--", color="black")
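
A hypothetical call with three submitted delays: the function returns the combined fractional bias, its sigma, and the posterior density at zero error:

import numpy as np

subtds = np.array([10.2, -5.1, 30.5])    # submitted delays (days)
subtderrs = np.array([0.8, 0.6, 1.5])    # their 1-sigma errors
truetds = np.array([10.0, -5.0, 30.0])   # true delays
center, sigma, p0 = combigauss(subtds, subtderrs, truetds, lensmodelsigma=0.03)
print(center, sigma, p0)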
slam_08_d_density_error_ellipse.py (project: slam-tutorial-code, author: tuongngoc)
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        sigma_d = self.measurement_distance_stddev
        sigma_alpha = self.measurement_angle_stddev

        z = np.array(measurement)
        pred_z = np.array(predicted_measurement)
        z[1] = atan2(sin(z[1]), cos(z[1]))
        pred_z[1] = atan2(sin(pred_z[1]), cos(pred_z[1]))

        delta = z - pred_z
        delta[1] = atan2(sin(delta[1]), cos(delta[1]))

        P_d = normal_dist.pdf(delta[0], 0, sigma_d)
        P_alpha = normal_dist.pdf(delta[1], 0, sigma_alpha)

        return P_d * P_alpha
slam_08_c_density_estimation_question.py (project: slam-tutorial-code, author: tuongngoc)
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        sigma_d = self.measurement_distance_stddev
        sigma_alpha = self.measurement_angle_stddev

        z = np.array(measurement)
        pred_z = np.array(predicted_measurement)
        z[1] = atan2(sin(z[1]), cos(z[1]))
        pred_z[1] = atan2(sin(pred_z[1]), cos(pred_z[1]))

        delta = z - pred_z
        delta[1] = atan2(sin(delta[1]), cos(delta[1]))

        P_d = normal_dist.pdf(delta[0], 0, sigma_d)
        P_alpha = normal_dist.pdf(delta[1], 0, sigma_alpha)

        return P_d * P_alpha
slam_08_b_particle_correction_question.py (project: slam-tutorial-code, author: tuongngoc)
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        sigma_d = self.measurement_distance_stddev
        sigma_alpha = self.measurement_angle_stddev

        z = np.array(measurement)
        pred_z = np.array(predicted_measurement)
        z[1] = atan2(sin(z[1]), cos(z[1]))
        pred_z[1] = atan2(sin(pred_z[1]), cos(pred_z[1]))

        delta = z - pred_z
        delta[1] = atan2(sin(delta[1]), cos(delta[1]))

        P_d = normal_dist.pdf(delta[0], 0, sigma_d)
        P_alpha = normal_dist.pdf(delta[1], 0, sigma_alpha)

        return P_d * P_alpha
reader.py (project: crnn_tf, author: liuhu-bigeye)
def initiate_estimations(self):
        self.estimations.close()
        self.estimations = h5py.File(self.estimate_path, 'a')

        scale = 0.5
        eps = 1e-3
        # estimation shape (n_sample, max_h_len, max_t_len + 1)
        for idx in range(self.n_samples):
            token = self.tokens[idx]
            t_len = len(token)
            h_len = int(np.ceil(float(self.db['end_index'][idx] - self.db['begin_index'][idx] - self.c3d_depth) / float(self.depth_stride))) + 1

            steps = np.linspace(0, h_len-1, t_len)
            esti = [np.ones(h_len)[:, None]*eps] + [norm.pdf(np.arange(h_len)[:, None], loc=round(l), scale=scale) for l in steps]
            esti = np.hstack(esti)

            self.estimations[self.estimate_key][idx, :h_len, :t_len + 1] = copy.deepcopy(esti / esti.sum(axis=-1)[:, None] * 0.9)
        self.estimations.close()
        self.estimations = h5py.File(self.estimate_path, 'a')
visualiser.py (project: Machine-Learning-Projects, author: poke19962008)
def featureMapHist(lang='py', normed=True):
    freq = getValue(lang, type='freq')
    raw = np.array([])

    for i in range(len(freq)):
        raw = np.append(raw, [i] * freq[i])
    print("[SUCCESS] Calculated raw for", lang)

    (mu, sigma) = np.mean(raw), np.std(raw)
    pdf = norm.pdf(raw, mu, sigma)

    if not normed:
        plt.plot(raw, pdf, label="%s"%lang)
    else:
        ax = fig.add_subplot(3, 2, langs.index(lang)+1)
        plt.plot(raw, pdf)
        ax.hist(raw, density=True, alpha=0.75)
        ax.set_title(lang)
plot_helpers.py (project: Bayesian-Optimisation, author: hyperc54)
def updateInterface1D(self,solver,bbox,history,bounds):
        x = np.linspace(0, 1, 80)
        z = list(map(lambda y: bbox.queryAt([y]), x))
        xx=np.atleast_2d(x).T
        z_pred, sigma2_pred = solver.gp.predict(xx, eval_MSE=True)
        self.ax_scat.clear()
        self.ax_approx.clear()
        self.ax_eimap.clear()

        self.ax_scat.plot(x,np.array(z))
        self.ax_scat.scatter(np.array(history)[:,0],np.array(history)[:,1])
        self.ax_approx.plot(x,np.array(z_pred))
        self.ax_approx.fill_between(x,np.array(z_pred)+np.array(np.sqrt(sigma2_pred)),np.array(z_pred)-np.array(np.sqrt(sigma2_pred)),alpha=0.2)

        self.ax_approx.plot(x)
        target=min(np.array(history)[:,1])
        mean,variance=solver.gp.predict(xx,eval_MSE=True)
        z=(target-mean)/np.sqrt(variance)
        self.ax_approx.plot(x,np.sqrt(variance)*(z*norm.cdf(z)+norm.pdf(z)))
plot_helpers_un.py (project: Bayesian-Optimisation, author: hyperc54)
def update_interface_1D(ax,ax2,solver,bbox,history):
    x = np.linspace(0, 1, 80)
    z = list(map(lambda y: bbox.queryAt([y]), x))
    xx=np.atleast_2d(x).T
    z_pred, sigma2_pred = solver.gp.predict(xx, eval_MSE=True)
    ax.clear()
    ax2.clear()
    ax.plot(x,np.array(z))
    ax.scatter(np.array(history)[:,0],np.array(history)[:,1])
    ax2.plot(x,np.array(z_pred))
    ax2.fill_between(x,np.array(z_pred)+np.array(np.sqrt(sigma2_pred)),np.array(z_pred)-np.array(np.sqrt(sigma2_pred)),alpha=0.2)
    ax.set_xlim([0,1])
    ax2.set_xlim([0,1])
    ax2.plot(x)
    target=min(np.array(history)[:,1])
    mean,variance=solver.gp.predict(xx,eval_MSE=True)
    z=(target-mean)/np.sqrt(variance)
    ax2.plot(x,np.sqrt(variance)*(z*norm.cdf(z)+norm.pdf(z)))
plot_helpers_deux.py (project: Bayesian-Optimisation, author: hyperc54)
def update_interface_2D(ax,ax2,ax3,solver,bbox,history):
    x = np.linspace(0, 1, 200)
    y = np.linspace(0, 1, 200)
    x,y=np.meshgrid(x, y)
    xx=x.ravel()
    yy=y.ravel()
    #z=map(lambda x:bbox.queryAt(x),[np.array(i) for i in zip(xx,yy)])
    points_pred = np.array(list(map(lambda s: np.array(s), zip(xx, yy))))
    z_pred, sigma2_pred = solver.gp.predict(points_pred, eval_MSE=True)
    #target=min(map(lambda x:bbox.queryAt(x),[np.array(i) for i in history]))
    target=min(list(np.array(history)[:,1]))
    u=(target-z_pred)/np.sqrt(sigma2_pred)
    ax.clear()
    ax2.clear()
    #c1=ax.contourf(x,y,np.array(z).reshape(-1,len(x[0])))
    tt = np.array(list(map(np.asarray, np.array(history).reshape(-1, 2)[:, 0])))
    ax.scatter(tt[:,0],tt[:,1])

    c2=ax2.contourf(x,y,np.array(z_pred).reshape(-1,len(x[0])))
    c3=ax3.contourf(x,y,np.array(np.sqrt(sigma2_pred)*(u*norm.cdf(u)+norm.pdf(u))).reshape(-1,len(x[0])))
    ax2.scatter(tt[:,0],tt[:,1])
    ax3.scatter(tt[:,0],tt[:,1])
    #c1.set_clim(min(z),max(z))
    #c2.set_clim(min(z),max(z))
minibatch_discrimination.py (project: the-neural-perspective, author: johnsonc)
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
gan.py (project: the-neural-perspective, author: johnsonc)
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
feature_matching.py (project: the-neural-perspective, author: johnsonc)
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='D')

    plt.legend()
    plt.show()
slam_08_d_density_error_ellipse.py (project: SLAM, author: jfjensen)
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        d_true, alpha_true = measurement
        d_pred, alpha_pred = predicted_measurement

        # --->>> Compute difference in distance and bearing angle.
        d_diff = abs(d_pred - d_true)
        alpha_diff = (alpha_pred - alpha_true + pi) % (2 * pi) - pi

        # Important: make sure the angle difference works correctly and does
        # not return values offset by 2 pi or - 2 pi.
        # You may use the following Gaussian PDF function:
        # scipy.stats.norm.pdf(x, mu, sigma). With the import in the header,
        # this is normal_dist.pdf(x, mu, sigma).
        p_d = normal_dist.pdf(d_diff, 0, self.measurement_distance_stddev)
        p_alpha = normal_dist.pdf(alpha_diff, 0, self.measurement_angle_stddev)

        # Note that the two parameters sigma_d and sigma_alpha discussed
        # in the lecture are self.measurement_distance_stddev and
        # self.measurement_angle_stddev.
        return p_d * p_alpha
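
A standalone check of the angle wrap-around handled above: a predicted bearing of +pi against a measured bearing of -pi should score as a perfect match, not a 2*pi error (stddevs hypothetical):

from math import pi
from scipy.stats import norm as normal_dist

sigma_d, sigma_alpha = 200.0, 0.17                 # mm, rad (hypothetical)
d_diff = abs(505.0 - 500.0)
alpha_diff = ((-pi) - pi + pi) % (2 * pi) - pi     # wraps to 0.0
print(normal_dist.pdf(d_diff, 0, sigma_d) * normal_dist.pdf(alpha_diff, 0, sigma_alpha))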

