Example source code for the Python beta() class

beta_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testBetaCdf(self):
    with self.test_session():
      shape = (30, 40, 50)
      for dt in (np.float32, np.float64):
        a = 10. * np.random.random(shape).astype(dt)
        b = 10. * np.random.random(shape).astype(dt)
        x = np.random.random(shape).astype(dt)
        actual = beta_lib.Beta(a, b).cdf(x).eval()
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
        self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
beta_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testBetaLogCdf(self):
    with self.test_session():
      shape = (30, 40, 50)
      for dt in (np.float32, np.float64):
        a = 10. * np.random.random(shape).astype(dt)
        b = 10. * np.random.random(shape).astype(dt)
        x = np.random.random(shape).astype(dt)
        actual = math_ops.exp(beta_lib.Beta(a, b).log_cdf(x)).eval()
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
        self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
        self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
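
Both tests above check TensorFlow's Beta distribution CDF (and the exponential of its log CDF) against the SciPy reference implementation. As a minimal standalone sketch of that reference computation, assuming only NumPy and SciPy (the TensorFlow pieces belong to the surrounding test class):

import numpy as np
from scipy import stats

# Random concentration parameters and evaluation points in (0, 1),
# with the same shape as in the tests above.
shape = (30, 40, 50)
a = 10. * np.random.random(shape)
b = 10. * np.random.random(shape)
x = np.random.random(shape)

# Reference CDF values; the tests require the TensorFlow results to
# match these to within rtol=1e-4.
reference = stats.beta.cdf(x, a, b)
print(reference.shape)  # (30, 40, 50)
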
logit_rules.py (project: mitre, author: gerberlab)
def __init__(self, model, r0, local_updates_per_structure_update=10):
        self.model = model
        # Ensure r0 is sorted within each sublist
        r0 = r0.copy()
        for i in range(len(r0)):
            model.rule_population.sort_list_of_primitives(r0[i])
        # This may be a bad choice of omega_0...
        omega = np.ones(self.model.data.y.shape) 
        # Come up with some sensible initial values for the various
        # structure parameters
        phylogeny_mean = self.model.phylogeny_lambda_l
        phylogeny_std = min(self.model.phylogeny_std_upper_bound,
                            5.0*self.model.phylogeny_delta_l)
        self.current_state = LogisticRuleState(
            r0,omega,np.zeros(len(r0)+model.data.n_fixed_covariates),
            phylogeny_mean, phylogeny_std
         )
        # Could include X in the state except we don't really want to 
        # track it over the whole sampling process; definitely do 
        # need it to persist between subparts of iterations at least, though
        self.current_X = self.model.data.covariate_matrix(r0)
        # We specified an arbitrary beta: we will overwrite it when we draw
        # it from its conditional in the first iteration. Because this
        # is not a valid value of beta, we don't add the initial state
        # to self.states.
        self.initial_state = self.current_state.copy()
        self.states = []
        ### The following are not updated after updating beta/z, but are 
        # after every change or attempted change to the rule list structure.
        self.likelihoods = []
        self.priors = [] 
        self.comments = []
        self.attempts = {}
        self.successes = {}
        self.local_updates_per_structure_update = (
            local_updates_per_structure_update
            )
        # We do want some better resolution on the distribution of 
        # coefficients for each state, so we'll store the interstitial beta
        # samples before each structure update step:
        self.additional_beta_values = []

        ### These don't conceptually need to be attributes of the sampler
        # object but this way we avoid recalculating them every iteration
        self.phylogeny_mean_proposal_std = (
            0.5*self.model.phylogeny_delta_l
        )
        self.phylogeny_std_proposal_std = (
            0.5*self.model.phylogeny_delta_l
        )
        self.window_fraction_proposal_std = (
            self.model.tmin /
            (self.model.data.experiment_end - 
             self.model.data.experiment_start)
        )
        self.window_concentration_proposal_std = (
            self.model.window_concentration_typical *
            self.model.window_concentration_update_ratio
        )
priors.py (project: ExoSOFT, author: kylemede)
def __init__(self, ecc_prior=True, p_prior=True, inc_prior=True,
                 m1_prior=True, m2_prior=True, para_prior=True,
                 para_est=0, para_err=0, m1_est=0, m1_err=0, m2_est=0, m2_err=0,
                 ecc_beta_a=0.867, ecc_beta_b=3.03,
                 ecc_J08_sig=0.3, ecc_Rexp_lamda=5.12, ecc_Rexp_a=0.781,
                 ecc_Rexp_sig=0.272, ecc_ST08_a=4.33, ecc_ST08_k=0.2431,p_gamma=-0.7,
                 mins_ary=[],maxs_ary=[]):
        ## check min and max range arrays
        if (len(mins_ary)>1) and (len(maxs_ary)>1):
            self.mins_ary = mins_ary
            self.maxs_ary = maxs_ary
        else:
            raise IOError('\n\n No min/max ranges were provided to the priors object!!')
        ## push in two manual constants
        self.days_per_year = 365.2422
        self.sec_per_year = 60*60*24*self.days_per_year
        ## choices
        ## choices:'2e', 'ST08','J08', 'RayExp', 'beta', 'uniform'. Default is 'beta'.
        self.e_prior = ecc_prior
        ## `best-fit' alpha and beta from Kipping+2013
        self.ecc_beta = stats.beta(ecc_beta_a,ecc_beta_b) #https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html
        ## 'best-fit' for the basic Rayleigh from Juric+2008
        self.ecc_J08_sig = ecc_J08_sig      ### Put this into the advanced settings ???
        ## `best-fit' alpha, lambda and sig from Kipping+2013
        self.ecc_Rexp_lamda = ecc_Rexp_lamda  ### Put this into the advanced settings ???
        self.ecc_Rexp_a = ecc_Rexp_a     ### Put this into the advanced settings ???
        self.ecc_Rexp_sig = ecc_Rexp_sig ### Put this into the advanced settings ???
        #https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rayleigh.html#scipy.stats.rayleigh
        self.ecc_R = stats.rayleigh() 
        #https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html
        self.ecc_exp = stats.expon()  
        ## `best-fit' for the Shen&Turner 2008 pdf
        self.ecc_ST08_a = ecc_ST08_a     ### Put this into the advanced settings ???
        self.ecc_ST08_k = ecc_ST08_k     ### Put this into the advanced settings ???

        #self.ecc_norm = stats.norm
        #self.ecc_norm_mean = ##
        #self.ecc_norm_sig = ##
        #self.ecc_norm.pdf(ecc,loc=self.ecc_norm_mean, scale=self.ecc_norm_sig)

        ## choices: power-law, Jeffrey's
        self.p_prior = p_prior
        self.p_gamma = p_gamma
        ## choices:sin, cos
        self.inc_prior = inc_prior
        ## choices:IMF, PDMF
        self.m1_prior = m1_prior
        ## choices:CMF, IMF, PDMF
        self.m2_prior = m2_prior
        ## choices:gauss
        self.para_prior = para_prior
        ## values necessary to calculate priors
        self.para_est = para_est
        self.para_err = para_err
        self.m1_est = m1_est
        self.m1_err = m1_err
        self.m2_est = m2_est
        self.m2_err = m2_err
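
The eccentricity prior above is a frozen scipy.stats.beta distribution with the Kipping (2013) shape parameters a=0.867, b=3.03. A minimal sketch of evaluating such a frozen prior, with an illustrative eccentricity grid that is not part of ExoSOFT:

import numpy as np
from scipy import stats

# Frozen beta prior with the Kipping (2013) shape parameters used above.
ecc_beta = stats.beta(0.867, 3.03)

# Evaluate the prior density on a small grid of eccentricities in [0, 1).
ecc = np.linspace(0.0, 0.99, 5)
print(ecc_beta.pdf(ecc))   # density is highest at low eccentricity
print(ecc_beta.mean())     # ~0.22 for these shape parameters
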
pullspectra.py (project: ThePayne, author: pacargile)
def __init__(self,**kwargs):
        # define the [Fe/H] array; these are the values at which the MIST
        # and C3K grids are built
        self.FeHarr = ([-4.0,-3.5,-3.0,-2.75,-2.5,-2.25,-2.0,-1.75,
            -1.5,-1.25,-1.0,-0.75,-0.5,-0.25,0.0,0.25,0.5])

        # define the [alpha/Fe] array
        self.alphaarr = [0.0,0.2,0.4]

        # define aliases for the MIST isochrones and C3K/CKC files
        self.MISTpath = kwargs.get('MISTpath',Payne.__abspath__+'data/MIST/')
        self.C3Kpath  = kwargs.get('C3Kpath',Payne.__abspath__+'data/C3K/')

        # load MIST models
        self.MIST = h5py.File(self.MISTpath+'/MIST_1.2_EEPtrk.h5','r')
        self.MISTindex = list(self.MIST['index'])

        # create weights for Teff
        # determine the min/max Teff from MIST
        MISTTeffmin = np.inf
        MISTTeffmax = 0.0
        for ind in self.MISTindex:
            MISTTeffmin_i = self.MIST[ind]['log_Teff'].min()
            MISTTeffmax_i = self.MIST[ind]['log_Teff'].max()
            if MISTTeffmin_i < MISTTeffmin:
                MISTTeffmin = MISTTeffmin_i
            if MISTTeffmax_i > MISTTeffmax:
                MISTTeffmax = MISTTeffmax_i
        self.teffwgts = beta(0.5,0.5,loc=MISTTeffmin-10.0,scale=(MISTTeffmax+10.0)-(MISTTeffmin-10.0))

        # create weights for [Fe/H]
        self.fehwgts = beta(1.0,0.5,loc=-2.1,scale=2.7).pdf(self.FeHarr)
        self.fehwgts = self.fehwgts/np.sum(self.fehwgts)

        # create a dictionary for the C3K models and populate it for different
        # metallicities
        self.C3K = {}
        for aa in self.alphaarr:
            self.C3K[aa] = {}
            for mm in self.FeHarr:
                self.C3K[aa][mm] = h5py.File(
                    self.C3Kpath+'/c3k_v1.3_feh{0:+4.2f}_afe{1:+3.1f}.full.h5'.format(mm,aa),
                    'r')
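
The constructor above uses SciPy's four-parameter (loc/scale) form of the beta distribution to build sampling weights over the MIST Teff range and the [Fe/H] grid. A small sketch of the [Fe/H] weighting, reusing the loc=-2.1, scale=2.7 values from the code above but with an illustrative subset of the grid:

import numpy as np
from scipy.stats import beta

# Illustrative subset of the [Fe/H] grid defined above.
feh_grid = np.array([-2.0, -1.5, -1.0, -0.5, 0.0, 0.5])

# beta(1.0, 0.5) shifted and stretched onto [-2.1, 0.6], as in the code above.
weights = beta(1.0, 0.5, loc=-2.1, scale=2.7).pdf(feh_grid)
weights = weights / weights.sum()   # normalize so the weights sum to one
print(weights)   # more weight toward the metal-rich end of the grid
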
test_abc_smc_algorithm.py (project: pyabc, author: neuralyzer)
def test_beta_binomial_different_priors_initial_epsilon_from_sample(db_path,
                                                                    sampler):
    binomial_n = 5

    def model(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [model for _ in range(2)]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(800)
    a1, b1 = 1, 1
    a2, b2 = 10, 1
    parameter_given_model_prior_distribution = [Distribution(theta=RV("beta",
                                                                      a1, b1)),
                                                Distribution(theta=RV("beta",
                                                                      a2, b2))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(median_multiplier=.9),
                 sampler=sampler)
    n1 = 2
    abc.new(db_path, {"result": n1})

    minimum_epsilon = -1
    history = abc.run(minimum_epsilon, max_nr_populations=5)
    mp = history.get_model_probabilities(history.max_t)

    def B(a, b):
        return gamma(a) * gamma(b) / gamma(a + b)

    def expected_p(a, b, n1):
        return binom(binomial_n, n1) * B(a + n1, b + binomial_n - n1) / B(a, b)

    p1_expected_unnormalized = expected_p(a1, b1, n1)
    p2_expected_unnormalized = expected_p(a2, b2, n1)
    p1_expected = p1_expected_unnormalized / (p1_expected_unnormalized +
                                              p2_expected_unnormalized)
    p2_expected = p2_expected_unnormalized / (p1_expected_unnormalized +
                                              p2_expected_unnormalized)

    assert abs(mp.p[0] - p1_expected) + abs(mp.p[1] - p2_expected) < .08
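
The expected model probabilities checked above come from the beta-binomial marginal likelihood P(n1 | a, b) = C(N, n1) * B(a + n1, b + N - n1) / B(a, b), where B is the beta function. A self-contained sketch of that calculation for the two priors used in the test, assuming scipy.special is available:

from scipy.special import gamma, binom

binomial_n = 5   # number of Bernoulli trials in the model above
n1 = 2           # observed count passed to abc.new()

def B(a, b):
    # Beta function written via the gamma function, as in the test above.
    return gamma(a) * gamma(b) / gamma(a + b)

def expected_p(a, b, n1):
    # Beta-binomial marginal likelihood of observing n1 successes.
    return binom(binomial_n, n1) * B(a + n1, b + binomial_n - n1) / B(a, b)

# Two candidate priors: Beta(1, 1) (uniform) and Beta(10, 1) (skewed high).
p1 = expected_p(1, 1, n1)
p2 = expected_p(10, 1, n1)
total = p1 + p2
print(p1 / total, p2 / total)   # expected posterior model probabilities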

