Example source code for Python's uniform()

modelSelection.py (project: scrap, author: BruceJohnJennerLawso)
def __init__(self, data, mleDiffCutoff=1.0):
        print([min(data), max(data)])

        distributions = [st.laplace, st.norm, st.expon, st.dweibull, st.invweibull, st.lognorm, st.uniform]
        mles = []

        for distribution in distributions:
            pars = distribution.fit(data)
            mle = distribution.nnlf(pars, data)
            mles.append(mle)

        results = [(distribution.name, mle) for distribution, mle in zip(distributions, mles)]

        for dist in sorted(zip(distributions, mles), key=lambda d: d[1]):
            print(dist)
        best_fit = sorted(zip(distributions, mles), key=lambda d: d[1])[0]
        print('Best fit reached using {}, MLE value: {}'.format(best_fit[0].name, best_fit[1]))

        self.modelSets = []

        self.modelOptions = [mod[0].name for mod in sorted(zip(distributions, mles), key=lambda d: d[1])]
        ## list of scipy distribution ids sorted by their MLEs given the data
        ## [0] is best, [1], next best and so on


        for model in sorted(zip(distributions, mles), key=lambda d: d[1]):
            if(model[0].name in getAvailableDistributionsByScipyIds()):
                try:
                    modelDist = getDistributionByScipyId(model[0].name, data)
                    self.modelSets.append([modelDist, model[1]])
                    ## append the distribution object and the MLE value for this
                    ## particular distribution & the data

                    ## ah frig, I think in the bimodal case, it will be
                    ## something like 
                except RuntimeError:
                    pass    
            else:
                ## nothing that can be done here; if we don't have an object for
                ## the required distribution available, we can't do much about it
                pass
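
The pattern above, fitting each candidate distribution and ranking by the value returned from nnlf() (a negative log-likelihood, so lower is better), can be tried standalone. A minimal sketch on synthetic data, with the candidate list shortened for illustration:

import scipy.stats as st

data = st.norm(loc=2.0, scale=1.5).rvs(size=500, random_state=0)

candidates = [st.norm, st.laplace, st.expon, st.uniform]
scores = []
for dist in candidates:
    params = dist.fit(data)                # maximum-likelihood parameter estimates
    nll = dist.nnlf(params, data)          # negative log-likelihood at those estimates
    scores.append((dist.name, nll))

for name, nll in sorted(scores, key=lambda s: s[1]):
    print(name, nll)                       # the best-fitting candidate prints first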
alf.py (project: alf-python, author: gbrammer)
def get_default_priors(param_names, limits=PRIOR_LIMITS):
        from scipy.stats import uniform
        from collections import OrderedDict

        prior = OrderedDict()
        for p in param_names:
            if p in limits:
                lim = limits[p]
                prior[p] = uniform(loc=lim[0], scale=lim[1]-lim[0])
            else:
                prior[p] = uniform(loc=-1.e10, scale=2.e10)

        return prior
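
The priors returned by get_default_priors() are frozen scipy.stats.uniform objects, so they can be sampled with .rvs() and evaluated with .pdf()/.logpdf() directly. A short usage sketch; the parameter names and limits below are made up, standing in for PRIOR_LIMITS:

from scipy.stats import uniform

# hypothetical limits standing in for PRIOR_LIMITS
limits = {'velz': (-1000.0, 1000.0), 'sigma': (10.0, 400.0)}
prior = {p: uniform(loc=lo, scale=hi - lo) for p, (lo, hi) in limits.items()}

print(prior['velz'].rvs(size=3, random_state=1))  # three draws from U(-1000, 1000)
print(prior['sigma'].logpdf(200.0))               # finite log-density inside the support
print(prior['sigma'].logpdf(5.0))                 # -inf outside the support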
coverage.py (project: ngsphy, author: merlyescalona)
def value(self,samples=1):
        """
        Samples number of values given from the specific distribution.
        ------------------------------------------------------------------------
        - samples: number of values that will be returned.
        """
        value=0
        try:
            for item in self.__params:
                if item==0: break
            if item==0: value=[0]*samples
            else:
                if self.__name=="b": value=self.binom(samples)
                if self.__name=="e": value=self.exponential(samples)
                if self.__name=="f": value=self.fixed(samples)
                if self.__name=="g": value=self.gamma(samples)
                if self.__name=="g1": value=self.gamma1(samples)
                if self.__name=="ln": value=self.lognormal(samples)
                if self.__name=="n": value=self.normal(samples)
                if self.__name=="nb": value=self.nbinom(samples)
                if self.__name=="p": value=self.poisson(samples)
                if self.__name=="u": value=self.uniform(samples)
        except Exception as ex:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            message="\n\tUnexpected: {0} | {1} - File: {2} - Line:{3}".format(\
                ex,exc_type, fname, exc_tb.tb_lineno)
            status=False
            raise Exception(message)
            # self.appLogger.error(message)
            # sys.exit()

        return value
coverage.py (project: ngsphy, author: merlyescalona)
def uniform(self,samples):
        """
        Sampling from a Poisson distribution
        Parameters:
        meean
        ------------------------------------------------------------------------
        - samples: number of values that will be returned.
        """
        minParam=float(self.__params[0]*1.0)
        maxParam=float(self.__params[1]*1.0)
        f= np.random.uniform(low=minParam,high=maxParam,size=samples)
        return f
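
For reference, np.random.uniform draws from the half-open interval [low, high), so the values returned here never reach maxParam exactly. A quick check:

import numpy as np

np.random.seed(0)
f = np.random.uniform(low=2.0, high=5.0, size=5)
print(f)
print(f.min() >= 2.0, f.max() < 5.0)   # True True: all draws fall in [2.0, 5.0)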
distro.py (project: pygcam, author: JGCRI)
def uniformMinMax(min, max):
    return uniform(loc=min, scale=(max - min))
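
scipy.stats.uniform is parameterized by loc and scale and covers [loc, loc + scale], so scale must be (max - min), not max; the helper above does exactly that. A quick sanity check (parameters renamed here to avoid shadowing the built-ins):

from scipy.stats import uniform

def uniform_min_max(min_, max_):
    return uniform(loc=min_, scale=(max_ - min_))

dist = uniform_min_max(2.0, 5.0)
print(dist.ppf(0.0), dist.ppf(1.0))        # 2.0 5.0: the support endpoints
print(dist.mean())                         # 3.5
print(dist.rvs(size=4, random_state=0))    # every draw lies within [2.0, 5.0]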
test_grid_search.py (project: Parallel-SGD, author: angadgill)
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)
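
The same sampler exists in scikit-learn's public API as sklearn.model_selection.ParameterSampler (the test above targets this repository's vendored copy). A minimal sketch of drawing a few hyperparameter settings:

from scipy.stats import uniform
from sklearn.model_selection import ParameterSampler

param_distributions = {"kernel": ["rbf", "linear"],
                       "C": uniform(0, 1)}   # loc=0, scale=1, i.e. U[0, 1]

for params in ParameterSampler(param_distributions, n_iter=5, random_state=0):
    print(params)   # dicts like {'kernel': ..., 'C': ...}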
fast_em.py (project: crayimage, author: yandexdataschool)
def gen(self, normal_mu_range, anomaly_mu_range):
    self.gens = [
      compound_distribution(
        stats.uniform(loc=anomaly_mu_range[0], scale=anomaly_mu_range[1] - anomaly_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      ),

      compound_distribution(
        stats.uniform(loc=normal_mu_range[0], scale=normal_mu_range[1] - normal_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      )
    ]

    self.priors = np.array([0.1, 0.9])

    n = 10
    MC = CameraMC(self.priors, self.gens, image_shape=(1, n, n), n_frames=100)

    self.cats, self.params, self.imgs = MC.get_sample()
    self.hists = ndcount(self.imgs).reshape(n, n, -1)
    self.hists = self.hists.astype('float32') / np.sum(self.hists, axis=2)[:, :, None]
    self.cats = self.cats.reshape(-1)

    print("Img shape %s" % (self.imgs.shape, ))
    print("Hists shape %s" % (self.hists.shape, ))
    print("Categories shape %s" % (self.cats.shape, ))
trainable_em.py (project: crayimage, author: yandexdataschool)
def gen(self, normal_mu_range, anomaly_mu_range):
    self.gens = [
      compound_distribution(
        stats.uniform(loc=anomaly_mu_range[0], scale=anomaly_mu_range[1] - anomaly_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      ),

      compound_distribution(
        stats.uniform(loc=normal_mu_range[0], scale=normal_mu_range[1] - normal_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      )
    ]

    self.priors = np.array([0.1, 0.9])

    n = 100
    m = 10
    bins = 64
    MC = CameraMC(self.priors, self.gens, image_shape=(1, n, ), n_frames=100, max_value=bins)

    X = np.ndarray(shape=(m, n, bins), dtype='float32')
    cats = np.ndarray(shape=(m, n), dtype='float32')

    for i in range(m):
      cats[i], _, imgs = MC.get_sample()
      h = ndcount(imgs, bins=bins)
      print(h.shape)
      h = h.reshape(n, bins)

      X[i] = h.astype('float32') / np.sum(h, axis=1)[:, None]

    print("X shape %s" % (X.shape, ))
    print("Categories shape %s" % (cats.shape, ))

    self.X = X
    self.cats = cats
bayesian_utils.py (project: crayimage, author: yandexdataschool)
def test_separable(self):
    bins = 10
    frames = 100

    comp1 = compound_distribution(
      parameter_distribution=stats.uniform(0.0, 0.25),
      signal_family=lambda p: stats.binom(bins - 1, p),
      binarize_signal=False, bins = bins
    )

    comp2 = compound_distribution(
      parameter_distribution=stats.uniform(0.5, 1.0),
      signal_family=lambda p: stats.binom(bins - 1, p),
      binarize_signal=False, bins=bins
    )

    grid1 = np.linspace(0.0, 0.25, num=200)
    grid2 = np.linspace(0.5, 1.0, num=200)

    prior1, prior2 = 0.5, 0.5

    gen = CompoundMC(
      category_priors=[prior1, prior2],
      compounds=[comp1, comp2],
      n_pixels=100, n_frames=frames
    )

    cats, params, X = gen.rvs(size=1)

    clf = FastBayesianClassifier(
      priors=[prior1, prior2],
      compounds=[comp1, comp2],
      parameter_grids=[grid1, grid2]
    )

    y = clf.predict_proba(X)

    print(np.sum(np.argmax(cats, axis=1) != y))
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformRange(self):
    with self.test_session():
      a = 3.0
      b = 10.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      self.assertAllClose(a, uniform.a.eval())
      self.assertAllClose(b, uniform.b.eval())
      self.assertAllClose(b - a, uniform.range().eval())
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformShape(self):
    with self.test_session():
      a = constant_op.constant([-3.0] * 5)
      b = constant_op.constant(11.0)
      uniform = uniform_lib.Uniform(a=a, b=b)

      self.assertEqual(uniform.batch_shape().eval(), (5,))
      self.assertEqual(uniform.get_batch_shape(), tensor_shape.TensorShape([5]))
      self.assertAllEqual(uniform.event_shape().eval(), [])
      self.assertEqual(uniform.get_event_shape(), tensor_shape.TensorShape([]))
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformPDFWithScalarEndpoint(self):
    with self.test_session():
      a = constant_op.constant([0.0, 5.0])
      b = constant_op.constant(10.0)
      uniform = uniform_lib.Uniform(a=a, b=b)

      x = np.array([0.0, 8.0], dtype=np.float32)
      expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])

      pdf = uniform.prob(x)
      self.assertAllClose(expected_pdf, pdf.eval())
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformAssertMaxGtMin(self):
    with self.test_session():
      a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
      b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
      uniform = uniform_lib.Uniform(a=a_v, b=b_v, validate_args=True)

      with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
                                               "x < y"):
        uniform.a.eval()
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _testUniformSampleMultiDimensional(self):
    # DISABLED: Please enable this test once b/issues/30149644 is resolved.
    with self.test_session():
      batch_size = 2
      a_v = [3.0, 22.0]
      b_v = [13.0, 35.0]
      a = constant_op.constant([a_v] * batch_size)
      b = constant_op.constant([b_v] * batch_size)

      uniform = uniform_lib.Uniform(a=a, b=b)

      n_v = 100000
      n = constant_op.constant(n_v)
      samples = uniform.sample(n)
      self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))

      sample_values = samples.eval()

      self.assertFalse(
          np.any(sample_values[:, 0, 0] < a_v[0]) or
          np.any(sample_values[:, 0, 0] >= b_v[0]))
      self.assertFalse(
          np.any(sample_values[:, 0, 1] < a_v[1]) or
          np.any(sample_values[:, 0, 1] >= b_v[1]))

      self.assertAllClose(
          sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
      self.assertAllClose(
          sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformMean(self):
    with self.test_session():
      a = 10.0
      b = 100.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      s_uniform = stats.uniform(loc=a, scale=b - a)
      self.assertAllClose(uniform.mean().eval(), s_uniform.mean())
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformVariance(self):
    with self.test_session():
      a = 10.0
      b = 100.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      s_uniform = stats.uniform(loc=a, scale=b - a)
      self.assertAllClose(uniform.variance().eval(), s_uniform.var())
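
Both tests lean on the closed forms for a uniform distribution on [a, b]: mean (a + b) / 2 and variance (b - a)^2 / 12, which is what the scipy reference objects return. A quick scipy-only check of the same identities:

import numpy as np
from scipy import stats

a, b = 10.0, 100.0
s_uniform = stats.uniform(loc=a, scale=b - a)

assert np.isclose(s_uniform.mean(), (a + b) / 2.0)       # 55.0
assert np.isclose(s_uniform.var(), (b - a) ** 2 / 12.0)  # 675.0
print(s_uniform.mean(), s_uniform.var())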
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformSamplePdf(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 100.0]
      uniform = uniform_lib.Uniform(a, b)
      self.assertTrue(
          math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0).eval())
uniform_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUniformBroadcasting(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 20.0]
      uniform = uniform_lib.Uniform(a, b)

      pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
      expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
      self.assertAllClose(expected_pdf, pdf.eval())
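
The expected_pdf table follows directly from pdf = 1 / (b - a) inside [a, b] and 0 outside, evaluated per column. The same broadcasting can be reproduced with scipy alone:

import numpy as np
from scipy import stats

a = 10.0
b = np.array([11.0, 20.0])
x = np.array([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])

pdf = stats.uniform(loc=a, scale=b - a).pdf(x)   # loc/scale broadcast against x column-wise
print(pdf)   # [[1.  0.1] [0.  0.1] [1.  0. ]]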
generation.py (project: astrobase, author: waqasbhatti)
def generate_rrab_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period':sps.uniform(loc=0.45,scale=0.35),
            'fourierorder':[8,11],
            'amplitude':sps.uniform(loc=0.4,scale=0.5),
            'phioffset':np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake RRab light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model parameters, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.

    '''

    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'RRab'
    return modeldict
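
The paramdists entries mix frozen scipy.stats distributions (sampled via .rvs()) with plain [min, max] lists for the Fourier order. A small sketch of how such a dict could be sampled; the drawing logic below is written out for illustration and is not taken from astrobase (in particular, the integer handling of 'fourierorder' is an assumption):

import numpy as np
import scipy.stats as sps

paramdists = {
    'period': sps.uniform(loc=0.45, scale=0.35),    # RRab periods in [0.45, 0.80] days
    'fourierorder': [8, 11],
    'amplitude': sps.uniform(loc=0.4, scale=0.5),   # amplitudes in [0.4, 0.9] mag
}

rng = np.random.RandomState(42)
period = paramdists['period'].rvs(random_state=rng)
fourierorder = rng.randint(paramdists['fourierorder'][0],
                           paramdists['fourierorder'][1] + 1)   # inclusive upper bound assumed
amplitude = paramdists['amplitude'].rvs(random_state=rng)
print(period, fourierorder, amplitude)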
generation.py (project: astrobase, author: waqasbhatti)
def generate_rrc_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period':sps.uniform(loc=0.10,scale=0.30),
            'fourierorder':[2,3],
            'amplitude':sps.uniform(loc=0.1,scale=0.3),
            'phioffset':1.5*np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake RRc light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model parameters, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.

    '''

    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'RRc'
    return modeldict

