def __init__(self, P_min, P_max, trend_cls=None,
             jitter=None, jitter_unit=None, anomaly_tol=1E-10):
    # the names of the default parameters
    self.default_params = ['P', 'phi0', 'ecc', 'omega', 'jitter', 'K']

    self.P_min = P_min
    self.P_max = P_max
    self.anomaly_tol = float(anomaly_tol)

    # TODO: validate the specified long-term velocity trends
    # if trend is not None and not isinstance(trend, PolynomialVelocityTrend):
    #     raise TypeError("Velocity trends must be PolynomialVelocityTrend "
    #                     "instances, not '{0}'".format(type(trend)))
    if trend_cls is None:  # by default, assume constant
        trend_cls = VelocityTrend1
    self.trend_cls = trend_cls
    self._n_trend = len(self.trend_cls.parameters)

    # validate the input jitter specification
    if jitter is None:
        jitter = 0 * u.km/u.s

    if isiterable(jitter):
        if len(jitter) != 2:
            raise ValueError("If specifying parameters for the jitter prior, you "
                             "must pass in a length-2 container containing the "
                             "mean and standard deviation of the Gaussian over "
                             "log(jitter^2)")

        if jitter_unit is None or not isinstance(jitter_unit, u.UnitBase):
            raise TypeError("If specifying parameters for the jitter prior, you "
                            "must also specify the units of the jitter for "
                            "evaluating the prior as an astropy.units.UnitBase "
                            "instance.")

        self._fixed_jitter = False
        self._jitter_unit = jitter_unit
        self.jitter = jitter
    else:
        self._fixed_jitter = True
        self.jitter = jitter
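
A minimal usage sketch, assuming astropy.units is imported as u (which the
code above already requires). The two calls exercise the two jitter branches
of the validation logic:

# Fixed jitter: any non-iterable Quantity is stored as-is.
pars_fixed = JokerParams(P_min=8*u.day, P_max=512*u.day,
                         jitter=0.5*u.km/u.s)

# Jitter prior: a (mean, stddev) pair for the Gaussian over log(jitter^2),
# plus the unit to use when evaluating the prior.
pars_prior = JokerParams(P_min=8*u.day, P_max=512*u.day,
                         jitter=(9., 2.), jitter_unit=u.m/u.s)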
def __init__(self, seed=42):
    np.random.seed(seed)

    EPOCH = np.random.uniform(0., 40)

    self.data = OrderedDict()
    self.joker_params = OrderedDict()
    self.truths = OrderedDict()

    P = np.random.uniform(40, 80) * u.day
    mjd = np.random.uniform(0, 300., 8)
    _genmjd = mjd + (EPOCH % P.value)

    # First just a binary
    truth = dict()
    truth['P'] = P
    truth['K'] = np.random.uniform(5, 15) * u.km/u.s
    truth['phi0'] = np.random.uniform(0., 2*np.pi) * u.radian
    truth['omega'] = np.random.uniform(0., 2*np.pi) * u.radian
    truth['ecc'] = np.random.uniform()
    self.v0 = np.random.uniform(-100, 100) * u.km/u.s

    orbit = RVOrbit(**truth)
    rv = orbit.generate_rv_curve(mjd) + self.v0
    err = np.full_like(rv.value, 0.01) * u.km/u.s
    data = RVData(mjd, rv, stddev=err)
    self.data['binary'] = data
    self.joker_params['binary'] = JokerParams(P_min=8*u.day, P_max=1024*u.day)
    self.truths['binary'] = truth.copy()
    self.truths['binary']['phi0'] = (self.truths['binary']['phi0'] -
                                     (2*np.pi*data.t_offset/P.value) * u.radian)

    # hierarchical triple - long term velocity trend
    self.v1 = np.random.uniform(-1, 1) * u.km/u.s/u.day
    orbit = RVOrbit(**truth)
    rv = orbit.generate_rv_curve(mjd) + self.v0 + self.v1*(mjd - mjd.min())*u.day
    err = np.full_like(rv.value, 0.01) * u.km/u.s
    data = RVData(mjd, rv, stddev=err, t_offset=mjd.min())
    self.data['triple'] = data
    self.joker_params['triple'] = JokerParams(P_min=8*u.day, P_max=1024*u.day,
                                              trend_cls=VelocityTrend2)
    self.truths['triple'] = truth.copy()
    self.truths['triple']['phi0'] = (self.truths['triple']['phi0'] -
                                     (2*np.pi*data.t_offset/P.value) * u.radian)

    # Binary on circular orbit
    truth = dict()
    truth['P'] = P
    truth['K'] = np.random.uniform(5, 15) * u.km/u.s
    truth['phi0'] = np.random.uniform(0., 2*np.pi) * u.radian
    truth['omega'] = 0*u.radian
    truth['ecc'] = 0.

    orbit = RVOrbit(**truth)
    rv = orbit.generate_rv_curve(_genmjd) + self.v0
    err = np.full_like(rv.value, 0.1) * u.km/u.s
    data = RVData(mjd + EPOCH, rv, stddev=err)
    self.data['circ_binary'] = data
    self.joker_params['circ_binary'] = JokerParams(P_min=8*u.day, P_max=1024*u.day)
    self.truths['circ_binary'] = truth.copy()
    self.truths['circ_binary']['phi0'] = (self.truths['circ_binary']['phi0'] -
                                          (2*np.pi*data.t_offset/P.value) * u.radian)
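
The fixture above only fills three parallel OrderedDicts keyed by the same
case names. A hypothetical sketch of consuming it in a test (the class name
FakeData is a placeholder; this excerpt does not show the enclosing class):

fixture = FakeData(seed=42)
for name, data in fixture.data.items():
    truth = fixture.truths[name]         # true orbital parameters for this case
    pars = fixture.joker_params[name]    # matching JokerParams for the sampler
    print(name, truth['P'], truth['ecc'])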
def MacroBroad(data, vmacro, extend=True):
    """
    This broadens the data by a given macroturbulent velocity.
    It works for small wavelength ranges. I need to make a better
    version that is accurate for large wavelength ranges! Sorry
    for the terrible variable names, it was copied from
    convol.pro in AnalyseBstar (Karolien Lefever)

    Parameters:
    ===========
    - data: kglib.utils.DataStructures.xypoint instance
        Stores the data to be broadened. The data MUST
        be equally-spaced before calling this!

    - vmacro: float
        The macroturbulent velocity, in km/s

    - extend: boolean
        If True, the y-axis will be extended to avoid edge effects

    Returns:
    ========
    A broadened version of data.
    """
    # Make the broadening kernel
    c = constants.c.cgs.value * units.cm.to(units.km)
    sq_pi = np.sqrt(np.pi)
    lambda0 = np.median(data.x)
    xspacing = data.x[1] - data.x[0]
    mr = vmacro * lambda0 / c
    ccr = 2 / (sq_pi * mr)

    px = np.arange(-data.size() / 2, data.size() / 2 + 1) * xspacing
    pxmr = abs(px) / mr
    profile = ccr * (np.exp(-pxmr ** 2) + sq_pi * pxmr * (erf(pxmr) - 1.0))

    # Extend the xy axes to avoid edge effects, if desired.
    # Note: integer division so the slice indices are ints (Python 3).
    if extend:
        before = data.y[-profile.size // 2 + 1:]
        after = data.y[:profile.size // 2]
        extended = np.r_[before, data.y, after]

        first = data.x[0] - float(int(profile.size / 2.0 + 0.5)) * xspacing
        last = data.x[-1] + float(int(profile.size / 2.0 + 0.5)) * xspacing
        x2 = np.linspace(first, last, extended.size)

        conv_mode = "valid"
    else:
        extended = data.y.copy()
        x2 = data.x.copy()
        conv_mode = "same"

    # Do the convolution with the normalized kernel
    newdata = data.copy()
    newdata.y = fftconvolve(extended, profile / profile.sum(), mode=conv_mode)

    return newdata
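
A hedged usage sketch for MacroBroad, assuming kglib's DataStructures.xypoint
container (equally spaced .x with a matching .y); the x=/y= constructor
keywords are an assumption of this sketch:

import numpy as np
from kglib.utils import DataStructures

wave = np.linspace(5000.0, 5010.0, 1001)      # equally spaced, as required
flux = 1.0 - 0.5*np.exp(-0.5*((wave - 5005.0)/0.05)**2)
spec = DataStructures.xypoint(x=wave, y=flux)
broadened = MacroBroad(spec, vmacro=10.0)     # vmacro in km/s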
def get_rv(vel, corr, Npix=None, **fit_kws):
    """
    Get the best radial velocity, with errors.
    This will only work if the ccf was made with the maximum likelihood method!
    Uses the formula given in Zucker (2003) MNRAS, 342, 4 for the rv error.

    Parameters:
    ===========
    - vel: numpy.ndarray
        The velocities

    - corr: numpy.ndarray
        The ccf values. Should be the same size as vel

    - Npix: integer
        The number of pixels used in the CCF.

    Returns:
    ========
    - rv: float
        The best radial velocity, in km/s

    - rv_err: float
        Uncertainty in the velocity, in km/s

    - ccf: float
        The CCF power at the maximum velocity.
    """
    vel = np.atleast_1d(vel)
    corr = np.atleast_1d(corr)
    sorter = np.argsort(vel)
    fcn = spline(vel[sorter], corr[sorter])
    fcn_prime = fcn.derivative(1)
    fcn_2prime = fcn.derivative(2)

    guess = vel[np.argmax(corr)]

    def errfcn(v):
        # The CCF peak is where the first derivative vanishes; minimize
        # its square (scaled up to avoid numerical underflow).
        ll = 1e6 * fcn_prime(v) ** 2
        return ll

    out = minimize_scalar(errfcn, bounds=(guess - 2, guess + 2), method='bounded')
    rv = out.x

    if Npix is None:
        Npix = vel.size

    # Zucker (2003) maximum-likelihood variance:
    # sigma_v^2 = -[N * C''(v) * C(v) / (1 - C(v)^2)]^(-1)
    rv_var = -(Npix * fcn_2prime(rv) * (fcn(rv) / (1 - fcn(rv) ** 2))) ** (-1)
    return rv, np.sqrt(rv_var), fcn(rv)
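
A usage sketch with a synthetic Gaussian-shaped CCF; because the peak value
stays below 1, the Zucker (2003) variance above is positive and the square
root is well defined. Only numpy (as np) is assumed here:

vel = np.linspace(-50.0, 50.0, 201)               # velocity grid, km/s
corr = 0.9*np.exp(-0.5*((vel - 12.3)/5.0)**2)     # CCF peak near +12.3 km/s
rv, rv_err, peak = get_rv(vel, corr, Npix=2048)
print(rv, rv_err, peak)                           # rv should come out near 12.3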
def astropy_smooth(data, vel, linearize=False, kernel=convolution.Gaussian1DKernel, **kern_args):
    """
    Smooth using a gaussian filter, using astropy.

    Parameters:
    ===========
    - data: kglib.utils.DataStructures.xypoint instance
        The data to smooth.

    - vel: float
        The velocity scale to smooth out.
        Can either be an astropy quantity or a float in km/s

    - linearize: boolean
        If True, we will put the data on a constant
        log-wavelength spacing grid before smoothing.
        The output has the same spacing as the input
        regardless of this variable.

    - kernel: astropy.convolution kernel
        The astropy kernel to use. The default is the
        Gaussian1DKernel.

    - kern_args: Additional kernel arguments beyond width

    Returns:
    ========
    A smoothed version of the data, on the same wavelength grid as the data
    """
    if linearize:
        original_data = data.copy()
        datafcn = spline(data.x, data.y, k=3)
        linear = DataStructures.xypoint(data.x.size)
        linear.x = np.logspace(np.log10(data.x[0]), np.log10(data.x[-1]), linear.size())
        linear.y = datafcn(linear.x)
        data = linear

    # Figure out the feature size in pixels
    if not isinstance(vel, u.quantity.Quantity):
        vel *= u.km / u.second
    featuresize = (vel / constants.c).decompose().value
    dlam = np.log(data.x[1] / data.x[0])
    Npix = featuresize / dlam

    # Make the kernel and smooth
    kern = kernel(Npix, **kern_args)
    smoothed = convolution.convolve(data.y, kern, boundary='extend')

    if linearize:
        # Interpolate back onto the original wavelength grid
        fcn = spline(data.x, smoothed)
        return fcn(original_data.x)

    return smoothed
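
A hedged usage sketch for astropy_smooth. The wavelengths are built
log-spaced so dlam is constant without linearize=True; the xypoint
constructor keywords are again an assumption:

wave = np.logspace(np.log10(5000.0), np.log10(5100.0), 2000)   # log-spaced grid
flux = np.random.normal(1.0, 0.01, wave.size)
spec = DataStructures.xypoint(x=wave, y=flux)
smoothed = astropy_smooth(spec, vel=500.0*u.km/u.s)   # smooth ~500 km/s features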
def photo_lengthscale(species, source=None):
    """Photodissociation lengthscale for a gas species.

    Parameters
    ----------
    species : string
        The species to look up.
    source : string, optional
        Retrieve values from this source (case insensitive). See
        references for keys.

    Returns
    -------
    gamma : astropy Quantity
        The lengthscale at 1 au.

    Example
    -------
    >>> from sbpy.activity import photo_lengthscale
    >>> gamma = photo_lengthscale('OH')

    References
    ----------
    [CS93] H2O and OH from Table IV of Cochran & Schleicher 1993,
    Icarus 105, 235-253. Quoted for intermediate solar activity.

    """

    data = {  # (value, {key feature: ADS bibcode})
        'H2O': {'CS93': (2.4e4 * u.km,
                         {'H2O photodissociation lengthscale': '1993Icar..105..235C'})},
        'OH': {'CS93': (1.6e5 * u.km,
                        {'OH photodissociation lengthscale': '1993Icar..105..235C'})},
    }

    default_sources = {
        'H2O': 'CS93',
        'OH': 'CS93',
    }

    assert species.upper() in data, (
        "No lengthscale available for {}. Choose from: {}".format(
            species, ', '.join(data.keys())))

    gas = data[species.upper()]
    source = default_sources[species.upper()] if source is None else source

    assert source.upper() in gas, (
        'Source key {} not available for {}. Choose from: {}'.format(
            source, species, ', '.join(gas.keys())))

    gamma, bibcode = gas[source.upper()]
    bib.register('activity.gas.photo_lengthscale', bibcode)

    return gamma
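
Beyond the doctest in the docstring, the source keyword selects among the
tabulated references; only 'CS93' exists in this excerpt:

gamma_h2o = photo_lengthscale('H2O', source='CS93')   # 2.4e4 km at 1 au
gamma_oh = photo_lengthscale('OH')                    # source defaults to CS93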