def test_from_flam(self):
    fluxd = 3.824064455850402e-15 * u.W / u.m**2 / u.um
    wave = 10 * u.um
    aper = 1 * u.arcsec
    eph = dict(rh=1.5 * u.au, delta=1.0 * u.au)
    efrho = Efrho.from_fluxd(wave, fluxd, aper, eph)
    assert np.isclose(efrho.cm, 1000)
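For orientation, the test above exercises sbpy's εfρ quantity. Below is a minimal standalone sketch of the same call, assuming Efrho is importable from sbpy.activity (the test itself lives inside a test class, so imports are not shown in the snippet):

# Hedged sketch, not part of the original test module.
import numpy as np
import astropy.units as u
from sbpy.activity import Efrho   # assumed import location

fluxd = 3.824064455850402e-15 * u.W / u.m**2 / u.um  # thermal flux density
wave = 10 * u.um                                     # wavelength of the measurement
aper = 1 * u.arcsec                                  # circular aperture radius
eph = dict(rh=1.5 * u.au, delta=1.0 * u.au)          # heliocentric and geocentric distances

efrho = Efrho.from_fluxd(wave, fluxd, aper, eph)
print(efrho.to('cm'))   # the test above expects a value close to 1000 cm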
def __init__(self, Q, v):
    assert isinstance(Q, u.Quantity)
    assert Q.unit.is_equivalent((u.s**-1, u.mol / u.s))
    self.Q = Q

    assert isinstance(v, u.Quantity)
    assert v.unit.is_equivalent(u.m / u.s)
    self.v = v
def __init__(self, Q, v, parent, daughter=None):
    super(Haser, self).__init__(Q, v)
    bib.register('activity.gas.Haser', {'model': '1957BSRSL..43..740H'})

    assert isinstance(parent, u.Quantity)
    assert parent.unit.is_equivalent(u.m)
    self.parent = parent

    if daughter is None:
        self.daughter = None
    else:
        assert isinstance(daughter, u.Quantity)
        assert daughter.unit.is_equivalent(u.m)
        self.daughter = daughter
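A hedged usage sketch for the Haser constructor above, assuming it is exposed as sbpy.activity.Haser; the production rate and scale lengths are illustrative values, not taken from the source:

import astropy.units as u
from sbpy.activity import Haser   # assumed import location

Q = 1e28 / u.s              # production rate, equivalent to u.s**-1, so the assert passes
v = 0.85 * u.km / u.s       # expansion speed, equivalent to u.m / u.s
parent = 24000 * u.km       # parent scale length (illustrative)
daughter = 160000 * u.km    # optional daughter scale length (illustrative)

coma = Haser(Q, v, parent, daughter=daughter)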
def _convert_unit(self, rho, eph):
    """Make units match those of self."""
    if not self.dim.unit.is_equivalent(rho.unit):
        if rho.unit.is_equivalent(u.m):
            x = rho_as_angle(rho, eph)
        else:
            x = rho_as_length(rho, eph)
    else:
        x = rho

    return x
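The helpers used above convert between projected linear and angular aperture sizes at the observer distance delta. A small sketch, assuming rho_as_angle and rho_as_length are importable from sbpy.activity and accept a plain dict ephemeris like the test at the top of this page:

import astropy.units as u
from sbpy.activity import rho_as_angle, rho_as_length   # assumed import location

eph = dict(delta=1.0 * u.au)
rho_as_length(1 * u.arcsec, eph)   # roughly 725 km projected at the comet
rho_as_angle(725 * u.km, eph)      # roughly 1 arcsec subtended at delta = 1 au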
testing_support.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def create_named_configuration(name: str = 'LOWBD2', **kwargs) -> Configuration:
    """Create one of the standard configurations: LOWBD2, LOWBD1, LOWBD2-CORE, LOFAR, VLAA, VLAA_north.

    :param name: name of the Configuration: LOWBD2, LOWBD1, LOWBD2-CORE, LOFAR, VLAA, VLAA_north
    :param rmax: maximum distance of a station from the array average position (m)
    :return: Configuration

    For LOWBD2, setting rmax gives the following number of stations:

        rmax (m)   stations
        100.0            13
        300.0            94
        1000.0          251
        3000.0          314
        10000.0         398
        30000.0         476
        100000.0        512
    """
    if name == 'LOWBD2':
        location = EarthLocation(lon="116.4999", lat="-26.7000", height=300.0)
        fc = create_configuration_from_file(antfile=arl_path("data/configurations/LOWBD2.csv"),
                                            location=location, mount='xy', names='LOWBD2_%d',
                                            diameter=35.0, **kwargs)
    elif name == 'LOWBD1':
        location = EarthLocation(lon="116.4999", lat="-26.7000", height=300.0)
        fc = create_configuration_from_file(antfile=arl_path("data/configurations/LOWBD1.csv"),
                                            location=location, mount='xy', names='LOWBD1_%d',
                                            diameter=35.0, **kwargs)
    elif name == 'LOWBD2-CORE':
        location = EarthLocation(lon="116.4999", lat="-26.7000", height=300.0)
        fc = create_configuration_from_file(antfile=arl_path("data/configurations/LOWBD2-CORE.csv"),
                                            location=location, mount='xy', names='LOWBD2_%d',
                                            diameter=35.0, **kwargs)
    elif name == 'LOFAR':
        assert get_parameter(kwargs, "meta", False) is False
        fc = create_LOFAR_configuration(antfile=arl_path("data/configurations/LOFAR.csv"))
    elif name == 'VLAA':
        location = EarthLocation(lon="-107.6184", lat="34.0784", height=2124.0)
        fc = create_configuration_from_file(antfile=arl_path("data/configurations/VLA_A_hor_xyz.csv"),
                                            location=location,
                                            mount='altaz',
                                            names='VLA_%d',
                                            diameter=25.0, **kwargs)
    elif name == 'VLAA_north':
        location = EarthLocation(lon="-107.6184", lat="90.000", height=2124.0)
        fc = create_configuration_from_file(antfile=arl_path("data/configurations/VLA_A_hor_xyz.csv"),
                                            location=location,
                                            mount='altaz',
                                            names='VLA_%d',
                                            diameter=25.0, **kwargs)
    else:
        raise ValueError("No such Configuration %s" % name)
    return fc
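A hedged usage sketch; the import path is assumed from the testing_support.py project header above and may differ between ARL versions. Per the docstring table, rmax=1000.0 should keep roughly the 251 innermost LOWBD2 stations:

from arl.util.testing_support import create_named_configuration   # assumed module path

lowbd2_core = create_named_configuration('LOWBD2', rmax=1000.0)   # ~251 stations per the table above
lofar = create_named_configuration('LOFAR')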
def compute_ck2004_response(self, path, verbose=False):
    """
    Computes Castelli & Kurucz (2004) intensities across the entire
    range of model atmospheres.

    @path: path to the directory containing ck2004 SEDs
    @verbose: switch to determine whether computing progress should
        be printed on screen

    Returns: n/a
    """
    models = glob.glob(path+'/*M1.000*')
    Nmodels = len(models)

    # Store the length of the filename extensions for parsing:
    offset = len(models[0])-models[0].rfind('.')

    Teff, logg, abun = np.empty(Nmodels), np.empty(Nmodels), np.empty(Nmodels)
    InormE, InormP = np.empty(Nmodels), np.empty(Nmodels)

    if verbose:
        print('Computing Castelli & Kurucz (2004) passband intensities for %s:%s. This will take a while.' % (self.pbset, self.pbname))

    for i, model in enumerate(models):
        #~ spc = np.loadtxt(model).T -- waaay slower
        spc = np.fromfile(model, sep=' ').reshape(-1, 2).T

        Teff[i] = float(model[-17-offset:-12-offset])
        logg[i] = float(model[-11-offset:-9-offset])/10
        sign = 1. if model[-9-offset] == 'P' else -1.
        abun[i] = sign*float(model[-8-offset:-6-offset])/10

        spc[0] /= 1e10  # AA -> m
        spc[1] *= 1e7   # erg/s/cm^2/A -> W/m^3

        wl = spc[0][(spc[0] >= self.ptf_table['wl'][0]) & (spc[0] <= self.ptf_table['wl'][-1])]
        fl = spc[1][(spc[0] >= self.ptf_table['wl'][0]) & (spc[0] <= self.ptf_table['wl'][-1])]
        fl *= self.ptf(wl)
        flP = fl*wl

        InormE[i] = np.log10(fl.sum()/self.ptf_area*(wl[1]-wl[0]))          # energy-weighted intensity
        InormP[i] = np.log10(flP.sum()/self.ptf_photon_area*(wl[1]-wl[0]))  # photon-weighted intensity

        if verbose:
            if 100*i % (len(models)) == 0:
                print('%d%% done.' % (100*i/(len(models)-1)))

    # Store axes (Teff, logg, abun) and the full grid of Inorm, with
    # nans where the grid isn't complete.
    self._ck2004_axes = (np.unique(Teff), np.unique(logg), np.unique(abun))

    self._ck2004_energy_grid = np.nan*np.ones((len(self._ck2004_axes[0]), len(self._ck2004_axes[1]), len(self._ck2004_axes[2]), 1))
    self._ck2004_photon_grid = np.nan*np.ones((len(self._ck2004_axes[0]), len(self._ck2004_axes[1]), len(self._ck2004_axes[2]), 1))
    for i, I0 in enumerate(InormE):
        self._ck2004_energy_grid[Teff[i] == self._ck2004_axes[0], logg[i] == self._ck2004_axes[1], abun[i] == self._ck2004_axes[2], 0] = I0
    for i, I0 in enumerate(InormP):
        self._ck2004_photon_grid[Teff[i] == self._ck2004_axes[0], logg[i] == self._ck2004_axes[1], abun[i] == self._ck2004_axes[2], 0] = I0

    # Tried radial basis functions but they were just terrible.
    #~ self._log10_Inorm_ck2004 = interpolate.Rbf(self._ck2004_Teff, self._ck2004_logg, self._ck2004_met, self._ck2004_Inorm, function='linear')

    self.content.append('ck2004')
    self.atmlist.append('ck2004')
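The Teff/logg/[M/H] parsing above slices the filename backwards from the last '.', using offset as the extension length. A small illustration with a hypothetical filename laid out so the same slices apply; the real CK2004 SED file names may be formatted differently:

# Hypothetical filename, chosen only to match the slicing in compute_ck2004_response.
model = '/data/ck2004/T05000G45M05M1.000.spectrum'
offset = len(model) - model.rfind('.')                  # length of '.spectrum' -> 9

teff = float(model[-17-offset:-12-offset])              # '05000' -> 5000.0 K
logg = float(model[-11-offset:-9-offset]) / 10          # '45'    -> 4.5
sign = 1. if model[-9-offset] == 'P' else -1.           # 'M'     -> negative abundance
abun = sign * float(model[-8-offset:-6-offset]) / 10    # '05'    -> -0.5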
def compute_ck2004_ldcoeffs(self, plot_diagnostics=False):
    if 'ck2004_all' not in self.content:
        print('Castelli & Kurucz (2004) intensities are not computed yet. Please compute those first.')
        return None

    self._ck2004_ld_energy_grid = np.nan*np.ones((len(self._ck2004_intensity_axes[0]), len(self._ck2004_intensity_axes[1]), len(self._ck2004_intensity_axes[2]), 11))
    self._ck2004_ld_photon_grid = np.nan*np.ones((len(self._ck2004_intensity_axes[0]), len(self._ck2004_intensity_axes[1]), len(self._ck2004_intensity_axes[2]), 11))
    mus = self._ck2004_intensity_axes[3]

    for Tindex in range(len(self._ck2004_intensity_axes[0])):
        for lindex in range(len(self._ck2004_intensity_axes[1])):
            for mindex in range(len(self._ck2004_intensity_axes[2])):
                IsE = 10**self._ck2004_Imu_energy_grid[Tindex, lindex, mindex, :].flatten()
                fEmask = np.isfinite(IsE)
                if len(IsE[fEmask]) <= 1:
                    continue
                IsE /= IsE[fEmask][-1]

                IsP = 10**self._ck2004_Imu_photon_grid[Tindex, lindex, mindex, :].flatten()
                fPmask = np.isfinite(IsP)
                IsP /= IsP[fPmask][-1]

                cElin, pcov = cfit(self._ldlaw_lin, mus[fEmask], IsE[fEmask], p0=[0.5])
                cElog, pcov = cfit(self._ldlaw_log, mus[fEmask], IsE[fEmask], p0=[0.5, 0.5])
                cEsqrt, pcov = cfit(self._ldlaw_sqrt, mus[fEmask], IsE[fEmask], p0=[0.5, 0.5])
                cEquad, pcov = cfit(self._ldlaw_quad, mus[fEmask], IsE[fEmask], p0=[0.5, 0.5])
                cEnlin, pcov = cfit(self._ldlaw_nonlin, mus[fEmask], IsE[fEmask], p0=[0.5, 0.5, 0.5, 0.5])
                self._ck2004_ld_energy_grid[Tindex, lindex, mindex] = np.hstack((cElin, cElog, cEsqrt, cEquad, cEnlin))

                cPlin, pcov = cfit(self._ldlaw_lin, mus[fPmask], IsP[fPmask], p0=[0.5])
                cPlog, pcov = cfit(self._ldlaw_log, mus[fPmask], IsP[fPmask], p0=[0.5, 0.5])
                cPsqrt, pcov = cfit(self._ldlaw_sqrt, mus[fPmask], IsP[fPmask], p0=[0.5, 0.5])
                cPquad, pcov = cfit(self._ldlaw_quad, mus[fPmask], IsP[fPmask], p0=[0.5, 0.5])
                cPnlin, pcov = cfit(self._ldlaw_nonlin, mus[fPmask], IsP[fPmask], p0=[0.5, 0.5, 0.5, 0.5])
                self._ck2004_ld_photon_grid[Tindex, lindex, mindex] = np.hstack((cPlin, cPlog, cPsqrt, cPquad, cPnlin))

                if plot_diagnostics:
                    if Tindex == 10 and lindex == 9 and mindex == 5:
                        print(self._ck2004_intensity_axes[0][Tindex], self._ck2004_intensity_axes[1][lindex], self._ck2004_intensity_axes[2][mindex])
                        print(mus, IsE)
                        print(cElin, cElog, cEsqrt)
                        import matplotlib.pyplot as plt
                        plt.plot(mus[fEmask], IsE[fEmask], 'bo')
                        plt.plot(mus[fEmask], self._ldlaw_lin(mus[fEmask], *cElin), 'r-')
                        plt.plot(mus[fEmask], self._ldlaw_log(mus[fEmask], *cElog), 'g-')
                        plt.plot(mus[fEmask], self._ldlaw_sqrt(mus[fEmask], *cEsqrt), 'y-')
                        plt.plot(mus[fEmask], self._ldlaw_quad(mus[fEmask], *cEquad), 'm-')
                        plt.plot(mus[fEmask], self._ldlaw_nonlin(mus[fEmask], *cEnlin), 'k-')
                        plt.show()

    self.content.append('ck2004_ld')
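Each coefficient set above comes from an independent curve_fit of a limb-darkening law to the normalized intensities I(mu)/I(mu=1). A self-contained sketch of one such fit with commonly used linear and quadratic laws and synthetic data; whether these match PHOEBE's _ldlaw_* implementations exactly is not shown in the snippet:

import numpy as np
from scipy.optimize import curve_fit as cfit

def ldlaw_lin(mu, xl):
    # linear law: I(mu)/I(1) = 1 - xl*(1 - mu)
    return 1.0 - xl * (1.0 - mu)

def ldlaw_quad(mu, xl, yl):
    # quadratic law: I(mu)/I(1) = 1 - xl*(1 - mu) - yl*(1 - mu)**2
    return 1.0 - xl * (1.0 - mu) - yl * (1.0 - mu)**2

# Synthetic, noise-free "observations" for illustration only.
mus = np.linspace(0.05, 1.0, 20)
Is = ldlaw_quad(mus, 0.55, 0.12)

clin, _ = cfit(ldlaw_lin, mus, Is, p0=[0.5])
cquad, _ = cfit(ldlaw_quad, mus, Is, p0=[0.5, 0.5])
print(clin, cquad)   # cquad should recover approximately [0.55, 0.12]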
def create_background_image(ccd, profile_model, nsigma, separation):
    """Creates a background-only image.

    Uses a profile model and assumes the spectrum is only slightly misaligned
    (i.e. a couple of pixels from end to end) with respect to the lines of
    the detector. The number of sigmas determines the width of the zone to be
    masked and the separation is an offset that is added to that width.

    Args:
        ccd (object): A ccdproc.CCDData instance.
        profile_model (object): An astropy.modeling.Model instance. Describes
            the spatial profile of the target.
        nsigma (float): Number of sigmas. Used to calculate the width of the
            target zone to be masked.
        separation (float): Additional offset that adds to the width of the
            target zone.

    Returns:
        A ccdproc.CCDData instance with the spectrum masked.
    """
    background_ccd = ccd.copy()
    spatial_length, dispersion_length = background_ccd.data.shape
    target_profiles = []

    if 'CompoundModel' in profile_model.__class__.name:
        log_spec.debug(profile_model.submodel_names)
        for m in range(len(profile_model.submodel_names)):
            submodel_name = profile_model.submodel_names[m]
            target_profiles.append(profile_model[submodel_name])
    else:
        target_profiles.append(profile_model)

    for target in target_profiles:
        target_mean = target.mean.value
        target_stddev = target.stddev.value

        data_low_lim = np.int(np.max(
            [0, target_mean - (nsigma / 2. + separation) * target_stddev]))
        data_high_lim = np.int(np.min([spatial_length, int(
            target_mean + (nsigma / 2. + separation) * target_stddev)]))

        background_ccd.data[data_low_lim:data_high_lim, :] = 0

    if False:
        plt.title('Background Image')
        plt.imshow(background_ccd.data, clim=(0, 50))
        plt.show()

    return background_ccd
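A hedged usage sketch for create_background_image, assuming an astropy/ccdproc version compatible with the snippet above and a single Gaussian1D spatial profile; all numbers are illustrative:

import numpy as np
from astropy.modeling.models import Gaussian1D
from ccdproc import CCDData

# Illustrative frame: 100 spatial rows x 500 dispersion columns of flat background.
ccd = CCDData(np.ones((100, 500)), unit='adu')

# Assumed spatial profile of the target, centered on row 48 with a 3-pixel sigma.
profile = Gaussian1D(amplitude=500., mean=48., stddev=3.)

background = create_background_image(ccd, profile, nsigma=10, separation=5)
# Rows 48 +/- (10/2 + 5)*3 = 48 +/- 30 are zeroed; the rest is kept as background.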