def bg_lum_hi(pp, qq):
    # Tested 2011-08-31
    """Exact deprojection of Sersic profile using Meijer G function as
    described by Baes + Gentile arxiv:1009.4713.  Uses the simpler
    formula valid when the Sersic index m = pp/qq is a half integer
    (i.e. qq is 1 or 2).

    pp, qq : numerator and denominator of the Sersic index (integers).
    Returns a function mapping radius -> luminosity density (reff = 1).
    Raises RuntimeError for non-integer pp/qq or qq not in (1, 2).
    """
    if not (pp == int(pp) and qq == int(qq)):
        raise RuntimeError
    if not (qq == 1 or qq == 2):
        raise RuntimeError
    pp, qq = int(pp), int(qq)
    mm = (1.0 * pp) / qq
    i0, bb = bg_constants(pp, qq)
    # a and b vectors are specified: [[num1, num2, num3],[denom1,denom2,denom3]]
    avect = [[], []]
    # BUG FIX: use floor division so range() receives an int under
    # Python 3 (2*pp is exactly divisible by qq for qq in {1, 2}).
    nums = range(1, 2 * pp // qq)
    bvect = [[xx / (2.0 * mm) for xx in nums] + [0.5], []]
    reff = 1.0
    factor = 2 * i0 * np.sqrt(mm) / (reff * (2 * np.pi) ** mm)

    def lum(rr):
        # Vectorize over iterables by recursing elementwise.
        if np.iterable(rr):
            return np.array([lum(r) for r in rr])
        ss = rr / reff
        zz = (bb / (2 * mm)) ** (2 * mm) * ss ** 2
        return (factor / ss) * mpmath.meijerg(avect, bvect, zz)
    return lum
##################################################
## The big money function is defined right here! The name conforms to
## naming conventions used throughout this file, but is nearly useless
## to users. So make it available as luminosity() to users via a line
## in __init__.py since it's likely the only function any user will
## care about.
# Collected example sources for Python's iterable() (scraped index note).
def bg_lum(pp, qq, reff=1.0, lum=1.0):
    # Tested 2011-08-31
    """Exact deprojection of Sersic profile using Meijer G function as
    described by Baes + Gentile arxiv:1009.4713.

    pp and qq are the numerator and denominator of the Sersic index
    (both integers) so that n=pp/qq,
    reff is the projected half-light radius
    lum is the total luminosity.
    This returns a function that takes a radius and returns a
    luminosity density.

    >>> lum = luminosity(5,3)
    >>> lum(1.1)
    >>> lum([1.1, 2.2, 3.3])
    """
    if not (pp == int(pp) and qq == int(qq)):
        raise RuntimeError
    pp, qq = int(pp), int(qq)
    i0, bb = bg_constants(pp, qq)
    # Solution gets slow for larger p,q, so make sure that fraction is reduced.
    # BUG FIX: the recursive call previously dropped reff and lum (resetting
    # both to 1.0) and used float division; pass them through and use //.
    the_gcf = euclid_gcf(pp, qq)
    if the_gcf != 1:
        return bg_lum(pp // the_gcf, qq // the_gcf, reff=reff, lum=lum)
    # a and b vectors are specified: [[num1, num2, num3],[denom1,denom2,denom3]]
    avect = [[], [xx / (1.0 * qq) for xx in range(1, qq)]]
    bvect = [[xx / (2.0 * pp) for xx in range(1, 2 * pp)] +
             [xx / (2.0 * qq) for xx in range(1, 2 * qq, 2)], []]
    factor = 2 * i0 * np.sqrt(pp * qq) / (reff * (2 * np.pi) ** pp)

    def luminosity(rr):
        # Vectorize over iterables by recursing elementwise.
        if np.iterable(rr):
            return np.array([luminosity(r) for r in rr])
        ss = rr / reff
        zz = (bb / (2 * pp)) ** (2 * pp) * ss ** (2 * qq)
        return lum * ((factor / ss) * mpmath.meijerg(avect, bvect, zz))
    return luminosity
def bm_bn_estimate(nn):
    "Guess for bn constant defined by B+M"
    # Handle array-like input by recursing over the elements.
    if np.iterable(nn):
        return np.array([bm_bn_estimate(val) for val in nn])
    # Small indices follow a quartic ramp; otherwise a linear fit.
    if nn < 0.2:
        return 0.01 * (nn / 0.1) ** 4
    return 0.87 * nn - 0.15
def bg_lum_hi(pp, qq):
    # Tested 2011-08-31
    """Exact deprojection of Sersic profile using Meijer G function as
    described by Baes + Gentile arxiv:1009.4713.  Uses the simpler
    formula valid when the Sersic index m = pp/qq is a half integer
    (i.e. qq is 1 or 2).

    pp, qq : numerator and denominator of the Sersic index (integers).
    Returns a function mapping radius -> luminosity density (reff = 1).
    Raises RuntimeError for non-integer pp/qq or qq not in (1, 2).
    """
    if not (pp == int(pp) and qq == int(qq)):
        raise RuntimeError
    if not (qq == 1 or qq == 2):
        raise RuntimeError
    pp, qq = int(pp), int(qq)
    mm = (1.0 * pp) / qq
    i0, bb = bg_constants(pp, qq)
    # a and b vectors are specified: [[num1, num2, num3],[denom1,denom2,denom3]]
    avect = [[], []]
    # BUG FIX: use floor division so range() receives an int under
    # Python 3 (2*pp is exactly divisible by qq for qq in {1, 2}).
    nums = range(1, 2 * pp // qq)
    bvect = [[xx / (2.0 * mm) for xx in nums] + [0.5], []]
    reff = 1.0
    factor = 2 * i0 * np.sqrt(mm) / (reff * (2 * np.pi) ** mm)

    def lum(rr):
        # Vectorize over iterables by recursing elementwise.
        if np.iterable(rr):
            return np.array([lum(r) for r in rr])
        ss = rr / reff
        zz = (bb / (2 * mm)) ** (2 * mm) * ss ** 2
        return (factor / ss) * mpmath.meijerg(avect, bvect, zz)
    return lum
##################################################
## The big money function is defined right here! The name conforms to
## naming conventions used throughout this file, but is nearly useless
## to users. So make it available as luminosity() to users via a line
## in __init__.py since it's likely the only function any user will
## care about.
def bg_lum(pp, qq, reff=1.0, lum=1.0):
    # Tested 2011-08-31
    """Exact deprojection of Sersic profile using Meijer G function as
    described by Baes + Gentile arxiv:1009.4713.

    pp and qq are the numerator and denominator of the Sersic index
    (both integers) so that n=pp/qq,
    reff is the projected half-light radius
    lum is the total luminosity.
    This returns a function that takes a radius and returns a
    luminosity density.

    >>> lum = luminosity(5,3)
    >>> lum(1.1)
    >>> lum([1.1, 2.2, 3.3])
    """
    if not (pp == int(pp) and qq == int(qq)):
        raise RuntimeError
    pp, qq = int(pp), int(qq)
    i0, bb = bg_constants(pp, qq)
    # Solution gets slow for larger p,q, so make sure that fraction is reduced.
    # BUG FIX: the recursive call previously dropped reff and lum (resetting
    # both to 1.0) and used float division; pass them through and use //.
    the_gcf = euclid_gcf(pp, qq)
    if the_gcf != 1:
        return bg_lum(pp // the_gcf, qq // the_gcf, reff=reff, lum=lum)
    # a and b vectors are specified: [[num1, num2, num3],[denom1,denom2,denom3]]
    avect = [[], [xx / (1.0 * qq) for xx in range(1, qq)]]
    bvect = [[xx / (2.0 * pp) for xx in range(1, 2 * pp)] +
             [xx / (2.0 * qq) for xx in range(1, 2 * qq, 2)], []]
    factor = 2 * i0 * np.sqrt(pp * qq) / (reff * (2 * np.pi) ** pp)

    def luminosity(rr):
        # Vectorize over iterables by recursing elementwise.
        if np.iterable(rr):
            return np.array([luminosity(r) for r in rr])
        ss = rr / reff
        zz = (bb / (2 * pp)) ** (2 * pp) * ss ** (2 * qq)
        return lum * ((factor / ss) * mpmath.meijerg(avect, bvect, zz))
    return luminosity
def init_mps_random(nsites, physdim, bonddim=1, left_label='left',
                    right_label='right', phys_label='phys'):
    """
    Create an MPS with `nsites` sites and random tensors with physical
    dimensions given by `physdim` and bond dimensions given by
    `bonddim`. Open boundary conditions are used. The MPS is not normalized.

    Parameters
    ----------
    nsites : int
    physdim : int or list of ints
    bonddim : int or list of ints, optional
        The nth element of `bonddim` determines the right and left index of
        the tensors at sites n and n+1, respectively. The length of `bonddim`
        should be `nsites`-1. If `bonddim` is an int this is used for
        all bonds.
    left_label : str
    right_label : str
    phys_label : str
    """
    # Broadcast scalar physical/bond dimensions to per-site lists.
    if not np.iterable(physdim):
        physdim = [physdim] * nsites
    if not np.iterable(bonddim):
        bonddim = [bonddim] * (nsites - 1)
    # Pad with trivial (dimension-1) boundary bonds for open boundaries.
    bonddim = [1] + bonddim + [1]
    tensors = []
    for i in range(nsites):
        rt = tnc.Tensor(np.random.rand(
            physdim[i], bonddim[i], bonddim[i + 1]),
            [phys_label, left_label, right_label])
        # Normalize matrix to avoid norm blowing up: rescale the singular
        # values by S.data[0, 0] (presumably the largest singular value,
        # assuming tensor_svd returns them in descending order -- TODO
        # confirm), then contract U * S * V back into the site tensor.
        U, S, V = tnc.tensor_svd(rt, [phys_label, left_label])
        S.data = S.data / S.data[0, 0]
        rt = U["svd_in",] * S["svd_out",]
        rt = rt["svd_in",] * V["svd_out",]
        tensors.append(rt)
    return onedim.MatrixProductState(tensors, left_label=left_label,
                                     right_label=right_label,
                                     phys_label=phys_label)
def init_mps_allzero(nsites, physdim, left_label='left',
                     right_label='right', phys_label='phys'):
    """
    Create an MPS with `nsites` sites in the "all zero" state |00..0>.

    Parameters
    ----------
    nsites : int
    physdim : int or list of ints
    left_label : str
    right_label : str
    phys_label : str
    """
    # Broadcast a scalar physical dimension to every site.
    if not np.iterable(physdim):
        physdim = [physdim] * nsites
    site_tensors = []
    for dim in physdim:
        # One-hot vector selecting the |0> basis state, reshaped to carry
        # trivial (dimension-1) left and right bond indices.
        vec = np.zeros(dim)
        vec[0] = 1.0
        site_tensors.append(
            tnc.Tensor(vec.reshape(dim, 1, 1),
                       [phys_label, left_label, right_label]))
    return onedim.MatrixProductState(site_tensors, left_label=left_label,
                                     right_label=right_label,
                                     phys_label=phys_label)
def init_mps_logical(nsites, basis_state, physdim, left_label='left',
                     right_label='right', phys_label='phys'):
    """
    Create an MPS with `nsites` sites in the logical basis state |ijk..l>.

    Parameters
    ----------
    nsites : int
    basis_state : int or list of ints
        Site `i` will be in the state |`basis_state[i]`> (or simply
        |`basis_state`> if a single int is provided).
    physdim : int or list of ints
    left_label : str
    right_label : str
    phys_label : str
    """
    # Broadcast scalar arguments to per-site lists.
    if not np.iterable(physdim):
        physdim = [physdim] * nsites
    # BUG FIX: basis_state is documented as "int or list of ints" but was
    # indexed unconditionally, so a scalar raised TypeError; broadcast it.
    if not np.iterable(basis_state):
        basis_state = [basis_state] * nsites
    tensors = []
    for j in range(nsites):
        # One-hot vector selecting the requested basis state, with trivial
        # (dimension-1) left/right bond indices.
        t = np.zeros(physdim[j])
        t[basis_state[j]] = 1.0
        t = tnc.Tensor(t.reshape(physdim[j], 1, 1), [phys_label, left_label,
                                                     right_label])
        tensors.append(t)
    return onedim.MatrixProductState(tensors, left_label=left_label,
                                     right_label=right_label,
                                     phys_label=phys_label)
# Source: indexing.py
# Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda
# Author: SignalMedia
# (scraped page metadata: 40 reads, 0 bookmarks, 0 likes, 0 comments)
def __iter__(self):
    # Explicitly disable iteration: without this, Python would fall back
    # to repeated __getitem__ calls, which is meaningless for an indexer.
    raise NotImplementedError('ix is not iterable')
# Source: indexing.py
# Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda
# Author: SignalMedia
# (scraped page metadata: 26 reads, 0 bookmarks, 0 likes, 0 comments)
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed
iterable
"""
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
elif ax.is_floating():
return False
return True
# Source: indexing.py
# Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda
# Author: SignalMedia
# (scraped page metadata: 27 reads, 0 bookmarks, 0 likes, 0 comments)
def _getitem_axis(self, key, axis=0):
    # Look up `key` along a single axis, dispatching on the kind of key:
    # slice, boolean indexer, list-like, or (fall-through) scalar label.
    labels = self.obj._get_axis(axis)
    if isinstance(key, slice):
        self._has_valid_type(key, axis)
        return self._get_slice_axis(key, axis=axis)
    elif is_bool_indexer(key):
        return self._getbool_axis(key, axis=axis)
    elif is_list_like_indexer(key):

        # GH 7349
        # possibly convert a list-like into a nested tuple
        # but don't convert a list-like of tuples
        if isinstance(labels, MultiIndex):
            if (not isinstance(key, tuple) and len(key) > 1 and
                    not isinstance(key[0], tuple)):
                key = tuple([key])

        # an iterable multi-selection
        if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
            # Multi-dimensional keys are ambiguous for label lookup.
            if hasattr(key, 'ndim') and key.ndim > 1:
                raise ValueError('Cannot index with multidimensional key')

            return self._getitem_iterable(key, axis=axis)

        # nested tuple slicing (tuple key against a MultiIndex): resolve
        # positional locations and delegate to positional indexing.
        if is_nested_tuple(key, labels):
            locs = labels.get_locs(key)
            indexer = [slice(None)] * self.ndim
            indexer[axis] = locs
            return self.obj.iloc[tuple(indexer)]

    # fall thru to straight lookup
    self._has_valid_type(key, axis)
    return self._get_label(key, axis=axis)
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0
    """
    # BUG FIX: the bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; iter() signals "not iterable" with TypeError only.
    try:
        iter(y)
    except TypeError:
        return 0
    return 1
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
             cache=False):
    """Store the wrapped function and validate the output-type spec."""
    self.pyfunc = pyfunc
    self.cache = cache
    self._ufunc = None    # lazily-built ufunc, cached for default performance
    # Inherit the wrapped function's docstring unless one is supplied.
    self.__doc__ = pyfunc.__doc__ if doc is None else doc
    if isinstance(otypes, str):
        # A string of typecode characters; validate each one.
        self.otypes = otypes
        for code in self.otypes:
            if code not in typecodes['All']:
                raise ValueError(
                    "Invalid otype specified: %s" % (code,))
    elif iterable(otypes):
        # An iterable of dtype-like objects; convert to typecode chars.
        self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
    else:
        raise ValueError(
            "Invalid otype specification")
    # Excluded variable support: arguments the vectorizer must not broadcast.
    self.excluded = set() if excluded is None else set(excluded)
def __init__(self, longitude, latitude, age=0., sigma_age=0.0, **kwargs):
    """Record the pole's age model, then initialize the base pole.

    An iterable sigma_age is treated as (lower, upper) bounds of a
    uniform age distribution; a scalar is a gaussian standard deviation.
    """
    if np.iterable(sigma_age):
        assert len(sigma_age) == 2  # upper and lower bounds
        self._age_type = 'uniform'
    else:
        self._age_type = 'gaussian'
    self._age = age
    self._sigma_age = sigma_age
    super(PaleomagneticPole, self).__init__(
        longitude, latitude, 1.0, **kwargs)
def checktype(value, type_):
    """Check value against the type spec. If everything
    is OK, this just returns the value itself.
    If the types don't check out, an exception is thrown.

    Type specs:
      True      -- accept anything
      a type    -- isinstance check
      a list    -- iterable whose elements each match some listed type
      a set     -- value matches at least one member type
      a tuple   -- value must satisfy every listed spec
      callable  -- called with value; should raise CheckError on failure
    """
    # True skips any check
    if type_ is True:
        return value
    # types are checked using isinstance
    if type(type_) == type:
        if not isinstance(value, type_):
            raise CheckError("isinstance failed", value, "of type",
                             type(value), "is not of type", type_)
        return value
    # for a list, check that all elements of a collection have a type
    # of some list element, allowing declarations like [str] or [str,bytes]
    # no recursive checks right now
    if type(type_) == list:
        if not numpy.iterable(value):
            raise CheckError("expected iterable", value)
        for x in value:
            # BUG FIX: reduce(max, ...) was an "any" in disguise and a
            # NameError on Python 3 (reduce lives in functools there).
            if not any(isinstance(x, t) for t in type_):
                raise CheckError("element", x, "of type", type(x),
                                 "fails to be of type", type_)
        return value
    # for sets, check membership of the type in the set
    if type(type_) == set:
        for t in type_:
            if isinstance(value, t):
                return value
        # BUG FIX: the old var=var kwarg raised NameError (var undefined).
        raise CheckError("set membership failed", value, type_)
    # for tuples, check that all conditions are satisfied
    if type(type_) == tuple:
        for t in type_:
            # BUG FIX: was checktype(value, type_) -- infinite recursion.
            checktype(value, t)
        return value
    # callables are just called and should either use assertions or
    # explicitly raise CheckError
    if callable(type_):
        type_(value)
        return value
    # otherwise, we don't understand the type spec
    raise Exception("unknown type spec: %s" % type_)
def checktype(value, type_):
    """Check value against the type spec. If everything
    is OK, this just returns the value itself.
    If the types don't check out, an exception is thrown.

    Type specs:
      True      -- accept anything
      a type    -- isinstance check
      a list    -- iterable whose elements each match some listed type
      a set     -- value matches at least one member type
      a tuple   -- value must satisfy every listed spec
      callable  -- called with value; should raise CheckError on failure
    """
    # True skips any check
    if type_ is True:
        return value
    # types are checked using isinstance
    if type(type_) == type:
        if not isinstance(value, type_):
            raise CheckError("isinstance failed", value, "of type",
                             type(value), "is not of type", type_)
        return value
    # for a list, check that all elements of a collection have a type
    # of some list element, allowing declarations like [str] or [str,bytes]
    # no recursive checks right now
    if type(type_) == list:
        if not numpy.iterable(value):
            raise CheckError("expected iterable", value)
        for x in value:
            # BUG FIX: reduce(max, ...) was an "any" in disguise and a
            # NameError on Python 3 (reduce lives in functools there).
            if not any(isinstance(x, t) for t in type_):
                raise CheckError("element", x, "of type", type(x),
                                 "fails to be of type", type_)
        return value
    # for sets, check membership of the type in the set
    if type(type_) == set:
        for t in type_:
            if isinstance(value, t):
                return value
        # BUG FIX: the old var=var kwarg raised NameError (var undefined).
        raise CheckError("set membership failed", value, type_)
    # for tuples, check that all conditions are satisfied
    if type(type_) == tuple:
        for t in type_:
            # BUG FIX: was checktype(value, type_) -- infinite recursion.
            checktype(value, t)
        return value
    # callables are just called and should either use assertions or
    # explicitly raise CheckError
    if callable(type_):
        type_(value)
        return value
    # otherwise, we don't understand the type spec
    raise Exception("unknown type spec: %s" % type_)
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0
    """
    # BUG FIX: the bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; iter() signals "not iterable" with TypeError only.
    try:
        iter(y)
    except TypeError:
        return 0
    return 1
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
             cache=False):
    """Store the wrapped function and validate the output-type spec."""
    self.pyfunc = pyfunc
    self.cache = cache
    self._ufunc = None    # lazily-built ufunc, cached for default performance
    # Inherit the wrapped function's docstring unless one is supplied.
    self.__doc__ = pyfunc.__doc__ if doc is None else doc
    if isinstance(otypes, str):
        # A string of typecode characters; validate each one.
        self.otypes = otypes
        for code in self.otypes:
            if code not in typecodes['All']:
                raise ValueError(
                    "Invalid otype specified: %s" % (code,))
    elif iterable(otypes):
        # An iterable of dtype-like objects; convert to typecode chars.
        self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
    else:
        raise ValueError(
            "Invalid otype specification")
    # Excluded variable support: arguments the vectorizer must not broadcast.
    self.excluded = set() if excluded is None else set(excluded)
def quantile(x, q, weights=None):
    """
    Like numpy.percentile, but:

    * Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
    * scalar q not supported (q must be iterable)
    * optional weights on x

    Parameters
    ----------
    x : array_like
        Samples.
    q : iterable of float
        Quantiles in [0, 1].
    weights : array_like, optional
        Per-sample weights; if omitted, falls back to numpy.percentile.

    Returns
    -------
    ndarray (unweighted) or list (weighted) of quantile values.
    """
    if weights is None:
        return np.percentile(x, [100. * qi for qi in q])
    # ROBUSTNESS FIX: accept plain Python sequences (the old code required
    # x to already be an ndarray for fancy indexing), and force float
    # weights so the in-place cdf /= cdf[-1] is legal for int weights.
    x = np.asarray(x)
    weights = np.asarray(weights, dtype=float)
    idx = np.argsort(x)
    xsorted = x[idx]
    # Empirical CDF from the normalized cumulative weights.
    cdf = np.add.accumulate(weights[idx])
    cdf /= cdf[-1]
    return np.interp(q, cdf, xsorted).tolist()
def slice_templates_by_channel(self, channels):
    """Return the templates from self.get() whose channel is in `channels`.

    Parameters
    ----------
    channels : int or iterable of ints
        A single channel or a collection of channels to keep.

    Returns
    -------
    list of templates
    """
    if not np.iterable(channels):
        channels = [channels]
    result = []
    for t in self.get():
        # BUG FIX: the membership test was `t.channel in [channels]`,
        # which compared each channel against the wrapped list-of-list
        # and never matched an individual channel number.
        if t.channel in channels:
            result += [t]
    return result