def add_timeout(self, deadline, callback, *args, **kwargs):
    """Schedule ``callback`` to run at ``deadline`` on the I/O loop.

    Returns an opaque handle that may be passed to `remove_timeout`
    to cancel.

    ``deadline`` is either a number on the `IOLoop.time` scale
    (normally `time.time`) or a `datetime.timedelta` measured from
    the current time.  Since Tornado 4.0, `call_later` is a more
    convenient alternative for the relative case since it does not
    require a timedelta object.

    Not thread-safe: from another thread, use `add_callback` to hop
    onto the `IOLoop`'s thread first, then call `add_timeout` there.

    Subclasses of IOLoop must implement either `add_timeout` or
    `call_at`; the default implementations of each will call the
    other.  `call_at` is usually easier to implement, but subclasses
    that wish to maintain compatibility with Tornado versions prior
    to 4.0 must use `add_timeout` instead.

    .. versionchanged:: 4.0
       Now passes through ``*args`` and ``**kwargs`` to the callback.
    """
    # Normalize both accepted forms to an absolute time, then defer
    # to call_at for the actual scheduling.
    if isinstance(deadline, datetime.timedelta):
        when = self.time() + timedelta_to_seconds(deadline)
    elif isinstance(deadline, numbers.Real):
        when = deadline
    else:
        raise TypeError("Unsupported deadline %r" % deadline)
    return self.call_at(when, callback, *args, **kwargs)
# Examples of Python numbers.Real() usage
def __init__(self, deadline, callback, io_loop):
    """Record a scheduled timeout for *io_loop*'s timeout heap."""
    # Only an absolute numeric deadline is accepted at this level.
    if isinstance(deadline, numbers.Real):
        self.deadline = deadline
    else:
        raise TypeError("Unsupported deadline %r" % deadline)
    self.callback = callback
    # A monotonically increasing counter breaks ties between timeouts
    # sharing the same deadline, keeping the ordering deterministic.
    self.tiebreaker = next(io_loop._timeout_counter)
    # Comparison methods to sort by deadline, with a per-loop counter
    # (``tiebreaker``) to guarantee a consistent ordering.  The heapq module
    # uses __le__ in python2.5, and __lt__ in 2.6+ (sort() and most other
    # comparisons use __lt__).
def add_timeout(self, deadline, callback, *args, **kwargs):
    """Schedule ``callback`` on the Twisted reactor.

    ``deadline`` is either an absolute time on the `IOLoop.time` scale
    or a `datetime.timedelta` relative to now; anything else raises
    ``TypeError``.  Returns the reactor's ``IDelayedCall`` handle.
    """
    # This method could be simplified (since tornado 4.0) by
    # overriding call_at instead of add_timeout, but we leave it
    # for now as a test of backwards-compatibility.
    if isinstance(deadline, numbers.Real):
        # Clamp to zero so deadlines already in the past fire immediately.
        delay = max(deadline - self.time(), 0)
    elif isinstance(deadline, datetime.timedelta):
        delay = timedelta_to_seconds(deadline)
    else:
        # Bug fix: the original raised TypeError("Unsupported deadline %r")
        # without interpolating, so the message always showed a literal
        # "%r" instead of the offending value.
        raise TypeError("Unsupported deadline %r" % deadline)
    return self.reactor.callLater(
        delay, self._run_callback,
        functools.partial(wrap(callback), *args, **kwargs))
def add_timeout(self, deadline, callback, *args, **kwargs):
    """Run ``callback`` at the time ``deadline`` from the I/O loop.

    Returns an opaque handle suitable for `remove_timeout`.

    ``deadline`` may be a number denoting a time (on the same scale
    as `IOLoop.time`, normally `time.time`) or a
    `datetime.timedelta` relative to the current time.  Since
    Tornado 4.0, `call_later` is a more convenient alternative for
    the relative case since it does not require a timedelta object.

    Note that it is not safe to call `add_timeout` from other
    threads; use `add_callback` to transfer control to the `IOLoop`'s
    thread, and then call `add_timeout` from there.

    Subclasses of IOLoop must implement either `add_timeout` or
    `call_at`; the default implementations of each will call the
    other.  `call_at` is usually easier to implement, but subclasses
    that wish to maintain compatibility with Tornado versions prior
    to 4.0 must use `add_timeout` instead.

    .. versionchanged:: 4.0
       Now passes through ``*args`` and ``**kwargs`` to the callback.
    """
    # Guard-clause dispatch: absolute number first, relative timedelta
    # second, reject everything else.
    if isinstance(deadline, numbers.Real):
        return self.call_at(deadline, callback, *args, **kwargs)
    if isinstance(deadline, datetime.timedelta):
        seconds = timedelta_to_seconds(deadline)
        return self.call_at(self.time() + seconds, callback, *args, **kwargs)
    raise TypeError("Unsupported deadline %r" % deadline)
def add_timeout(self, deadline, callback, *args, **kwargs):
    """Arrange for ``callback`` to fire at ``deadline`` on the I/O loop.

    Returns an opaque handle that may be passed to `remove_timeout`
    to cancel.

    ``deadline`` may be a number on the `IOLoop.time` scale (normally
    `time.time`) or a `datetime.timedelta` for a deadline relative to
    the current time.  Since Tornado 4.0, `call_later` is a more
    convenient alternative for the relative case since it does not
    require a timedelta object.

    Not thread-safe: call `add_callback` first to transfer control to
    the `IOLoop`'s thread, then call `add_timeout` from there.

    Subclasses of IOLoop must implement either `add_timeout` or
    `call_at`; the default implementations of each will call the
    other.  `call_at` is usually easier to implement, but subclasses
    that wish to maintain compatibility with Tornado versions prior
    to 4.0 must use `add_timeout` instead.

    .. versionchanged:: 4.0
       Now passes through ``*args`` and ``**kwargs`` to the callback.
    """
    # Validate first, then convert a relative timedelta into an
    # absolute time so a single call_at handles both forms.
    if not isinstance(deadline, (numbers.Real, datetime.timedelta)):
        raise TypeError("Unsupported deadline %r" % deadline)
    if isinstance(deadline, datetime.timedelta):
        deadline = self.time() + timedelta_to_seconds(deadline)
    return self.call_at(deadline, callback, *args, **kwargs)
def __init__(self, deadline, callback, io_loop):
    """Capture one pending timeout (deadline, callback, tiebreaker)."""
    # Reject anything that is not an absolute numeric deadline.
    if not isinstance(deadline, numbers.Real):
        raise TypeError("Unsupported deadline %r" % deadline)
    self.deadline = deadline
    self.callback = callback
    # Pull the next value from the loop's shared counter; equal
    # deadlines then still compare in a stable, well-defined order.
    self.tiebreaker = next(io_loop._timeout_counter)
    # Comparison methods to sort by deadline, with a per-loop counter
    # (``tiebreaker``) to guarantee a consistent ordering.  The heapq module
    # uses __le__ in python2.5, and __lt__ in 2.6+ (sort() and most other
    # comparisons use __lt__).
def ed(entries, contentType, binsAsDict=None, **bins):
    """Create a Categorize that is only capable of being added.

    Parameters:
        entries (float): the number of entries.
        contentType (str): the value's sub-aggregator type (must be
            provided so the type is known even when ``bins`` is empty).
        bins (dict from str to :doc:`Container <histogrammar.defs.Container>`):
            the non-empty bin categories and their values.
    """
    # "nan"/"inf"/"-inf" strings are tolerated because float() parses them.
    if not (isinstance(entries, numbers.Real) or entries in ("nan", "inf", "-inf")):
        raise TypeError("entries ({0}) must be a number".format(entries))
    if not isinstance(contentType, basestring):
        raise TypeError("contentType ({0}) must be a string".format(contentType))
    if not all(isinstance(k, basestring) and isinstance(v, Container) for k, v in bins.items()):
        raise TypeError("bins ({0}) must be a dict from strings to Containers".format(bins))
    if entries < 0.0:
        raise ValueError("entries ({0}) cannot be negative".format(entries))

    out = Categorize(None, None)
    out.entries = float(entries)
    # Reuse the caller-supplied dict when given (it is mutated in place),
    # otherwise start from an empty one; keyword bins are merged on top.
    out.bins = {} if binsAsDict is None else binsAsDict
    out.bins.update(bins)
    out.contentType = contentType
    return out.specialize()
def fromJsonFragment(json, nameFromParent):
    """Reconstruct an added-only Categorize from its JSON fragment.

    Raises JsonFormatException on any structural or type mismatch.
    """
    # Top-level shape check: required and optional key sets.
    if not (isinstance(json, dict) and
            hasKeys(json.keys(), ["entries", "bins:type", "bins"], ["name", "bins:name"])):
        raise JsonFormatException(json, "Categorize")

    rawEntries = json["entries"]
    if rawEntries in ("nan", "inf", "-inf") or isinstance(rawEntries, numbers.Real):
        entries = float(rawEntries)
    else:
        raise JsonFormatException(json, "Categorize.entries")

    rawName = json.get("name", None)
    if isinstance(rawName, basestring):
        name = rawName
    elif rawName is None:
        name = None
    else:
        raise JsonFormatException(json["name"], "Categorize.name")

    if isinstance(json["bins:type"], basestring):
        contentType = json["bins:type"]
        # Look up the sub-aggregator factory by its registered type name.
        factory = Factory.registered[contentType]
    else:
        raise JsonFormatException(json, "Categorize.bins:type")

    rawDataName = json.get("bins:name", None)
    if isinstance(rawDataName, basestring):
        dataName = rawDataName
    elif rawDataName is None:
        dataName = None
    else:
        raise JsonFormatException(json["bins:name"], "Categorize.bins:name")

    if isinstance(json["bins"], dict):
        bins = dict((k, factory.fromJsonFragment(v, dataName)) for k, v in json["bins"].items())
    else:
        raise JsonFormatException(json, "Categorize.bins")

    out = Categorize.ed(entries, contentType, **bins)
    # The parent's name wins only when this fragment carries none.
    out.quantity.name = nameFromParent if name is None else name
    return out.specialize()
def ed(binWidth, entries, contentType, bins, nanflow, origin):
    """Create a SparselyBin that is only capable of being added.

    Parameters:
        binWidth (float): the width of a bin.
        entries (float): the number of entries.
        contentType (str): the value's sub-aggregator type (must be
            provided so the type is known even when ``bins`` is empty).
        bins (dict from int to :doc:`Container <histogrammar.defs.Container>`):
            the non-empty bin indexes and their values.
        nanflow (:doc:`Container <histogrammar.defs.Container>`): the filled nanflow bin.
        origin (float): the left edge of the bin whose index is zero.
    """
    # Type checks first, value-range checks afterwards (same order of
    # raised exceptions as the fillable constructor).
    if not isinstance(binWidth, numbers.Real):
        raise TypeError("binWidth ({0}) must be a number".format(binWidth))
    if not (isinstance(entries, numbers.Real) or entries in ("nan", "inf", "-inf")):
        raise TypeError("entries ({0}) must be a number".format(entries))
    if not isinstance(contentType, basestring):
        raise TypeError("contentType ({0}) must be a string".format(contentType))
    binsOk = isinstance(bins, dict) and all(
        isinstance(k, (int, long)) and isinstance(v, Container) for k, v in bins.items())
    if not binsOk:
        raise TypeError("bins ({0}) must be a map from 64-bit integers to Containers".format(bins))
    if not isinstance(nanflow, Container):
        raise TypeError("nanflow ({0}) must be a Container".format(nanflow))
    if not isinstance(origin, numbers.Real):
        raise TypeError("origin ({0}) must be a number".format(origin))
    if entries < 0.0:
        raise ValueError("entries ({0}) cannot be negative".format(entries))
    if binWidth <= 0.0:
        raise ValueError("binWidth ({0}) must be greater than zero".format(binWidth))

    out = SparselyBin(binWidth, None, None, nanflow, origin)
    out.entries = float(entries)
    out.contentType = contentType
    out.bins = bins
    return out.specialize()
def __init__(self, binWidth, quantity, value=Count(), nanflow=Count(), origin=0.0):
    """Create a SparselyBin that is capable of being filled and added.

    Parameters:
        binWidth (float): the width of a bin; must be strictly greater than zero.
        quantity (function returning float): computes the quantity of interest from the data.
        value (:doc:`Container <histogrammar.defs.Container>`): generates sub-aggregators to put in each bin.
        nanflow (:doc:`Container <histogrammar.defs.Container>`): a sub-aggregator to use for data whose quantity is NaN.
        origin (float): the left edge of the bin whose index is 0.

    Other parameters:
        entries (float): the number of entries, initially 0.0.
        bins (dict from int to :doc:`Container <histogrammar.defs.Container>`):
            the map, probably a hashmap, to fill with values when their
            `entries` become non-zero.
    """
    if not isinstance(binWidth, numbers.Real):
        raise TypeError("binWidth ({0}) must be a number".format(binWidth))
    # None is allowed for value: it marks an added-only aggregator.
    if not (value is None or isinstance(value, Container)):
        raise TypeError("value ({0}) must be a Container".format(value))
    if not isinstance(nanflow, Container):
        raise TypeError("nanflow ({0}) must be a Container".format(nanflow))
    if not isinstance(origin, numbers.Real):
        raise TypeError("origin ({0}) must be a number".format(origin))
    if binWidth <= 0.0:
        raise ValueError("binWidth ({0}) must be greater than zero".format(binWidth))

    self.binWidth = binWidth
    self.entries = 0.0
    self.quantity = serializable(quantity)
    self.value = value
    if value is not None:
        # Remember the prototype's type name so empty aggregators can
        # still be serialized with a concrete bins:type.
        self.contentType = value.name
    self.bins = {}
    self.nanflow = nanflow.copy()
    self.origin = origin
    super(SparselyBin, self).__init__()
    self.specialize()
def test_floats(self):
    # Every float scalar type must register with the numbers ABCs as
    # Real, but must not claim the stricter Rational interface.
    for t in sctypes['float']:
        tname = t.__name__
        assert_(isinstance(t(), numbers.Real),
                "{0} is not instance of Real".format(tname))
        assert_(issubclass(t, numbers.Real),
                "{0} is not subclass of Real".format(tname))
        assert_(not isinstance(t(), numbers.Rational),
                "{0} is instance of Rational".format(tname))
        assert_(not issubclass(t, numbers.Rational),
                "{0} is subclass of Rational".format(tname))
def test_complex(self):
    # Complex scalar types must register as Complex with the numbers
    # ABCs, but must not claim the stricter Real interface.
    for t in sctypes['complex']:
        tname = t.__name__
        assert_(isinstance(t(), numbers.Complex),
                "{0} is not instance of Complex".format(tname))
        assert_(issubclass(t, numbers.Complex),
                "{0} is not subclass of Complex".format(tname))
        assert_(not isinstance(t(), numbers.Real),
                "{0} is instance of Real".format(tname))
        assert_(not issubclass(t, numbers.Real),
                "{0} is subclass of Real".format(tname))
def real_check(*args):
    """Raise RealError unless every argument is a real number."""
    from numbers import Real
    # Name of the caller's caller, so the error points at the public
    # function that received the bad argument.
    caller = inspect.stack()[2][3]
    for value in args:
        if isinstance(value, Real):
            continue
        raise RealError('Function %s expected real number, %s got instead.'
                        % (caller, type(value).__name__))
def _operator_fallbacks(monomorphic_operator, fallback_operator):
def forward(a, b):
if isinstance(b, (jsint, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, numbers.Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
def test_timer(self):
    # Do a little busywork inside the context so the timer has
    # something to measure.
    with timer() as t:
        product = 1
        for value in range(1000):
            product *= value
    self.assertTrue(isinstance(t, timer))
    self.assertTrue(isinstance(t.start_time, numbers.Real))
    self.assertTrue(isinstance(t.end_time, numbers.Real))
    # Elapsed time must be exactly the difference of the endpoints.
    self.assertEqual(t.time_taken, t.end_time - t.start_time)
def sqrt(x: Real) -> Real:
    """
    Return the square root of a non-negative number.

    Raises ValueError (from math.sqrt) for negative input.
    """
    # Doc fix: math.sqrt accepts 0 as well, so "positive" was too strict.
    return _math.sqrt(x)
def exp(x: Real) -> Real:
    """
    Return e raised to the power *x* (the exponential of a number).
    """
    result = _math.exp(x)
    return result
def log(x: Real) -> Real:
"""
Return the natural logarithm of x.
Aliases: log | ln
"""
return _math.log(x)
def log10(x: Real) -> Real:
    """
    Return the base-10 logarithm of *x*.
    """
    result = _math.log10(x)
    return result
def log2(x: Real) -> Real:
    """
    Return the base-2 logarithm of *x*.
    """
    result = _math.log2(x)
    return result