def __init__(self, *args, **kwargs):
    self._generator = Task.__get_generator(self, *args, **kwargs)
    self._name = '!' + self._generator.__name__
    self._id = id(self)
    self._state = None
    self._value = None
    self._exceptions = []
    self._callers = []
    self._timeout = None
    self._daemon = False
    self._complete = None
    self._msgs = collections.deque()
    self._monitors = set()
    self._swap_generator = None
    self._hot_swappable = False
    if not Task._pycos:
        Pycos.instance()
    if not Task._pycos:
        Task._pycos = Pycos.instance()
    self._scheduler = self.__class__._pycos
    self._location = None
    self._scheduler._add(self)
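In this constructor the deque in `self._msgs` serves as the task's FIFO mailbox: senders append on the right, the task pops from the left when it receives. A minimal standalone sketch of that mailbox pattern (the `Mailbox` class and its method names are illustrative, not pycos API):

import collections

class Mailbox:
    """Toy FIFO mailbox in the spirit of Task._msgs (illustrative only)."""

    def __init__(self):
        self._msgs = collections.deque()

    def send(self, msg):
        # producers append on the right
        self._msgs.append(msg)

    def receive(self):
        # the consumer pops from the left; None signals "nothing pending"
        return self._msgs.popleft() if self._msgs else None

box = Mailbox()
box.send('hello')
box.send('world')
print(box.receive())  # -> 'hello'
print(box.receive())  # -> 'world'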
def __init__(self, size):
    self.not_in_cache = not_in_cache = object()
    cache = {}
    key_fifo = collections.deque([], size)

    def get(self, key):
        return cache.get(key, not_in_cache)

    def set(self, key, value):
        cache[key] = value
        if len(cache) > size:
            cache.pop(key_fifo.popleft(), None)
        key_fifo.append(key)

    def clear(self):
        cache.clear()
        key_fifo.clear()

    self.get = types.MethodType(get, self)
    self.set = types.MethodType(set, self)
    self.clear = types.MethodType(clear, self)

# argument cache for optimizing repeated calls when backtracking through recursive expressions
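The snippet above pairs a dict (fast lookup) with a deque of keys in insertion order (fast eviction of the oldest entry). A self-contained sketch of the same bounded-FIFO-cache idea (the `FifoCache` class below is illustrative, not the library's class):

import collections

class FifoCache:
    """Minimal bounded cache that evicts in insertion order (illustrative sketch)."""

    def __init__(self, size):
        self.not_in_cache = object()      # sentinel distinct from any cached value
        self._size = size
        self._cache = {}
        self._key_fifo = collections.deque()

    def get(self, key):
        return self._cache.get(key, self.not_in_cache)

    def set(self, key, value):
        self._cache[key] = value
        self._key_fifo.append(key)
        while len(self._cache) > self._size:
            # evict the oldest inserted key
            self._cache.pop(self._key_fifo.popleft(), None)

cache = FifoCache(2)
cache.set('a', 1)
cache.set('b', 2)
cache.set('c', 3)                            # evicts 'a'
print(cache.get('a') is cache.not_in_cache)  # -> True
print(cache.get('c'))                        # -> 3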
def add(self, event, subscriber, append=True):
    """
    Add a subscriber for an event.

    :param event: The name of an event.
    :param subscriber: The subscriber to be added (and called when the
                       event is published).
    :param append: Whether to append or prepend the subscriber to an
                   existing subscriber list for the event.
    """
    subs = self._subscribers
    if event not in subs:
        subs[event] = deque([subscriber])
    else:
        sq = subs[event]
        if append:
            sq.append(subscriber)
        else:
            sq.appendleft(subscriber)
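Here the deque acts as an ordered subscriber list: `append` adds to the back, `appendleft` to the front, so prepended handlers run first when the event is published. A hedged sketch of a matching publish loop (the `EventBus` class is illustrative, not the library's API):

from collections import deque

class EventBus:
    """Illustrative publish/subscribe bus with deque-backed subscriber lists."""

    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        sq = self._subscribers.setdefault(event, deque())
        if append:
            sq.append(subscriber)
        else:
            sq.appendleft(subscriber)

    def publish(self, event, *args):
        # call subscribers in deque order: appendleft() puts a handler first
        return [sub(*args) for sub in self._subscribers.get(event, ())]

bus = EventBus()
bus.add('greet', lambda name: 'hello ' + name)
bus.add('greet', lambda name: 'hi ' + name, append=False)   # runs first
print(bus.publish('greet', 'deque'))  # -> ['hi deque', 'hello deque']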
def __init__(self, cpe_str):
    """Create a new CPE object that represents the cpe_str

    :param str cpe_str: The cpe string
    """
    self.part = ""
    self.vendor = ""
    self.product = ""
    self.version = ""
    self.update = ""
    self.edition = ""

    if cpe_str.startswith("cpe:/"):
        cpe_str = cpe_str.replace("cpe:/", "")
    elif cpe_str.startswith("cpe:2.3:"):
        cpe_str = cpe_str.replace("cpe:2.3:", "")
    else:
        raise CPEException("Invalid cpe string {!r}".format(cpe_str))

    parts = deque(cpe_str.split(":"))
    to_set = deque(self.attrs)
    while len(parts) > 0 and len(to_set) > 0:
        next_attr = to_set.popleft()
        setattr(self, next_attr, parts.popleft())
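The constructor walks two deques in lock-step: one holding the colon-separated fields of the CPE string, one holding the attribute names to fill, stopping when either runs out. A self-contained sketch of the same pairing trick (a plain function handling only the `cpe:/` URI form; names and defaults are illustrative):

from collections import deque

def parse_cpe_fields(cpe_str, attrs=("part", "vendor", "product", "version", "update", "edition")):
    """Pair CPE fields with attribute names until either deque is exhausted (sketch)."""
    if cpe_str.startswith("cpe:/"):
        cpe_str = cpe_str[len("cpe:/"):]
    parts = deque(cpe_str.split(":"))
    to_set = deque(attrs)
    result = dict.fromkeys(attrs, "")
    while parts and to_set:
        result[to_set.popleft()] = parts.popleft()
    return result

print(parse_cpe_fields("cpe:/a:apache:http_server:2.4.54"))
# -> {'part': 'a', 'vendor': 'apache', 'product': 'http_server',
#     'version': '2.4.54', 'update': '', 'edition': ''}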
def apply_delta(self, delta):
    """Apply delta to our state and return a copy of the
    affected object as it was before and after the update, e.g.:

        old_obj, new_obj = self.apply_delta(delta)

    old_obj may be None if the delta is for the creation of a new object,
    e.g. a new application or unit is deployed.

    new_obj will never be None, but may be dead (new_obj.dead == True)
    if the object was deleted as a result of the delta being applied.
    """
    history = (
        self.state
        .setdefault(delta.entity, {})
        .setdefault(delta.get_id(), collections.deque())
    )
    history.append(delta.data)
    if delta.type == 'remove':
        history.append(None)
    entity = self.get_entity(delta.entity, delta.get_id())
    return entity.previous(), entity
def get_entity(
        self, entity_type, entity_id, history_index=-1, connected=True):
    """Return an object instance for the given entity_type and id.

    By default the object state matches the most recent state from
    Juju. To get an instance of the object in an older state, pass
    history_index, an index into the history deque for the entity.
    """
    if history_index < 0 and history_index != -1:
        history_index += len(self.entity_history(entity_type, entity_id))
        if history_index < 0:
            return None

    try:
        self.entity_data(entity_type, entity_id, history_index)
    except IndexError:
        return None

    entity_class = get_entity_class(entity_type)
    return entity_class(
        entity_id, self.model, history_index=history_index,
        connected=connected)
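Both methods revolve around a per-entity history: every delta appends the entity's new data (or `None` on removal) to a deque, and a negative `history_index` then reads older states the same way negative sequence indices do, since deques support indexing. A toy sketch of that pattern (the names below are illustrative, not the libjuju API):

import collections

history = collections.defaultdict(collections.deque)  # (entity_type, id) -> states

def apply_delta(entity_type, entity_id, data, removed=False):
    h = history[(entity_type, entity_id)]
    h.append(data)
    if removed:
        h.append(None)          # a trailing None marks the entity as dead

def entity_data(entity_type, entity_id, history_index=-1):
    # deques support indexing, so -1 is "latest", -2 "the state before", ...
    return history[(entity_type, entity_id)][history_index]

apply_delta('unit', 'web/0', {'status': 'pending'})
apply_delta('unit', 'web/0', {'status': 'active'})
print(entity_data('unit', 'web/0'))       # -> {'status': 'active'}
print(entity_data('unit', 'web/0', -2))   # -> {'status': 'pending'}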
def __init__(self, entity_id, model, history_index=-1, connected=True):
    """Initialize a new entity

    :param entity_id str: The unique id of the object in the model
    :param model: The model instance in whose object tree this
        entity resides
    :history_index int: The index of this object's state in the model's
        history deque for this entity
    :connected bool: Flag indicating whether this object gets live updates
        from the model.
    """
    self.entity_id = entity_id
    self.model = model
    self._history_index = history_index
    self.connected = connected
    self.connection = model.connection()
def __init__(self, sock=None, map=None):
    # for string terminator matching
    self.ac_in_buffer = ''

    # we use a list here rather than cStringIO for a few reasons...
    # del lst[:] is faster than sio.truncate(0)
    # lst = [] is faster than sio.truncate(0)
    # cStringIO will be gaining unicode support in py3k, which
    # will negatively affect the performance of bytes compared to
    # a ''.join() equivalent
    self.incoming = []

    # we toss the use of the "simple producer" and replace it with
    # a pure deque, which the original fifo was a wrapping of
    self.producer_fifo = deque()
    asyncore.dispatcher.__init__(self, sock, map)
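As the comment explains, the old `fifo` wrapper around the "simple producer" is replaced with a bare deque, which already gives O(1) pushes on the right and pops on the left. A minimal sketch of a deque used as an outgoing producer FIFO (illustrative only, not the asynchat internals):

from collections import deque

producer_fifo = deque()

# writers push outgoing chunks on the right ...
producer_fifo.append(b'GET / HTTP/1.0\r\n')
producer_fifo.append(b'\r\n')

# ... and the writable-socket handler drains from the left, O(1) per chunk
while producer_fifo:
    chunk = producer_fifo.popleft()
    print('would send %d bytes' % len(chunk))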
def intersperse(its):
    iters = collections.deque(iter(i) for i in its)
    N = len(iters)
    idxs = collections.deque(range(N))
    rets = [None] * N
    while iters:
        try:
            i = iters.popleft()
            idx = idxs.popleft()
            yield next(i)
            idxs.append(idx)
            iters.append(i)
        except StopIteration as e:
            rets[idx] = e.value
    return tuple(rets)
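The two deques rotate together, so live iterators are consumed round-robin and exhausted ones simply drop out; the final `return` collects each input iterator's own return value (None for plain iterables). A quick usage example of the function above:

merged = intersperse(["abc", "12", "xyzzy"])
print(list(merged))
# -> ['a', '1', 'x', 'b', '2', 'y', 'c', 'z', 'z', 'y']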
def _collect_refs(self, item_val, acc=None, no_opt=False):
    if acc is None:
        acc = deque()

    from gramfuzz.fields import Opt
    if no_opt and (isinstance(item_val, Opt) or item_val.shortest_is_nothing):
        return acc

    from gramfuzz.fields import Ref
    if isinstance(item_val, Ref):
        acc.append(item_val)

    if hasattr(item_val, "values"):
        for val in item_val.values:
            self._collect_refs(val, acc)

    return acc

# --------------------------------------
# public, but intended for internal use
# --------------------------------------
def add_definition(self, cat, def_name, def_val, no_prune=False, gram_file="default"):
    """Add a new rule definition named ``def_name`` having value ``def_value`` to
    the category ``cat``.

    :param str cat: The category to add the rule to
    :param str def_name: The name of the rule definition
    :param def_val: The value of the rule definition
    :param bool no_prune: If the rule should explicitly *NOT*
        be pruned even if it has been determined to be unreachable (default=``False``)
    :param str gram_file: The file the rule was defined in (default=``"default"``).
    """
    self._rules_processed = False

    self.add_to_cat_group(cat, gram_file, def_name)

    if no_prune:
        self.no_prunes.setdefault(cat, {}).setdefault(def_name, True)

    if self._staged_defs is not None:
        # if we're tracking changes during rule generation, add any new rules
        # to _staged_defs so they can be reverted if something goes wrong
        self._staged_defs.append((cat, def_name, def_val))
    else:
        self.defs.setdefault(cat, {}).setdefault(def_name, deque()).append(def_val)
def bfs(graph, func, head, reverse=None):
    """
    BREADTH FIRST SEARCH

    IF func RETURNS FALSE, THEN NO MORE PATHS DOWN THE BRANCH ARE TAKEN
    IT'S EXPECTED func TAKES 4 ARGUMENTS
    node - THE CURRENT NODE IN THE GRAPH
    path - PATH FROM head TO node
    graph - THE WHOLE GRAPH
    todo - WHAT'S IN THE QUEUE TO BE DONE
    """
    todo = deque()  # FIFO of paths still to expand
    todo.append(Step(None, head))

    while todo:  # stop once every pending path has been visited
        path = todo.popleft()
        keep_going = func(path.node, Path(path), graph, todo)
        if keep_going:
            todo.extend(Step(path, c) for c in graph.get_children(path.node))
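The same queue discipline works without the `Step`/`Path` wrappers, which come from the surrounding library and are not shown here. A stripped-down breadth-first walk over a plain adjacency dict, offered only as a sketch of the idea:

from collections import deque

def bfs_paths(graph, head, visit):
    """Breadth-first walk; visit(node, path) returning False prunes that branch (sketch)."""
    todo = deque([(head, (head,))])
    while todo:
        node, path = todo.popleft()
        if visit(node, path):
            todo.extend((child, path + (child,)) for child in graph.get(node, ()))

graph = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
bfs_paths(graph, 'a', lambda node, path: print(' -> '.join(path)) or True)
# prints: a, a -> b, a -> c, a -> b -> d, a -> c -> d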
def __init__(self, args):
    self._peer_addr = (args.target_ip, 6778)
    self._transport = None
    self._send_handle = None
    self._stat_handle = None
    self._ledbat = LEDBAT()
    self._loop = asyncio.get_event_loop()
    self._next_id = 1
    self._in_flight = set()
    self._ret_control = collections.deque(5*[None], 5)
    self._start_time = None
    self._int_time = None
    self._sent_data = 0
    self._int_data = 0
    self._num_retrans = 0
    self._int_retrans = 0
    self._delays = collections.deque(10*[None], 10)
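`collections.deque(5*[None], 5)` pre-fills a deque and caps it at five entries, so each `append` silently discards the oldest value: a cheap fixed-size rolling window for the retransmission and delay statistics sampled here. A small sketch of that rolling-window behaviour (sample values made up):

import collections

delays = collections.deque(10 * [None], 10)   # fixed-size window, pre-filled with None

for sample in (0.8, 1.2, 0.9):
    delays.append(sample)                     # oldest entry falls off the left

recent = [d for d in delays if d is not None]
print(len(delays), recent)                    # -> 10 [0.8, 1.2, 0.9]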
def search_node_subg(node, location_to_reads, read_to_locations, seen):
    """
    Extract the complete independent subgraph given a node, and add all
    subgraph nodes into the record.
    Returns the nodes in this subgraph and the updated record.
    """
    if node in seen:
        return None, seen
    tmp_net = set()
    queue = [node]
    while queue:
        # record the current frontier (set.update works on both Python 2 and 3,
        # unlike relying on map() for side effects)
        tmp_net.update(queue)
        seen.update(queue)
        new_queue = deque([])
        for x in queue:
            x_reads = location_to_reads[x]
            new_queue.extend([next_node for x_read in x_reads
                              for next_node in read_to_locations[x_read]
                              if next_node not in tmp_net])
        queue = list(set(new_queue))
    subg = list(set(tmp_net))
    return subg, seen
def rewind(self):
    """Rewind this cursor to its unevaluated state.

    Reset this cursor if it has been partially or completely evaluated.
    Any options that are present on the cursor will remain in effect.
    Future iterating performed on this cursor will cause new queries to
    be sent to the server, even if the resultant data has already been
    retrieved by this cursor.
    """
    self.__data = deque()
    self.__id = None
    self.__address = None
    self.__retrieved = 0
    self.__killed = False

    return self
def _object_hook(dictionary):
    object_view = ObjectView(dictionary)
    stack = deque()
    stack.append((None, None, dictionary))
    while len(stack):
        instance, member_name, dictionary = stack.popleft()
        for name, value in dictionary.iteritems():
            if isinstance(value, dict):
                stack.append((dictionary, name, value))
        if instance is not None:
            instance[member_name] = ObjectView(dictionary)
    return object_view
def __init__(self, *args, **kwargs):
    Connection.__init__(self, *args, **kwargs)

    self.deque = deque()
    self._deque_lock = Lock()
    self._connect_socket()
    self._socket.setblocking(0)

    with self._libevloop._lock:
        self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, self._libevloop._loop, self.handle_read)
        self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, self._libevloop._loop, self.handle_write)

    self._send_options_message()

    self._libevloop.connection_created(self)

    # start the global event loop if needed
    self._libevloop.maybe_start()
def __init__(self, *args, **kwargs):
    Connection.__init__(self, *args, **kwargs)

    self.deque = deque()
    self.deque_lock = Lock()

    self._connect_socket()
    asyncore.dispatcher.__init__(self, self._socket, _dispatcher_map)

    self._writable = True
    self._readable = True

    self._send_options_message()

    # start the event loop if needed
    self._loop.maybe_start()
def handle_write(self):
    while True:
        with self.deque_lock:
            try:
                next_msg = self.deque.popleft()
            except IndexError:
                self._writable = False
                return

        try:
            sent = self.send(next_msg)
            self._readable = True
        except socket.error as err:
            if (err.args[0] in NONBLOCKING):
                with self.deque_lock:
                    self.deque.appendleft(next_msg)
            else:
                self.defunct(err)
            return
        else:
            if sent < len(next_msg):
                with self.deque_lock:
                    self.deque.appendleft(next_msg[sent:])
                if sent == 0:
                    return
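The write handler and the connection constructors above share one pattern: a deque of outgoing messages guarded by a `Lock`, with `appendleft` used to put back a message (or its unsent tail) when the socket cannot take it all. A reduced sketch of that push-back pattern (no real socket; `fake_send` is a hypothetical stand-in):

from collections import deque
from threading import Lock

out_queue = deque([b'abcdef', b'ghi'])
out_lock = Lock()

def fake_send(data, limit=4):
    """Stand-in for socket.send(): accepts at most `limit` bytes."""
    return min(len(data), limit)

while True:
    with out_lock:
        if not out_queue:
            break
        msg = out_queue.popleft()
    sent = fake_send(msg)
    if sent < len(msg):
        with out_lock:
            out_queue.appendleft(msg[sent:])   # push back the unsent remainder
    print('sent %d of %d bytes' % (sent, len(msg)))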
def SLOPE(self, param):
    class Context:
        def __init__(self, N):
            self.N = N
            self.q = deque([], self.N)
            self.x = [i for i in range(self.N)]

        def handleInput(self, value):
            if len(self.q) < self.N:
                self.q.append(value)
                return 0
            self.q.append(value)
            z1 = np.polyfit(self.x, self.q, 1)
            return z1[0]

    ctx = Context(param[1])
    result = param[0].apply(ctx.handleInput)
    return result
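`Context` keeps the last `N` samples in a bounded deque and, once it is full, fits a degree-1 polynomial over them, so each new value yields the slope of the trailing window. A quick standalone check of that rolling-slope idea, assuming numpy is available (class name and sample values are made up, not the original API):

import numpy as np
from collections import deque

class RollingSlope:
    """Slope of a least-squares line over the last N samples (sketch inspired by Context above)."""

    def __init__(self, N):
        self.N = N
        self.q = deque([], N)          # bounded window: old samples fall off the left
        self.x = list(range(N))

    def update(self, value):
        self.q.append(value)
        if len(self.q) < self.N:
            return 0.0                 # not enough history yet
        return np.polyfit(self.x, self.q, 1)[0]

rs = RollingSlope(3)
print([round(rs.update(v), 2) for v in (1.0, 2.0, 3.0, 5.0)])
# -> [0.0, 0.0, 1.0, 1.5]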
def FORCAST(self, param):
    class Context:
        def __init__(self, N):
            self.N = N
            self.q = deque([], self.N)
            self.x = [i for i in range(self.N)]

        def handleInput(self, value):
            if len(self.q) < self.N:
                self.q.append(value)
                return np.NaN
            z1 = np.polyfit(self.x, self.q, 1)
            fn = np.poly1d(z1)
            y = fn(self.N + 1)
            self.q.append(value)
            return y

    ctx = Context(param[1])
    result = param[0].apply(ctx.handleInput)
    return result
def append(self, event_key, propagate):
    target = event_key.dispatch_target
    assert isinstance(target, type), \
        "Class-level Event targets must be classes."

    stack = [target]
    while stack:
        cls = stack.pop(0)
        stack.extend(cls.__subclasses__())
        if cls is not target and cls not in self._clslevel:
            self.update_subclass(cls)
        else:
            if cls not in self._clslevel:
                self._clslevel[cls] = collections.deque()
            self._clslevel[cls].append(event_key._listen_fn)
    registry._stored_in_collection(event_key, self)
def iterate(obj, opts):
    """traverse the given expression structure, returning an iterator.

    traversal is configured to be breadth-first.
    """
    # fasttrack for atomic elements like columns
    children = obj.get_children(**opts)
    if not children:
        return [obj]

    traversal = deque()
    stack = deque([obj])
    while stack:
        t = stack.popleft()
        traversal.append(t)
        for c in t.get_children(**opts):
            stack.append(c)
    return iter(traversal)
def unwrap_order_by(clause):
    """Break up an 'order by' expression into individual column-expressions,
    without DESC/ASC/NULLS FIRST/NULLS LAST"""

    cols = util.column_set()
    stack = deque([clause])
    while stack:
        t = stack.popleft()
        if isinstance(t, ColumnElement) and (
            not isinstance(t, UnaryExpression)
            or not operators.is_ordering_modifier(t.modifier)
        ):
            if isinstance(t, _label_reference):
                t = t.element
            if isinstance(t, _textual_label_reference):
                continue
            cols.add(t)
        else:
            for c in t.get_children():
                stack.append(c)
    return cols