def __init__(self, config):
log.debug('Starting KinesisProducer')
self.config = config
self._queue = queue.Queue()
self._closed = False
accumulator = RecordAccumulator(RawBuffer, config)
# Choose a plain synchronous client when concurrency is 1; otherwise
# fan puts out across a thread pool.
if config['kinesis_concurrency'] == 1:
client = Client(config)
else:
client = ThreadPoolClient(config)
self._sender = Sender(queue=self._queue,
accumulator=accumulator,
client=client,
partitioner=random_partitioner)
self._sender.daemon = True
self._sender.start()
def test_flush(config):
q = queue.Queue()
accumulator = RecordAccumulator(RawBuffer, config)
client = mock.Mock()
sender = Sender(queue=q, accumulator=accumulator,
client=client, partitioner=partitioner)
sender.flush()
assert not client.put_record.called
accumulator.try_append(b'-')
sender.flush()
expected_record = (b'-\n', 4)
client.put_record.assert_called_once_with(expected_record)
def setup(self, config):
"""
Establish connection to Elasticsearch cluster and start periodic commit.
:param config: Configuration object.
:type config: ``dict``
"""
self.config = config
self.context_size = config.get(helper.CONTEXT_SIZE, 120)
self.elastic_bulk = queue.Queue()
self.elastic = self.config[helper.INJECTOR].get_elasticsearch()
self.helper = self.config[helper.INJECTOR].get_elasticsearch_helper()
self.create_mapping()
thread = threading.Thread(target=self._commit, args=())
thread.daemon = True
thread.start()
self.thread = thread
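# The _commit method started above is not shown; a minimal sketch of a
# periodic commit loop, assuming items on elastic_bulk are bulk actions
# and that the helper exposes a bulk() call (both assumptions):
def _commit(self):
    while True:
        actions = []
        try:
            while True:
                actions.append(self.elastic_bulk.get(block=False))
        except queue.Empty:
            pass
        if actions:
            self.helper.bulk(self.elastic, actions)  # hypothetical helper call
        time.sleep(1)  # commit interval; the real value is an assumption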
# future_full_pipeline.py (project: deb-python-cassandra-driver, author: openstack)
def run(self):
futures = queue.Queue(maxsize=121)
self.start_profile()
for i in range(self.num_queries):
# Once the pipeline is full, block on the oldest outstanding future.
if i >= 120:
old_future = futures.get_nowait()
old_future.result()
key = "{}-{}".format(self.thread_num, i)
future = self.run_query(key)
futures.put_nowait(future)
while True:
try:
futures.get_nowait().result()
except queue.Empty:
break
self.finish_profile()
def run(self):
futures = queue.Queue(maxsize=121)
self.start_profile()
for i in range(self.num_queries):
if i > 0 and i % 120 == 0:
# clear the existing queue
while True:
try:
futures.get_nowait().result()
except queue.Empty:
break
key = "{0}-{1}".format(self.thread_num, i)
future = self.run_query(key)
futures.put_nowait(future)
while True:
try:
futures.get_nowait().result()
except queue.Empty:
break
self.finish_profile()
def watch_once(self, key, timeout=None, **kwargs):
"""Watch a key and stops after the first event.
:param key: key to watch
:param timeout: (optional) timeout in seconds.
:returns: event
"""
event_queue = queue.Queue()
def callback(event):
event_queue.put(event)
w = watch.Watcher(self, key, callback, **kwargs)
try:
return event_queue.get(timeout=timeout)
except queue.Empty:
raise exceptions.WatchTimedOut()
finally:
w.stop()
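# A hedged usage sketch for watch_once; the client construction is an
# assumption (only the method itself appears above):
client = Etcd3Client()  # hypothetical constructor
try:
    event = client.watch_once('/config/feature-flag', timeout=5)
    print('key changed:', event)
except exceptions.WatchTimedOut:
    print('no event within 5 seconds')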
def setUp(self):
# Simple model with a discrete variable between two continuous variables.
def model():
p = Variable(torch.Tensor([0.5]))
mu = Variable(torch.zeros(1))
sigma = Variable(torch.ones(1))
x = pyro.sample("x", Normal(mu, sigma)) # Before the discrete variable.
y = pyro.sample("y", Bernoulli(p))
z = pyro.sample("z", Normal(mu, sigma)) # After the discrete variable.
return dict(x=x, y=y, z=z)
self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
self.model = model
self.queue = Queue()
self.queue.put(poutine.Trace())
def _traces(self, *args, **kwargs):
"""
The search algorithm enters here. Running until the queue is empty
and collecting the marginal histogram amounts to exact inference
over the enumerated discrete choices.
:returns: Iterator of traces from the posterior.
:rtype: Generator[:class:`pyro.Trace`]
"""
# currently only using the standard library queue
self.queue = Queue()
self.queue.put(poutine.Trace())
p = poutine.trace(
poutine.queue(self.model, queue=self.queue, max_tries=self.max_tries))
while not self.queue.empty():
tr = p.get_trace(*args, **kwargs)
yield (tr, tr.log_pdf())
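# A sketch of exact inference built on _traces, as the docstring above
# describes: weight each enumerated trace by exp(log_pdf) and normalize.
# The site name "y" and the trace access tr.nodes[site]["value"] are
# assumptions for illustration.
import math
from collections import defaultdict

def marginal_histogram(search, site="y", *args, **kwargs):
    hist = defaultdict(float)
    for tr, log_w in search._traces(*args, **kwargs):
        value = float(tr.nodes[site]["value"])
        hist[value] += math.exp(float(log_w))
    total = sum(hist.values())
    return {v: w / total for v, w in hist.items()}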
def __init__(self, source, buffer_size=2):
assert buffer_size >= 2, "minimum buffer size is 2"
# The effective buffer size is one larger, because the generation
# process will generate one extra element and block until there is room
# in the buffer.
self.buffer = Queue(maxsize=buffer_size - 1)
def populate_buffer():
try:
for item in source:
self.buffer.put((None, item))
except:
self.buffer.put((sys.exc_info(), None))
else:
self.buffer.put(DONE)
thread = threading.Thread(target=populate_buffer)
thread.daemon = True
thread.start()
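# Usage sketch: wrap a slow producer so the next item is computed in the
# background while the consumer works. The class name BufferedGenerator
# and its iterability are assumptions; only __init__ appears above.
import time

def slow_numbers():
    for i in range(5):
        time.sleep(0.1)  # simulate expensive generation
        yield i

for item in BufferedGenerator(slow_numbers(), buffer_size=3):  # hypothetical name
    print(item)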
def run(self):
if not self.containers:
return
queue = Queue()
thread_args = queue, self.log_args
thread_map = build_thread_map(self.containers, self.presenters, thread_args)
for line in consume_queue(queue, self.cascade_stop):
remove_stopped_threads(thread_map)
if not line:
if not thread_map:
# There are no running containers left to tail, so exit
return
# We got an empty line because of a timeout, but there are still
# active containers to tail, so continue
continue
try:
self.output.write(line)
self.output.flush()
except ValueError:
# ValueError: I/O operation on closed file
break
def __init__(self,
logger,
path,
tchannel,
deployment_str,
headers,
timeout_seconds,
reconfigure_interval_seconds):
self.logger = logger
self.path = path
self.tchannel = tchannel
self.deployment_str = deployment_str
self.headers = headers
self.timeout_seconds = timeout_seconds
self.task_queue = queue.Queue()
self.workers = {}
self.reconfigure_signal = threading.Event()
self.reconfigure_interval_seconds = reconfigure_interval_seconds
self.reconfigure_thread = None
def get(self):
'''Get a task from queue when bucket available'''
if self.bucket.get() < 1:
return None
now = time.time()
self.mutex.acquire()
try:
task = self.priority_queue.get_nowait()
self.bucket.desc()
except Queue.Empty:
self.mutex.release()
return None
task.exetime = now + self.processing_timeout
self.processing.put(task)
self.mutex.release()
return task.taskid
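# The Bucket used above is not shown; a minimal token-bucket sketch under
# the assumption that get() reports available tokens and desc() consumes one:
import time

class Bucket(object):
    def __init__(self, rate=1.0, burst=10):
        self.rate = rate              # tokens refilled per second
        self.burst = burst            # maximum tokens held
        self.tokens = float(burst)
        self.last_update = time.time()

    def get(self):
        now = time.time()
        # Refill proportionally to elapsed time, capped at burst.
        self.tokens = min(self.burst,
                          self.tokens + (now - self.last_update) * self.rate)
        self.last_update = now
        return self.tokens

    def desc(self):
        self.tokens -= 1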
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
if task['project'] not in self.projects:
continue
project = self.projects[task['project']]
project.on_get_info(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
def init_service(self, endpoint, project, uuid, log_batch_size,
ignore_errors, ignored_tags):
self._errors = queue.Queue()
if self.RP is None:
self.ignore_errors = ignore_errors
self.ignored_tags = ignored_tags
logging.debug('ReportPortal - Init service: endpoint=%s, '
'project=%s, uuid=%s', endpoint, project, uuid)
self.RP = ReportPortalServiceAsync(
endpoint=endpoint,
project=project,
token=uuid,
error_handler=self.async_error_handler,
log_batch_size=log_batch_size
)
else:
logging.debug('ReportPortal service is already initialized')
return self.RP
def watch_once(self, key, timeout=None, **kwargs):
"""
Watch a key and stop after the first event.
If a timeout is specified and no event arrives, the method raises
a ``WatchTimedOut`` exception.
:param key: key to watch
:param timeout: (optional) timeout in seconds.
:returns: ``Event``
"""
event_queue = queue.Queue()
def callback(event):
event_queue.put(event)
watch_id = self.add_watch_callback(key, callback, **kwargs)
try:
return event_queue.get(timeout=timeout)
except queue.Empty:
raise exceptions.WatchTimedOut()
finally:
self.cancel_watch(watch_id)
def _audio_data_generator(buff):
"""A generator that yields all available data in the given buffer.
Args:
buff - a Queue object, where each element is a chunk of data.
Yields:
A chunk of data that is the aggregate of all chunks of data in `buff`.
The function will block until at least one data chunk is available.
"""
while True:
# Use a blocking get() to ensure there's at least one chunk of data
chunk = buff.get()
if not chunk:
# A falsey value indicates the stream is closed.
break
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
data.append(buff.get(block=False))
except queue.Empty:
break
yield b''.join(data)
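# Usage sketch: feed chunks from a producer thread, then consume the
# aggregated stream; putting a falsey value (here None) ends iteration,
# as described in the docstring above.
import queue
import threading

buff = queue.Queue()

def producer():
    for chunk in (b'abc', b'def', b'ghi'):
        buff.put(chunk)
    buff.put(None)  # signal end of stream

threading.Thread(target=producer, daemon=True).start()
for data in _audio_data_generator(buff):
    print(data)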
def _test_remote_commands_async():
u"""
>>> kak = headless()
>>> @Remote.command(kak.pid)
... def write_position(pipe, line, column):
... pipe(utils.join(('exec ', 'a', str(line), ':', str(column), '<esc>'), sep=''))
>>> pipe(kak.pid, 'write-position', 'unnamed0')
>>> time.sleep(0.05)
>>> pipe(kak.pid, 'exec a,<space><esc>', 'unnamed0', sync=True)
>>> time.sleep(0.02)
>>> write_position('unnamed0')
>>> pipe(kak.pid, 'exec \%H', 'unnamed0', sync=True)
>>> Remote.onclient(kak.pid, 'unnamed0')(lambda selection: print(selection))
1:1, 1:5
>>> q = Queue()
>>> Remote.onclient(kak.pid, 'unnamed0', sync=False)(lambda selection: q.put(selection))
>>> print(q.get())
1:1, 1:5
>>> pipe(kak.pid, 'quit!', 'unnamed0')
>>> kak.wait()
0
>>> _fifo_cleanup()
"""
pass
def init(self):
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.good = set()
self.broken = {}
self.redirected = {}
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
open(path.join(self.outdir, 'output.txt'), 'w').close()
# create queues and worker threads
self.wqueue = queue.Queue()
self.rqueue = queue.Queue()
self.workers = []
for i in range(self.app.config.linkcheck_workers):
thread = threading.Thread(target=self.check_thread)
thread.daemon = True
thread.start()
self.workers.append(thread)
def pull_batch_from_queue(self):
"""
Take a rollout from the queue of the thread runner.
"""
# get top rollout from queue (FIFO)
rollout = self.runner.queue.get(timeout=600.0)
while not rollout.terminal:
try:
# Now, get any remaining *available* rollouts from the queue and append
# them to the one above. With queue.Queue(5), if everything were
# superfast (not usually the case), all 5 could be returned before the
# exception is raised, making the effective batch_size
# constants['ROLLOUT_MAXLEN'] * queue_maxlen (here 5). In practice,
# collecting a rollout of length ROLLOUT_MAXLEN takes longer than get(),
# so the queue is usually empty and the exception is raised almost
# immediately; ideally queue_maxlen should therefore be kept at 1.
# Note also that the next rollout starts generating automatically,
# because the runner is a thread that is always running and yields at
# the end of each generation. In effect, batch_size = constants['ROLLOUT_MAXLEN'].
rollout.extend(self.runner.queue.get_nowait())
except queue.Empty:
break
return rollout
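# The drain pattern above, in isolation: block for the first item, then
# take whatever else is immediately available without blocking. This
# simplified sketch ignores the terminal check used above.
import queue

def drain(q, timeout=600.0):
    items = [q.get(timeout=timeout)]  # block until at least one item arrives
    while True:
        try:
            items.append(q.get_nowait())
        except queue.Empty:
            break
    return items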
def __init__(self, delegate, address='', port=22, backlog=5, key=None, timeout=None, encoding='ascii', handler=Handler):
threading.Thread.__init__(self, name='sshim.Server')
self.exceptions = queue.Queue()
self.encoding = encoding
self.timeout = timeout
self.counter = Counter()
self.handler = handler
self.delegate = delegate
self.daemon = True
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((address, port))
self.socket.listen(backlog)
logging.info('sshim.Server listening on %s:%d', *self.socket.getsockname())
self.key = key or DEFAULT_KEY
def __init__(self, api_key, user_agent, base_url, entity, project, run_id):
self._endpoint = "{base}/{entity}/{project}/{run}/file_stream".format(
base=base_url,
entity=entity,
project=project,
run=run_id)
self._client = requests.Session()
self._client.auth = ('api', api_key)
self._client.timeout = self.HTTP_TIMEOUT
self._client.headers.update({
'User-Agent': user_agent,
})
self._file_policies = {}
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._thread_body)
# It seems we need to make this a daemon thread to get sync.py's atexit handler to run, which
# cleans this thread up.
self._thread.daemon = True
self._thread.start()
def _thread_body(self):
while True:
event = self._queue.get()
if isinstance(event, EventFinish):
break
self._handle_event(event)
while True:
try:
event = self._queue.get(True, 1)
except queue.Empty:
event = None
if event:
self._handle_event(event)
elif not self._jobs:
# Queue was empty and no jobs left.
break
def record_audio(self, rate, chunk):
"""Opens a recording stream in a context manager."""
# Create a thread-safe buffer of audio data
buff = queue.Queue()
audio_stream = self.audio_interface.open(
format=self.FORMAT,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1, rate=rate,
input=True, frames_per_buffer=chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't overflow
# while the calling thread makes network requests, etc.
stream_callback=functools.partial(self._fill_buffer, buff),
)
yield buff
audio_stream.stop_stream()
audio_stream.close()
# Signal the _audio_data_generator to finish
buff.put(None)
self.audio_interface.terminate()
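# Usage sketch; record_audio is presumably wrapped with
# @contextlib.contextmanager (the decorator is not shown above), so the
# buffer would be consumed inside a with-block. The recognizer instance
# and send_to_speech_api are hypothetical.
with recognizer.record_audio(rate=16000, chunk=1600) as buff:
    for data in _audio_data_generator(buff):
        send_to_speech_api(data)  # hypothetical consumer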
def __init__(self, delegate, logger=None, address='', port=22, backlog=5, key=None, timeout=None, encoding='ascii', handler=Handler):
threading.Thread.__init__(self, name='sshim.Server')
self.logger = logger
self.exceptions = queue.Queue()
self.encoding = encoding
self.timeout = timeout
self.counter = Counter()
self.handler = handler
self.delegate = delegate
self.daemon = True
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((address, port))
self.socket.listen(backlog)
msg = 'sshim.Server listening on {}:{}'.format(*self.socket.getsockname())
self.add_msg(msg)
self.key = key or DEFAULT_KEY
def __init__(self):
self.bid = {
'price': float(16),
'date': str('2015-01-05'),
'time': str(time.mktime(datetime.datetime.now().timetuple())),
'amount': int(10),
'towards': int(1),
'code': str('000001'),
'user': str('root'),
'strategy': str('example01'),
'status': '0x01',
'bid_model': 'strategy',
'amount_model': 'amount',
'order_id': str(random.random())
}
self.bid_queue = queue.Queue(maxsize=20)
def __init__(self, max_concurrent_batches=10, block_on_send=False,
block_on_response=False):
self.max_concurrent_batches = max_concurrent_batches
self.block_on_send = block_on_send
self.block_on_response = block_on_response
session = requests.Session()
session.headers.update({"User-Agent": "libhoney-py/"+VERSION})
self.session = session
# libhoney adds events to the pending queue for us to send
self.pending = queue.Queue(maxsize=1000)
# we hand back responses from the API on the responses queue
self.responses = queue.Queue(maxsize=2000)
self.threads = []
for i in range(self.max_concurrent_batches):
t = threading.Thread(target=self._sender)
t.daemon = True
t.start()
self.threads.append(t)
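# The _sender worker is not shown above; a minimal sketch of what each
# thread might do, assuming a hypothetical send_batch() helper and None
# as a shutdown sentinel:
def _sender(self):
    while True:
        ev = self.pending.get()  # block until an event is queued
        if ev is None:           # assumed shutdown sentinel
            break
        resp = send_batch([ev])  # hypothetical network call
        try:
            self.responses.put(resp, block=self.block_on_response)
        except queue.Full:
            pass                 # drop the response if nobody is reading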
def __init__(self, endpoint, project, token, api_base="api/v1",
error_handler=None, log_batch_size=20):
"""Init the service class.
Args:
endpoint: endpoint of report portal service.
project: project name to use for launch names.
token: authorization token.
api_base: defaults to api/v1; can be changed to another version.
error_handler: function called to handle errors that occur
while processing items (in the worker thread).
log_batch_size: number of log items buffered before being sent
in one batch.
"""
super(ReportPortalServiceAsync, self).__init__()
self.error_handler = error_handler
self.log_batch_size = log_batch_size
self.rp_client = ReportPortalService(
endpoint, project, token, api_base)
self.log_batch = []
self.supported_methods = ["start_launch", "finish_launch",
"start_test_item", "finish_test_item", "log"]
self.queue = queue.Queue()
self.listener = QueueListener(self.queue, self.process_item)
self.listener.start()
def __init__(self, fps=22.4, step_mul=1, render_sync=False):
"""Create a renderer for use by humans.
Make sure to call `init` with the game info, or just use `run`.
Args:
fps: How fast the game should run.
step_mul: How many game steps to take per observation.
render_sync: Whether to wait for the obs to render before continuing.
"""
self._fps = fps
self._step_mul = step_mul
self._render_sync = render_sync
self._obs_queue = queue.Queue()
self._render_thread = threading.Thread(target=self.render_thread,
name="Renderer")
self._render_thread.start()
self._game_times = collections.deque(maxlen=100) # Avg FPS over 100 frames.
self._render_times = collections.deque(maxlen=100)
self._last_time = time.time()
self._last_game_loop = 0
self._name_lengths = {}
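# Usage sketch; the class name and the way observations reach the render
# thread are assumptions (only the constructor appears above):
renderer = RendererHuman(fps=30, step_mul=8)  # hypothetical class name
obs = get_observation()                       # hypothetical observation source
renderer._obs_queue.put(obs)                  # hand it to the render thread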