def main():
    global executor
    try:
        session = requests.Session()
        executor = ThreadPoolExecutor()
        os.makedirs(dlDir, exist_ok=True)
        url = 'http://support.netgear.cn/'
        with open('netgear_cn_filelist.csv', 'w') as fout:
            cw = csv.writer(fout)
            cw.writerow(['model', 'fver', 'fname', 'furl', 'fdate', 'fsize', 'sha1', 'md5'])
            resp = session.get(url=url)
            root = html.fromstring(resp.text)
            startProd = 1
            prods = root.xpath(".//select[@name='select']/option")
            for iProd, prod in enumerate(prods[startProd:], startProd):
                # prodText = prod.xpath("./text()")[0].strip()
                prodUrl = prod.xpath("./@value")[0].strip()
                walkProd(session, urljoin(resp.url, prodUrl))
    except BaseException as ex:
        traceback.print_exc()
    finally:
        print('Wait for executor shutdown')
        executor.shutdown(True)
def main():
    os.makedirs(localstor, exist_ok=True)
    with open('tsd_dlink_filelist.csv', 'w') as fout:
        cw = csv.writer(fout)
        cw.writerow(['model', 'rev', 'fw_ver', 'fw_url', 'date', 'fsize', 'sha1', 'md5'])
        global executor
        executor = futures.ThreadPoolExecutor(None)
        models = parse_models()
        startI = 0  # next(i for i,sp in enumerate(models) if sp[0]=='DIR' and sp[1]=='845L')
        for model in models[startI:]:
            pfx, sfx = model[0], model[1]
            selectModel(pfx, sfx)
        print('wait for Executor shutdown')
        executor.shutdown(True)
def main():
    global executor
    try:
        sess = requests.Session()
        executor = ThreadPoolExecutor()
        os.makedirs(dlDir, exist_ok=True)
        with open('tenda_us_filelist.csv', 'w') as fout:
            cw = csv.writer(fout)
            cw.writerow(['model', 'fver', 'fname', 'furl', 'fdate', 'fsize', 'sha1', 'md5'])
            walkSelects()
            # walkModels(sess, 'http://www.tendaus.com/Default.aspx?Module=WebsiteEN&Action=DownloadCenter')
            # for Id in range(1, 200):
            #     walkTables(sess, "http://www.tendaus.com/Default.aspx?Module=WebsiteEN&Action=DownloadCenter&Id=%(Id)s" % locals())
    except BaseException as ex:
        traceback.print_exc()
    finally:
        print('Wait for executor shutdown')
        executor.shutdown(True)
def main():
    with open('ca_dlink_filelist.csv', 'w') as fout:
        cw = csv.writer(fout)
        cw.writerow(['model', 'rev', 'fw_ver', 'fw_url', 'date', 'fsize', 'sha1', 'md5'])
        global executor
        executor = futures.ThreadPoolExecutor()
        d = pq(url='http://support.dlink.ca/AllPro.aspx?type=all')
        # all 442 models
        models = [_.text_content().strip() for _ in d('tr > td:nth-child(1) > .aRedirect')]
        for model in models:
            prod_url = 'http://support.dlink.ca/ProductInfo.aspx?m=%s' % parse.quote(model)
            crawl_prod(prod_url, model)
        print('wait for Executor shutdown')
        executor.shutdown(True)
def __init__(self, app_name, args=None, loop=None):
    self._app_name = app_name
    self._shutting_down = False
    self._stats_mgr = None
    Option.parse_args(args)
    self._loop = loop or asyncio.get_event_loop()
    self._loop.set_debug(self.ASYNCIO_DEBUG)
    executor = ThreadPoolExecutor(max_workers=self.MAX_DEFAULT_EXECUTOR_THREADS)
    self._loop.set_default_executor(executor)
    self._init_logging()
    self._loop.add_signal_handler(signal.SIGINT, self.shutdown)
    self._loop.add_signal_handler(signal.SIGTERM, self.shutdown)
    self.logger = logging.getLogger(self._app_name)
def __init__(self, pool, dialect, url, logging_name=None, echo=None,
             execution_options=None, loop=None, **kwargs):
    self._engine = Engine(
        pool, dialect, url, logging_name=logging_name, echo=echo,
        execution_options=execution_options, **kwargs)
    self._loop = loop
    max_workers = None
    # https://www.python.org/dev/peps/pep-0249/#threadsafety
    if dialect.dbapi.threadsafety < 2:
        # This might seem overly-restrictive, but when we instantiate an
        # AsyncioResultProxy from AsyncioEngine.execute, subsequent
        # fetchone calls could be in different threads. Let's limit to one.
        max_workers = 1
    self._engine_executor = ThreadPoolExecutor(max_workers=max_workers)
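A hedged sketch of how this single-worker executor would typically be used; the method name _run_in_executor is illustrative, not this project's confirmed API. Blocking Engine/DBAPI calls are handed to loop.run_in_executor so that, when threadsafety < 2, every DBAPI call runs on the same worker thread.
async def _run_in_executor(self, func, *args):
    # Run a blocking SQLAlchemy/DBAPI call on the dedicated executor thread.
    loop = self._loop or asyncio.get_event_loop()
    return await loop.run_in_executor(self._engine_executor, func, *args)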
def get_node_health_mt(nodes_dict, check_type="normal", n_threads=8, print_out=False):
    """Use multithreading to check the health of each node.

    Arguments:
        nodes_dict {dict} -- maps node IP/domain name -> (username, mem, CPU)

    Keyword Arguments:
        check_type {str} -- kind of health check to run (default: {"normal"})
        n_threads {number} -- size of the thread pool (default: {8})
        print_out {bool} -- print each node's result as it completes (default: {False})
    """
    with ThreadPoolExecutor(max_workers=n_threads) as executor:
        futures = {executor.submit(check_node_health, nodeinfo[0], node, check_type, print_out): node
                   for node, nodeinfo in nodes_dict.items()}
        for future in as_completed(futures):
            node = futures[future]
            nodeinfo = nodes_dict[node]
            result = future.result()
            nodes_dict[node] = (nodeinfo[0], result)
            # print("{} {}".format(node, nodes_dict[node]))
def __init__(self, gRPC_module, inner_service_port=None):
    self.__peer_id = None if ObjectManager().peer_service is None else ObjectManager().peer_service.peer_id
    # For peer_service this refers to peer_inner_service; for rs_service it refers to rs_admin_service.
    self.inner_server = grpc.server(futures.ThreadPoolExecutor(max_workers=conf.MAX_WORKERS))
    self.outer_server = grpc.server(futures.ThreadPoolExecutor(max_workers=conf.MAX_WORKERS))
    # Private members; kept explicit to help code-intelligence tools.
    self.__gRPC_module = gRPC_module
    self.__port = 0
    self.__inner_service_port = inner_service_port
    self.__peer_target = None
    if inner_service_port is not None:  # This is a Peer's CommonService, not the RS's.
        peer_port = inner_service_port - conf.PORT_DIFF_INNER_SERVICE
        self.__peer_target = util.get_private_ip() + ":" + str(peer_port)
    self.__subscriptions = queue.Queue()  # tuples of (channel, stub)
    self.__group_id = ""
    # broadcast process
    self.__broadcast_process = self.__run_broadcast_process()
    self.__loop_functions = []
def describe_threaded_runner():
    @pytest.fixture(scope='module')
    def runner_fixture():
        return ThreadedRunner(thread_pool_executor=ThreadPoolExecutor(max_workers=2))

    def describe_run_all_threaded_behavior():
        def given_many_tasks():
            def when_first_task_fails():
                def expect_later_tasks_still_run(runner_fixture):
                    task1_mock, task2_mock = create_task_mock(count=2)
                    task1_mock.successful = False
                    with pytest.raises(TaskFailedError):
                        runner_fixture.run_all([task1_mock, task2_mock])
                    task1_mock.execute.assert_called_once()
                    task2_mock.execute.assert_called_once()
def build_tasks_hierarchy(swarmci_config, task_factory):
    stages_from_yaml = swarmci_config.pop('stages', None)
    if stages_from_yaml is None:
        raise SwarmCIError('Did not find "stages" key in the .swarmci file.')
    elif type(stages_from_yaml) is not list:
        raise SwarmCIError('The value of the "stages" key should be a list in the .swarmci file.')

    thread_pool_executor = ThreadPoolExecutor(max_workers=25)

    stage_tasks = []
    for stage in stages_from_yaml:
        job_tasks = []
        for job in stage['jobs']:
            commands = []
            for cmd in job['commands']:
                commands.append(task_factory.create(TaskType.COMMAND, cmd=cmd))
            job_tasks.append(task_factory.create(TaskType.JOB, job=job, commands=commands))
        stage_tasks.append(
            task_factory.create(TaskType.STAGE, stage=stage, jobs=job_tasks, thread_pool_executor=thread_pool_executor))

    return task_factory.create(TaskType.BUILD, stages=stage_tasks)
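For reference, a minimal swarmci_config dict the function above can consume, based only on the keys it actually reads (stages, jobs, commands); any other field names are illustrative.
swarmci_config = {
    'stages': [
        {
            'name': 'build',  # illustrative field, not read by the function
            'jobs': [
                {'name': 'unit-tests',  # illustrative field
                 'commands': ['pip install -r requirements.txt', 'pytest']},
            ],
        },
    ],
}
# build_tasks_hierarchy(swarmci_config, task_factory) would yield one BUILD task
# containing one STAGE task, one JOB task, and two COMMAND tasks.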
run_server.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def run():
    np.random.seed(42)
    config = ServerConfig.load(('./server.conf',))
    if sys.version_info >= (3, 6):
        # thread_name_prefix is only available on Python 3.6+
        thread_pool = ThreadPoolExecutor(thread_name_prefix='query-thread-')
    else:
        thread_pool = ThreadPoolExecutor(max_workers=32)
    app = Application(config, thread_pool)
    if config.ssl_key:
        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.load_cert_chain(config.ssl_chain, config.ssl_key)
        app.listen(config.port, ssl_options=ssl_ctx)
    else:
        app.listen(config.port)
    if config.user:
        os.setgid(grp.getgrnam(config.user)[2])
        os.setuid(pwd.getpwnam(config.user)[2])
    if sd:
        sd.notify('READY=1')
    tokenizer_service = TokenizerService()
    tokenizer_service.run()
    for language in config.languages:
        load_language(app, tokenizer_service, language, config.get_model_directory(language))
    sys.stdout.flush()
    tornado.ioloop.IOLoop.current().start()
def start_server(riot_api_token, listening_port, max_workers):
    """Starts a server."""
    service = MatchFetcher(riot_api_token)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    service_pb2.add_MatchFetcherServicer_to_server(service, server)
    server.add_insecure_port('[::]:%s' % listening_port)
    server.start()
    return server, service
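A hedged usage sketch for start_server; the token and port values are placeholders. server.start() does not block, so the caller has to keep the process alive.
import time

server, service = start_server(riot_api_token='YOUR-RIOT-API-TOKEN',
                               listening_port=50051, max_workers=10)
try:
    while True:
        time.sleep(3600)  # keep the main thread alive; RPCs run on the executor threads
except KeyboardInterrupt:
    server.stop(0)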
async def test_explicit_loop_threaded(event_loop):
    async with base.CleanModel() as model:
        model_name = model.info.name
        new_loop = asyncio.new_event_loop()
        with ThreadPoolExecutor(1) as executor:
            f = executor.submit(
                new_loop.run_until_complete,
                _deploy_in_loop(new_loop, model_name, model._connector.jujudata))
            f.result()
        await model._wait_for_new('application', 'ubuntu')
        assert 'ubuntu' in model.applications
def multi_thread(self, begin_id):
    self.make_id_set(begin_id)
    coll = MONGO_CLIENT['kr2']['kr_flashes_multi']
    for i in range(20):
        t = threading.Thread(target=self.loop_parse_news_flashes, name='thread%s' % i, args=[coll])
        t.start()
    # pool = ThreadPoolExecutor(64)
    # for i in range(16):
    #     pool.submit(parse_news_flashes)
def main():
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    from publicdns.client import PublicDNS

    domains = []
    filename = os.path.join(os.path.dirname(__file__), 'google_domains.txt')
    with open(filename, 'r') as f:
        domains = f.read().split('\n')
    size = len(domains)
    tqdmargs = {
        'total': 100,
        'unit': 'it',
        'unit_scale': True,
        'leave': True,
    }
    with ThreadPoolExecutor(max_workers=4) as pool:
        print('- dns.resolver')
        started = timeit.default_timer()
        resolver = dns_resolver.Resolver()
        resolver.nameservers = ['8.8.8.8', '8.8.4.4']
        futures = [pool.submit(resolver.query, domains[i % size], 'A')
                   for i in range(100)]
        for _ in tqdm(as_completed(futures), **tqdmargs):
            pass
        elapsed = timeit.default_timer() - started
        print('dns.resolver * 100 - took {}s'.format(elapsed))
    with ThreadPoolExecutor(max_workers=4) as pool:
        print('- PublicDNS')
        started = timeit.default_timer()
        client = PublicDNS()
        futures = [pool.submit(client.query, domains[i % size], 'A')
                   for i in range(100)]
        for _ in tqdm(as_completed(futures), **tqdmargs):
            pass
        elapsed = timeit.default_timer() - started
        print('\nPublicDNS * 100 - took {}s'.format(elapsed))
def __init__(self):
    self.markets = []
    self.observers = []
    self.depths = {}
    self.init_markets(config.markets)
    self.init_observers(config.observers)
    self.threadpool = ThreadPoolExecutor(max_workers=10)
def __init__(self,
             neo4j_client: Neo4jClient,
             max_workers: int = None):
    self.executor = ThreadPoolExecutor(max_workers=max_workers)
    super().__init__(neo4j_client)
def test_wait_for_all():
    def f(sleep_time: int):
        sleep(sleep_time)
        return sleep_time

    def calc(fs):
        fs_done = wait(fs).done
        r = sum(r.result() for r in fs_done)
        return r

    pool = ThreadPoolExecutor()
    fs = [pool.submit(f, arg) for arg in (3, 2, 5)]
    result = pool.submit(calc, fs).result()
    assert result == 10
def __init__(self, thread_num=2, *args, **kwargs):
    self.thread_num = thread_num
    self._queue = queue.Queue(maxsize=200)
    self.api_no_connection = TdxHq_API()
    self._api_worker = Thread(
        target=self.api_worker, args=(), name='API Worker')
    self._api_worker.start()
    self.executor = ThreadPoolExecutor(self.thread_num)
test_control_connection.py (project: deb-python-cassandra-driver, author: openstack)
def __init__(self):
    self.metadata = MockMetadata()
    self.added_hosts = []
    self.removed_hosts = []
    self.scheduler = Mock(spec=_Scheduler)
    self.executor = Mock(spec=ThreadPoolExecutor)
    self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(RoundRobinPolicy())