def for_genre(genre, num):
    """Scrape page indexes 1..num-1 for *genre*, then download every URL found.

    First pass: soupit(page, genre) runs across a thread pool and returns a
    list of URLs per page (falsy on failure -- TODO confirm).  Second pass:
    dwnld((index, url), genre) downloads each collected URL.
    """
    pool = ThreadPool(multiprocessing.cpu_count() - 1)
    nums = list(range(1, num))
    results = pool.starmap(soupit, zip(nums, itertools.repeat(genre)))
    pool.close()
    pool.join()
    # Build up the list of urls with the results of all the sub-processes
    # that succeeded, flattened into a single list.
    new_results = []
    for j in results:
        if j:
            for i in j:
                new_results.append(i)
    pool = ThreadPool(multiprocessing.cpu_count() - 1)
    pool.starmap(dwnld, zip(enumerate(new_results), itertools.repeat(genre)))
    # BUG FIX: the original had a bare `pool.close` (attribute access, never
    # called) followed by a duplicate close() and no join(); close once and
    # join so all downloads complete before returning.
    pool.close()
    pool.join()
# Python Pool() class example source code
def __init__(self, args):
    """Initialize the Slave worker from parsed command-line *args*.

    Stores the request pool, timeout, HTTP method, match flags, headers and
    cookies on the instance for later use by the worker loop.
    """
    super(Slave, self).__init__()
    self._pool=Pool(args.thread_num)  # worker pool sized by the CLI thread count
    self._timeout=args.int_timeout
    # Resolve the HTTP verb name (e.g. "get"/"post") to the requests function.
    self._call_method=getattr(requests,args.int_method)
    # NOTE(review): flags are split on a double comma ',,' -- presumably so a
    # single comma may appear inside a flag value; confirm against the CLI help.
    self._flags=args.int_flags.split(',,')
    if args.int_headers!="":
        # input2json presumably normalizes a loose header string into JSON.
        self._headers=json.loads(input2json(args.int_headers))
    else:
        self._headers={}
    if args.int_cookies!='':
        # "k1:v1,k2:v2" -> {'k1': 'v1', 'k2': 'v2'}; this breaks if a cookie
        # value itself contains ':' -- TODO confirm values never do.
        cookiesl=args.int_cookies.split(',')
        self._cookies={x.split(':')[0]:x.split(':')[1] for x in cookiesl}
    else:
        self._cookies={}
def del_zabbix_events():
    """Delete the 10000 oldest rows from zabbix.events using 8 worker threads.

    Fetches candidate event ids in one query, then deletes each id on its own
    connection (the Mysql helper is presumably not thread-safe -- confirm).
    Errors and progress are written through the loging helper.
    """
    try:
        HOST = '172.16.4.93'
        PORT = 3306
        DB = 'zabbix'
        MYSQL = Mysql.MYSQL(USER,PASSWORD,HOST,PORT,DB)
        cmd = "select eventid from events order by eventid limit 10000;"
        results = MYSQL.Run(cmd)
        MYSQL.Close()
        # BUG FIX: bail out when there is nothing to delete -- the original
        # still spun up a pool and then crashed on results[-1] below.
        if not results:
            return
        def Delete(eventid):
            # One short-lived connection per deleted row.
            MySql = Mysql.MYSQL(USER,PASSWORD,HOST,PORT,DB)
            cmd = "delete from events where eventid=%i" % int(eventid[0])
            MySql.Run(cmd)
            MySql.Close()
        pool = ThreadPool(8)
        pool.map(Delete, results)
        pool.close()
        pool.join()
        loging.write('del_last_eventid:%s' %results[-1][0])
    except Exception as e:
        loging.write(e)
def invoke_mappers(n_mappers, batches):
    """Invoke *n_mappers* mapper lambdas over *batches*, PARALLEL_LAMBDAS at a time.

    invoke_lambda appends each mapper's output to the shared mapper_outputs
    list closed over via functools.partial; this function only orchestrates
    the waves of invocations.
    """
    mapper_outputs = []
    logger.info("# of Mappers {}".format(n_mappers))
    pool = ThreadPool(n_mappers)
    # Mapper ids are 1-based.
    mapper_ids = [i + 1 for i in range(n_mappers)]
    invoke_lambda_partial = partial(invoke_lambda,
                                    batches,
                                    mapper_outputs,
                                    mapper_lambda_name)
    mappers_executed = 0
    while mappers_executed < n_mappers:
        # BUG FIX: cap each wave at the *remaining* mappers, not the total --
        # the original min(PARALLEL_LAMBDAS, n_mappers) overshot the counter
        # on the final partial wave (harmless only because slicing truncates,
        # but the count was wrong).
        nm = min(PARALLEL_LAMBDAS, n_mappers - mappers_executed)
        results = pool.map(invoke_lambda_partial,
                           mapper_ids[mappers_executed: mappers_executed + nm])
        mappers_executed += nm
    pool.close()
    pool.join()
    logger.info("All the mappers finished")
def convert_to_demo_ids(match_ids, threads):
# Tell the user what is happening
print "Converting Match IDs to Demo IDs"
# Define the number of threads
pool = ThreadPool(threads)
# Calls get_demo_ids() and adds the value returned each call to an array called demo_ids
demo_ids = pool.map(get_demo_ids, match_ids)
pool.close()
pool.join()
# Create an array to add any captured errors to
errors = []
# Find any errors, add them to the errors array, and remove them from demo_ids
for text in demo_ids:
if " " in text:
errors.append(text[1:])
demo_ids.remove(text)
# Print the errors (if there are any)
print_errors(errors)
return demo_ids
def download(demo_ids, threads):
# Convert the DemoIDs to URLs
urls = convert_to_urls(demo_ids)
# Define the number of threads
pool = ThreadPool(threads)
# Make a folder for the event to save the files in
directory = make_dir()
# Calls get() and adds the filesize returned each call to an array called filesizes
filesizes = pool.map(get, urls)
pool.close()
pool.join()
# Create a float to store the filesizes in and add them together
total_file_size = sum(filesizes)
# Print the properly formatted filesize.
print "Successfully transferred %s. Enjoy!" % (format_file_size(total_file_size))
return True
def turn_page(self):
    """ Turn menu page """
    # Clear the current items before rebuilding the page.
    self.book_menu.set_items({}, 0, self.go_site_playback)
    books = self.get_books()
    # Select the slice of books belonging to the current page.
    book_list = self.set_books(self.current_page, books)
    d = self.book_menu.make_dict(book_list.items)
    self.book_menu.set_items(d, 0, self.go_site_playback)
    buttons = self.components[1].buttons
    size = len(buttons.values())
    # Nothing to render on an empty page (also avoids Pool(0)).
    if size == 0:
        return
    # Load the button cover images in parallel, one worker per button.
    pool = Pool(size)
    pool.map(self.set_image, buttons.values())
    pool.close()
    pool.join()
    # Highlight the first entry of the freshly built page.
    self.book_menu.select_by_index(0)
def all_or_nothing(matrix, graph, results):
    """Run a multi-threaded all-or-nothing assignment of *matrix* onto *graph*.

    Link loads are accumulated into *results* via the per-thread buffers of
    MultiThreadedAoN; returns the error report list.
    """
    aux_res = MultiThreadedAoN()
    aux_res.prepare(graph, results)
    # Guard clauses: the results object must have been prepared for this graph.
    if results.__graph_id__ is None:
        raise ValueError('The results object was not prepared. Use results.prepare(graph)')
    if results.__graph_id__ != graph.__id__:
        raise ValueError('The results object was prepared for a different graph')
    pool = ThreadPool(results.cores)
    all_threads = {'count': 0}
    report = []
    for origin in range(matrix.shape[0]):
        demand = matrix[origin, :]
        # Skip origins with no demand at all.
        if np.sum(demand) > 0:
            pool.apply_async(func_assig_thread,
                             args=(origin, demand, graph, results, aux_res, all_threads, report))
    pool.close()
    pool.join()
    # Collapse the per-thread load buffers into the final link loads.
    results.link_loads = np.sum(aux_res.temp_link_loads, axis=1)
    return report
def doWork(self):
    """Run the traffic assignment in worker threads, reporting progress via Qt signals."""
    # Initialize the progress bar range before any work starts.
    self.emit(SIGNAL("ProgressMaxValue(PyQt_PyObject)"), self.matrix.shape[0])
    self.emit(SIGNAL("ProgressValue(PyQt_PyObject)"), 0)
    # If we are going to perform All or Nothing
    if self.method['algorithm'] == 'AoN':
        pool = ThreadPool(self.results.cores)
        self.all_threads['count'] = 0
        for O in range(self.results.zones):
            a = self.matrix[O, :]
            # Only assign origins that actually have demand.
            if np.sum(a) > 0:
                pool.apply_async(self.func_assig_thread, args=(O, a))
        pool.close()
        pool.join()
        self.emit(SIGNAL("ProgressValue(PyQt_PyObject)"), self.matrix.shape[0])
        # Collapse the per-thread load buffers into the final link loads.
        self.results.link_loads = np.sum(self.aux_res.temp_link_loads, axis=1)
    self.emit(SIGNAL("ProgressText (PyQt_PyObject)"), "Saving Outputs")
    self.emit(SIGNAL("finished_threaded_procedure( PyQt_PyObject )"), None)
def start():
    """Entry point: load the config and monitor every configured room in parallel."""
    # An optional first CLI argument overrides the default config path.
    config = utils.load_config(sys.argv[1]) if len(sys.argv) > 1 else utils.load_config()
    logger = utils.get_logger()
    # NOTE(review): the log strings below are mojibake ('????') from a lossy
    # encoding in the original source; reproduced byte-identically.
    logger.info('????')
    room_count = len(config['ROOM_URLS'])
    if room_count == 0:
        logger.info('?????????????')
        exit(0)
    watchers = ThreadPool(room_count)
    # One monitor per room, each running on its own worker thread.
    for room_url in config['ROOM_URLS']:
        monitor = Monitor(room_url)
        watchers.apply_async(monitor.run)
    watchers.close()
    try:
        watchers.join()
    except KeyboardInterrupt:
        logger.warning('????')
        exit(1)
def _multiThreadedTest(infiles):
    """Parse each input file in parallel, then blacklist and geolocate the IPs found."""
    file_args = list(infiles)
    # BUG FIX: skip the pool entirely for an empty input list -- the original
    # ThreadPool(len(arg1)) raises on zero workers; also close/join the pool,
    # which the original leaked.
    if file_args:
        pool = ThreadPool(len(file_args))
        # _csvParse(path, home): home is the shared second argument.
        pool.starmap(_csvParse, zip(file_args, repeat(home)))
        pool.close()
        pool.join()
    print("Parsed through %d IP addresses." % (len(set(internal_ips + external_ips))))
    _blackList(hosts=set(internal_ips + external_ips))
    # NOTE(review): `hosts` is not defined in this function -- presumably a
    # module-level global populated by _csvParse; confirm.
    _geolocate(hosts)
#print(privateIP.text)
#_initialize()
#_multiThreadedTest(last30)
#res_list = [x[0] for x in compromise]
#_barChart(yValues=(DATA), xValues=sorted(TITLES),outfile="bar.png")
#text_file = open("badguys.txt", "w")
#for i in biglist:
# text_file.write("%s\n" % (i))
#_pieChart(ports, "Top ports", 10, "topports.png")
#_folium("test.html")
def Proposal_Massive(days_list=None,
proposal_days_interval=1,
CPUs=4,
collection=COLLECTION):
assert isinstance(days_list, list), "Days list must be a list"
days_count = len(days_list)
pool = ThreadPool(CPUs)
try:
print "Creating proposals for days between {}-{} in {}".format(days_list[0], days_list[-1], collection)
pool.map(Proposal_creator, (
(day, proposal_days_interval, idx + 1, days_count, collection)
for idx, day in enumerate(days_list)))
except Exception as e:
print "Thread error at processing '{}'".format(e)
pool.close()
pool.join()
def LaGouSpiderWithKeyWord(position, city):
    """Scrape Lagou job listings for *position* in *city* and write lagou.csv.

    NOTE(review): the print string below is mojibake ('??') from a lossy
    encoding in the original source; reproduced byte-identically.
    """
    # Number of result pages available for this query.
    pageCount = SearchPageCount(position, city)
    if pageCount == 0:
        print('???????????????????')
        return
    # Build one AJAX URL per result page (pn is 1-based).
    urls = []
    for i in range(0, pageCount):
        url = 'http://www.lagou.com/jobs/positionAjax.json?'
        params = {'city': city, 'kd': position, 'pn': i+1}
        url += parse.urlencode(params)
        urls.append(url)
    pool = ThreadPool(processes=8)
    # Fetch every page in parallel; each item is a DataFrame of rows.
    rdatas = pool.map(get_rdata, urls)
    # PERF FIX: concatenate all page frames in a single pass -- the original
    # grew totaldata with one pd.concat per page, which is quadratic.
    totaldata = pd.concat([DataFrame().T] + list(rdatas))
    totaldata.to_csv('lagou.csv')
def imap(requests, stream=True, pool=None, size=2, exception_handler=None):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: If False, the content will not be downloaded immediately.
    :param pool: optional existing pool to run on; when omitted a private
        Pool(size) is created and closed on completion.
    :param size: Specifies the number of requests to make at a time. default is 2
    :param exception_handler: Callback function, called when exception occured. Params: Request, Exception
    """
    def send(r):
        return r.send(stream=stream)
    # BUG FIX: decide ownership *before* the fallback assignment.  The
    # original tested `if not pool` after rebinding pool, so a privately
    # created pool was never closed.
    created_here = not pool
    if created_here:
        pool = Pool(size)
    for request in pool.imap(send, requests):
        if request.response is not None:
            yield request.response
        elif exception_handler:
            exception_handler(request, request.exception)
    if created_here:
        pool.close()
def imap_unordered(requests, stream=True, pool=None, size=2, exception_handler=None):
    """Concurrently converts a generator object of Requests to
    a generator of Responses (in completion order).

    :param requests: a generator of Request objects.
    :param stream: If False, the content will not be downloaded immediately.
    :param pool: optional existing pool to run on; when omitted a private
        Pool(size) is created and closed on completion.
    :param size: Specifies the number of requests to make at a time. default is 2
    :param exception_handler: Callback function, called when exception occured. Params: Request, Exception
    """
    def send(r):
        return r.send(stream=stream)
    # BUG FIX: the original built a fallback pool and then unconditionally
    # created a *second* Pool(size) inside contextlib.closing, shadowing and
    # ignoring any caller-supplied pool and leaking the fallback one; its
    # trailing `if not pool: pool.close()` was dead code.  Use the caller's
    # pool when given, otherwise create one and always close only our own.
    own_pool = None
    if not pool:
        own_pool = pool = Pool(size)
    try:
        for request in pool.imap_unordered(send, requests):
            if request.response is not None:
                yield request.response
            elif exception_handler:
                exception_handler(request, request.exception)
    finally:
        if own_pool is not None:
            own_pool.close()
def download(self):
    """Download every resource URL in self.resource_bundles into the resources folder.

    Returns (number of successful downloads, list of failed source URLs).
    """
    logger.info('[Downloader] takes hand')
    self.mkdir(self.resources_folder)
    # Flatten the bundles into (target-path, url) work items.
    tasks = []
    for meta, urls in self.resource_bundles:
        for url in urls:
            tasks.append((self.get_filepath(meta, url), url))
    # Eight workers; closing() guarantees the pool is released.
    with contextlib.closing(Pool(8)) as pool:
        outcome = pool.map(self.downloading, tasks)
    succeeded = [ok for ok, _ in outcome]
    fails = [src for ok, src in outcome if not ok]
    logger.info('[Downloader] download %d items (Total: %d)!',
                sum(succeeded), len(succeeded))
    return sum(succeeded), fails
def postgres_main(ipdict,threads):
printPink("crack postgres now...")
print "[*] start postgres %s" % time.ctime()
starttime=time.time()
global lock
lock = threading.Lock()
global result
result=[]
pool=Pool(threads)
for ip in ipdict['postgres']:
pool.apply_async(func=postgreS,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop crack postgres %s" % time.ctime()
print "[*] crack postgres done,it has Elapsed time:%s " % (time.time()-starttime)
return result
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['ssl']):
printPink("crack ssl now...")
print "[*] start test openssl_heart %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['ssl']:
pool.apply_async(func=self.openssl_test,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop ssl serice %s" % time.ctime()
print "[*] crack ssl done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['ftp']):
printPink("crack ftp now...")
print "[*] start crack ftp %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['ftp']:
pool.apply_async(func=self.ftp_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop ftp serice %s" % time.ctime()
print "[*] crack ftp done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['mysql']):
printPink("crack mysql now...")
print "[*] start crack mysql %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['mysql']:
pool.apply_async(func=self.mysq1,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop crack mysql %s" % time.ctime()
print "[*] crack mysql done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def pop_main(ipdict,threads):
printPink("crack pop now...")
print "[*] start crack pop %s" % time.ctime()
starttime=time.time()
global lock
lock = threading.Lock()
global result
result=[]
pool=Pool(threads)
for ip in ipdict['pop3']:
pool.apply_async(func=pop3_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop pop serice %s" % time.ctime()
print "[*] crack pop done,it has Elapsed time:%s " % (time.time()-starttime)
return result
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['mongodb']):
printPink("crack mongodb now...")
print "[*] start crack mongodb %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['mongodb']:
pool.apply_async(func=self.mongoDB,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop mongoDB serice %s" % time.ctime()
print "[*] crack mongoDB done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
printPink("crack snmp now...")
print "[*] start crack snmp %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in pinglist:
pool.apply_async(func=self.snmp_l,args=(str(ip).split(':')[0],""))
pool.close()
pool.join()
print "[*] stop crack snmp %s" % time.ctime()
print "[*] crack snmp done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['rsync']):
printPink("crack rsync now...")
print "[*] start crack rsync %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['rsync']:
pool.apply_async(func=self.rsync_creak,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop rsync serice %s" % time.ctime()
print "[*] crack rsync done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['ldap']):
printPink("crack ldap now...")
print "[*] start ldap %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['ldap']:
pool.apply_async(func=self.ldap_creak,args=(str(ip).split(':')[0],str(ip).split(':')[1]))
pool.close()
pool.join()
print "[*] stop ldap serice %s" % time.ctime()
print "[*] crack ldap done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['smb']):
printPink("crack smb now...")
print "[*] start crack smb serice %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['smb']:
pool.apply_async(func=self.smb_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop smb serice %s" % time.ctime()
print "[*] crack smb done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['vnc']):
printPink("crack vnc now...")
print "[*] start crack vnc %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['vnc']:
pool.apply_async(func=self.vnc_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop vnc serice %s" % time.ctime()
print "[*] crack vnc done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def postgres_main(ipdict,threads):
printPink("crack postgres now...")
print "[*] start postgres %s" % time.ctime()
starttime=time.time()
global lock
lock = threading.Lock()
global result
result=[]
pool=Pool(threads)
for ip in ipdict['postgres']:
pool.apply_async(func=postgreS,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop crack postgres %s" % time.ctime()
print "[*] crack postgres done,it has Elapsed time:%s " % (time.time()-starttime)
return result
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['ssl']):
printPink("crack ssl now...")
print "[*] start test openssl_heart %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['ssl']:
pool.apply_async(func=self.openssl_test,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop ssl serice %s" % time.ctime()
print "[*] crack ssl done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['ftp']):
printPink("crack ftp now...")
print "[*] start crack ftp %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['ftp']:
pool.apply_async(func=self.ftp_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop ftp serice %s" % time.ctime()
print "[*] crack ftp done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)