def extract(self, season, result):
    """Gather every episode link reachable from *result*, then resolve sources.

    Stage 1 scrapes the landing page for its episode links plus pagination
    info, fans the remaining pages out across a 16-thread pool, and merges
    the per-page results. Stage 2 runs ``episode_worker`` over each collected
    episode on the same pool.

    :param season: accepted for interface compatibility; not used in this
        method's visible body.
    :param result: URL of the first listing page; also stored on
        ``self.default_url``.
    """
    self.default_url = result
    with concurrent.futures.ThreadPoolExecutor(16) as executor:
        # Stage 1: the landing page supplies its own episodes together
        # with the base URL and page numbers for the rest of the listing.
        episodes, base_url, pages = self.page_worker(self.default_url)
        pending = []
        for page_no in pages:
            self.logger.debug('Processing page {}'.format(page_no))
            pending.append(
                executor.submit(self.page_worker, base_url + str(page_no))
            )
        done = concurrent.futures.wait(pending).done
        for future in done:
            # NOTE(review): the first page_worker call above unpacks a
            # 3-tuple, yet here the result is spliced straight into the
            # episode list — presumably paginated calls return only the
            # episode list; confirm against page_worker.
            episodes.extend(future.result())
        # Stage 2: resolve the available sources for every episode;
        # draining the iterator forces all map tasks to complete.
        for _ in executor.map(self.episode_worker, episodes):
            pass