def load_corpus(self, corenlpserver, process=True):
    # Each file in self.path holds one PubMed abstract in XML; load every one as a Document.
    trainfiles = [os.path.join(self.path, f) for f in os.listdir(self.path)]
    total = len(trainfiles)
    widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.AdaptiveETA(), ' ', pb.Timer()]
    pbar = pb.ProgressBar(widgets=widgets, maxval=total, redirect_stdout=True).start()
    time_per_abs = []
    for current, f in enumerate(trainfiles):
        #logging.debug('%s:%s/%s', f, current + 1, total)
        print('{}:{}/{}'.format(f, current + 1, total))
        did = f
        t = time.time()
        with open(f, 'r') as docfile:
            # Wrap the file contents in a single root element so the XML parser accepts it.
            article = "<Article>" + docfile.read() + "</Article>"
        soup = BeautifulSoup(article, 'xml')
        #doc = soup.find_all("article")
        title = soup.ArticleTitle.get_text()
        abstract = soup.AbstractText.get_text()
        doc_text = title + " " + abstract
        # Build the document from title + abstract, then tokenize and run the CoreNLP pipeline.
        newdoc = Document(doc_text, process=False, did=did)
        newdoc.sentence_tokenize("biomedical")
        newdoc.process_document(corenlpserver, "biomedical")
        #logging.info(len(newdoc.sentences))
        self.documents[newdoc.did] = newdoc
        abs_time = time.time() - t
        time_per_abs.append(abs_time)
        logging.debug("%s sentences, %ss processing time", len(newdoc.sentences), abs_time)
        pbar.update(current + 1)
    pbar.finish()
    abs_avg = sum(time_per_abs) * 1.0 / len(time_per_abs)
    logging.info("average time per abstract: %ss", abs_avg)