def open_search(self, parent_window, destination=None):
    """Fetch the search-results page for self.title from self.url.

    Builds the request URL (or POST data), runs a Retriever download
    thread while pumping the GTK main loop so the progress dialog stays
    responsive, and stores the decoded page in self.page.

    parent_window -- toplevel GTK window used for progress/error dialogs
    destination   -- optional path; when given the Retriever writes there
                     and the page content is not read back (returns True)
    Returns True on success, False on download or read failure.
    """
    self.titles = [""]
    self.ids = [""]
    # Build the request URL: substitute the title into a '%s' placeholder
    # if present, otherwise append it (unless a POST request carries the
    # title in its body instead). Spaces become '%20' either way.
    # NOTE: was `> 0`, which missed a placeholder at index 0.
    if self.url.find('%s') != -1:
        self.url = self.url % self.title
        self.url = string.replace(self.url, ' ', '%20')
    else:
        if not self.usepostrequest:
            self.url = string.replace(self.url + self.title, ' ', '%20')
    try:
        url = self.url.encode(self.encode)
    except UnicodeEncodeError:
        # The plugin's declared encoding cannot represent the title;
        # fall back to UTF-8.
        url = self.url.encode('utf-8')
    self.progress.set_data(parent_window, _("Searching"), _("Wait a moment"), True)
    if self.usepostrequest:
        postdata = self.get_postdata()
        retriever = Retriever(url, parent_window, self.progress, destination, useurllib2=self.useurllib2, postdata=postdata)
    else:
        retriever = Retriever(url, parent_window, self.progress, destination, useurllib2=self.useurllib2)
    retriever.start()
    # Keep the GUI alive while the download thread runs; if the user
    # cancelled (progress.status set), wait for the thread to finish.
    while retriever.isAlive():
        self.progress.pulse()
        if self.progress.status:
            retriever.join()
        while gtk.events_pending():
            gtk.main_iteration()
    try:
        if retriever.exception is None:
            if destination:
                # caller gave an explicit destination file
                # don't care about the content
                return True
            if retriever.html:
                ifile = file(retriever.html[0], 'rb')
                try:
                    self.page = ifile.read()
                finally:
                    ifile.close()
                # check for gzip compressed pages (magic bytes \037\213)
                # before decoding to unicode
                if len(self.page) > 2 and self.page[0:2] == '\037\213':
                    self.page = gutils.decompress(self.page)
                self.page = self.page.decode(self.encode, 'replace')
            else:
                return False
        else:
            self.progress.hide()
            gutils.urllib_error(_("Connection error"), parent_window)
            return False
    except IOError:
        log.exception('')
        # BUGFIX: previously fell through to `return True` after a failed
        # read of the downloaded file; report the failure instead.
        return False
    finally:
        urlcleanup()
    return True