def __search(self, title, localtitle, year, content_type):
    try:
        # Normalise both the original and the localised title for comparison.
        t = cleantitle.get(title)
        tq = cleantitle.get(localtitle)
        # Accept the requested year, one year either side, or no year at all ('0').
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        # POST the localised title to the site's JSON search endpoint.
        query = urlparse.urljoin(self.base_link, self.search_link)
        post = urllib.urlencode({'k': tq})
        r = client.request(query, post=post)
        r = json.loads(r)

        # Keep only results of the requested content type.
        r = [i.get('result') for i in r if i.get('type', '').encode('utf-8') == content_type]
        r = [(i.get('url'), i.get('originalTitle'), i.get('title'), i.get('anneeProduction', 0), i.get('dateStart', 0)) for i in r]
        # Strip HTML tags from the titles; fall back to the start date when no production year is given.
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1] if i[1] else ''), re.sub('<.+?>|</.+?>', '', i[2] if i[2] else ''), i[3] if i[3] else re.findall(r'(\d{4})', i[4])[0]) for i in r if i[3] or i[4]]
        # Entries with a year sort before entries without one.
        r = sorted(r, key=lambda i: int(i[3]), reverse=True)
        # Take the first result whose year is acceptable and whose cleaned title matches either the original or the localised title.
        r = [i[0] for i in r if i[3] in y and (t.lower() == cleantitle.get(i[1].lower()) or tq.lower() == cleantitle.get(i[2].lower()))][0]

        # Return only the path component of the matched URL.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
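
# A minimal sketch (an assumption, not part of the original module) of how a
# public entry point could call __search; the movie() signature and the
# 'movie' content_type string follow common Kodi scraper conventions and may
# differ in this source.
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        return self.__search(title, localtitle, year, 'movie')
    except Exception:
        return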