    def __search(self, titles, year):
        try:
            # Build the search URL from the first (query-cleaned) title.
            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            # Normalized candidate titles and acceptable years (exact, +/- 1, or unknown).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            # Walk the result list down to (href, title, year-text) triples.
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'list-drama'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'media'})
            r = [dom_parser.parse_dom(i, 'div', attrs={'class': 'media-body'}) for i in r]
            r = [(dom_parser.parse_dom(i[0], 'a', req='href'), dom_parser.parse_dom(i[0], 'small', attrs={'class': 'pull-right'})) for i in r if i]
            r = [(i[0][0].attrs['href'], i[0][0].content, re.sub(r'<.+?>|</.+?>', '', i[1][0].content)) for i in r if i[0] and i[1]]
            r = [(i[0], i[1], re.findall(r'(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0') for i in r]

            # Prefer entries that carry a year over those without one.
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)

            # Take the first entry whose title and year both match.
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
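    # Hypothetical usage sketch (not part of the original module): __search returns a
    # domain-stripped relative path, which a caller would typically re-join with
    # base_link before requesting the detail page. The variable names below
    # (title, year, url) are illustrative assumptions, not code from this source.
    #
    #   url = self.__search([title], year)
    #   if url:
    #       url = urlparse.urljoin(self.base_link, url)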