def strmFile(self, i):
    # Write a movie .strm file (plus a movie.nfo) into the Kodi library folder.
    # i: dict with 'name', 'title', 'year', 'imdb', 'tmdb' keys.
    try:
        name, title, year, imdb, tmdb = i['name'], i['title'], i['year'], i['imdb'], i['tmdb']
        # URL-encode the values that go into the plugin playback URL
        sysname, systitle = urllib.quote_plus(name), urllib.quote_plus(title)
        # strip characters illegal in filenames (Python 2 str.translate signature)
        transtitle = cleantitle.normalize(title.translate(None, '\/:*?"<>|'))
        content = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s' % (sys.argv[0], sysname, systitle, year, imdb, tmdb)
        folder = lib_tools.make_path(self.library_folder, transtitle, year)
        lib_tools.create_folder(folder)
        # the .strm file content is the plugin:// playback URL for this movie
        lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename(transtitle) + '.strm'), content)
        lib_tools.write_file(os.path.join(folder, 'movie.nfo'), lib_tools.nfo_url('movie', i))
    except:
        # best-effort library write: silently skip entries that fail
        pass
# Python quote_plus() usage examples
def strmFile(self, i):
    # Write a TV episode .strm file (and a tvshow.nfo) into the Kodi library.
    # i: dict with title/year/imdb/tvdb/season/episode/tvshowtitle/premiered.
    try:
        title, year, imdb, tvdb, season, episode, tvshowtitle, premiered = i['title'], i['year'], i['imdb'], i['tvdb'], i['season'], i['episode'], i['tvshowtitle'], i['premiered']
        episodetitle = urllib.quote_plus(title)
        systitle, syspremiered = urllib.quote_plus(tvshowtitle), urllib.quote_plus(premiered)
        # strip characters illegal in filenames (Python 2 str.translate signature)
        transtitle = cleantitle.normalize(tvshowtitle.translate(None, '\/:*?"<>|'))
        content = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&date=%s' % (sys.argv[0], episodetitle, year, imdb, tvdb, season, episode, systitle, syspremiered)
        # the show folder gets the tvshow.nfo ...
        folder = lib_tools.make_path(self.library_folder, transtitle, year)
        lib_tools.create_folder(folder)
        lib_tools.write_file(os.path.join(folder, 'tvshow.nfo'), lib_tools.nfo_url('tv', i))
        # ... and the per-season folder gets the 'Show S01E01.strm' playback file
        folder = lib_tools.make_path(self.library_folder, transtitle, year, season)
        lib_tools.create_folder(folder)
        lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename('%s S%02dE%02d' % (transtitle, int(season), int(episode))) + '.strm'), content)
    except:
        # best-effort library write: silently skip entries that fail
        pass
def worker(self, name, url):
    # Resolve a trailer reference to a playable stream URL.
    # url may be: a link on this site (resolved directly), a bare YouTube
    # video id, or anything else. Any failure falls back to a site search
    # for '<name> trailer'.
    try:
        if url.startswith(self.base_link):
            url = self.resolve(url)
            if not url: raise Exception()
            return url
        # fix: also recognise https:// URLs here. Previously only 'http:'
        # was checked, so a full https URL was wrongly treated as a bare
        # YouTube id and mangled through youtube_watch before failing.
        elif not url.startswith(('http://', 'https://')):
            # a bare value is assumed to be a YouTube video id
            url = self.youtube_watch % url
            url = self.resolve(url)
            if not url: raise Exception()
            return url
        else:
            # unknown external URL: fall through to the search fallback
            raise Exception()
    except:
        query = name + ' trailer'
        query = self.search_link % urllib.quote_plus(query)
        return self.search(query)
def request(url, check, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False, limit=None, referer=None, cookie=None, timeout='30'):
    """Fetch `url` via client.request and verify the response contains the
    `check` marker; when it does not, retry through up to three randomly
    chosen proxy prefixes. Returns the response body, or None on failure."""
    try:
        r = client.request(url, close=close, redirect=redirect, proxy=proxy, post=post, headers=headers, mobile=mobile, XHR=XHR, limit=limit, referer=referer, cookie=cookie, timeout=timeout)
        # fix: identity comparison with None / False instead of '=='
        if r is None and error is False: return r
        # an empty body is also accepted as-is (caller decides what to do)
        if check in str(r) or str(r) == '': return r
        # shuffle the proxy list (random sort key, twice) and try the first three
        proxies = sorted(get(), key=lambda x: random.random())
        proxies = sorted(proxies, key=lambda x: random.random())
        proxies = proxies[:3]
        for p in proxies:
            # proxies are URL prefixes: append the (encoded) target url
            p += urllib.quote_plus(url)
            if post is not None: p += urllib.quote_plus('?%s' % post)
            r = client.request(p, close=close, redirect=redirect, proxy=proxy, headers=headers, mobile=mobile, XHR=XHR, limit=limit, referer=referer, cookie=cookie, timeout='20')
            if check in str(r) or str(r) == '': return r
    except:
        # best-effort: fall through and implicitly return None
        pass
def movie(self, imdb, title, localtitle, aliases, year):
    # Resolve a movie to this site's path by first fetching its localized
    # (es-AR) title from IMDB, then matching that title in the site search.
    try:
        t = 'http://www.imdb.com/title/%s' % imdb
        # request the IMDB page with an Argentine-Spanish locale header so
        # <title> carries the localized movie name used by this site
        t = client.request(t, headers={'Accept-Language': 'es-AR'})
        t = client.parseDOM(t, 'title')[0]
        # strip the trailing '(YYYY) ...' portion of the page title
        t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip().encode('utf-8')
        q = self.search_link % urllib.quote_plus(t)
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q)
        r = client.parseDOM(r, 'div', attrs = {'class': 'item'})
        # per result item: (href list, title span, year span)
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tt'}), client.parseDOM(i, 'span', attrs = {'class': 'year'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # exact cleaned-title + year match; IndexError (no match) hits except
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2]][0]
        # keep only the path part of the url (drop scheme/host if present)
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def searchMovie(self, title, year, aliases, headers):
    """Search the site for a movie. Prefer a result whose parenthesised
    year matches `year`; otherwise fall back to the first alias match.
    Returns the result href, or None when nothing matches."""
    try:
        title = cleantitle.normalize(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        html = client.request(search_url, headers=headers, timeout='15')
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        hrefs = client.parseDOM(items, 'a', ret='href')
        labels = client.parseDOM(items, 'a', ret='title')
        # (href, label, [year-in-parentheses]) per result
        candidates = []
        for href, label in zip(hrefs, labels):
            candidates.append((href, label, re.findall('\((\d{4})', label)))
        match = None
        try:
            dated = [(c[0], c[1], c[2][0]) for c in candidates if len(c[2]) > 0]
            match = [c[0] for c in dated if self.matchAlias(c[1], aliases) and (year == c[2])][0]
        except:
            match = None
        if match is None:
            # no year-qualified hit: take the first alias match regardless of year
            match = [c[0] for c in candidates if self.matchAlias(c[1], aliases)][0]
        return match
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    # Search the site for a movie; prefer a year-matching result, fall back
    # to the first alias match, then append '/watch' to form the final url.
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'item-detail'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        # (href, label, [year found in '(YYYY' of the label]) per result
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            # no year-qualified hit: fall through to alias-only matching below
            url = None
            pass
        if (url == None):
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        # this site serves playback at '<result>/watch'
        url = '%s/watch' % url
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # Resolve a movie via the site's POST search endpoint (through proxy),
    # matching cleaned title and exact year in the returned list items.
    try:
        t = cleantitle.get(title)
        p = self.post_link % urllib.quote_plus(cleantitle.query(title))
        q = urlparse.urljoin(self.base_link, self.search_link)
        # 'playing top' is the marker string the proxy layer checks for
        r = proxy.request(q, 'playing top', post=p, XHR=True)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        # split the anchor text into (title, year) via 'Title (YYYY'
        r = [(i[0], re.findall('(.+?)\((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i for i in r if t == cleantitle.get(i[1]) and str(year) == i[2]]
        # un-proxy the winning href; IndexError (no match) hits except
        url = proxy.parse(r[0][0])
        # keep only the path portion of the url
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def searchMovie(self, title, year, aliases):
    # Locate a movie page: first try the guessable '<base>/<title>-<year>/'
    # url directly; if that 404s, fall back to the site search.
    try:
        url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
        # output='geturl' returns the final url on success, None on failure
        url = client.request(url, output='geturl')
        if url == None:
            t = cleantitle.get(title)
            q = '%s %s' % (title, year)
            q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(q))
            r = client.request(q)
            r = client.parseDOM(r, 'div', attrs={'class': 'inner'})
            r = client.parseDOM(r, 'div', attrs={'class': 'info'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            # strip the 'Watch ...' prefix and split into (title, year)
            r = [(i[0], re.findall('(?:^Watch Movie |^Watch movies |^Watch |)(.+?)\((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            # alias + exact year match; IndexError (no match) hits except
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
        if url == None: raise Exception()
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    # Search the site ('ml-item' result cards) for a movie: prefer a
    # year-matching result, otherwise the first alias match.
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        # (href, label, [year found in '(YYYY' of the label]) per result
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            # no year-qualified hit: fall through to alias-only matching below
            url = None
            pass
        if (url == None):
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Find a movie in the site's 'thumb' search results, matching the
    cleaned title (substring) and exact year. Returns its href or None."""
    try:
        search_url = urlparse.urljoin(self.base_link, self.search_link)
        search_url = search_url % urllib.quote_plus(title)
        wanted = cleantitle.get(title)
        html = client.request(search_url)
        thumbs = client.parseDOM(html, 'div', attrs = {'class': 'thumb'})
        rows = []
        for thumb in thumbs:
            hrefs = client.parseDOM(thumb, 'a', ret='href')
            names = client.parseDOM(thumb, 'a', ret='title')
            years = re.findall('(\d{4})', thumb)
            # keep only results where all three pieces were found
            if hrefs and names and years:
                rows.append((hrefs[0], names[0], years[0]))
        hits = [row[0] for row in rows if wanted in cleantitle.get(row[1]) and year == row[2]]
        # IndexError (no match) is caught below and yields None
        return hits[0]
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve a TV episode: search for the show, pick the result whose
    # label carries the wanted 'Season N', then tag on the episode number.
    try:
        # url is a query string produced by tvshow(); decode it into a dict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = urlparse.urljoin(self.base_link, self.search_link)
        query = query % urllib.quote_plus(data['tvshowtitle'])
        t = cleantitle.get(data['tvshowtitle'])
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs = {'class': 'thumb'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('(\d{4})', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # title substring match plus literal 'Season N' in the result label
        url = [i[0] for i in r if t in cleantitle.get(i[1]) and ('Season %s' % season) in i[1]][0]
        url += '?episode=%01d' % int(episode)
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Query the site's JSON TV-search endpoint and return the slug of the
    entry whose cleaned title equals the wanted title and whose year
    matches. Returns None when nothing matches or the request fails."""
    try:
        search_url = self.search_tv_link % (urllib.quote_plus(tvshowtitle))
        search_url = urlparse.urljoin(self.base_link, search_url)
        payload = json.loads(client.request(search_url, XHR=True))
        wanted = cleantitle.get(tvshowtitle)
        slugs = [entry['slug'] for entry in payload
                 if wanted == cleantitle.get(entry['title']) and year == str(entry['year'])]
        # IndexError (no match) is caught below and yields None
        return slugs[0].encode('utf-8')
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve a TV episode on the mobile site: search for '<show> - Season N',
    # then return an urlencoded descriptor dict consumed later by sources().
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # SECURITY NOTE(review): eval() on strings pulled from the url.
        # Tolerated only because the url is built by this addon's own
        # tvshow() method; never feed external input here
        # (ast.literal_eval would be the safe equivalent).
        headers = eval(data['headers'])
        aliases = eval(data['aliases'])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.getsearch(title)
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        r = client.request(query, headers=headers, timeout='30', mobile=True)
        # anchors look like ...alias=<id>'"><Show> - Season <N></a>
        match = re.compile('alias=(.+?)\'">(.+?)</a>').findall(r)
        r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in match]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        # alias + season match; IndexError (no match) hits except
        r = [i[0] for i in r if self.matchAlias(i[1], aliases) and int(season) == int(i[2])][0]
        url = {'type': 'tvshow', 'id': r, 'episode': episode, 'season': season, 'headers': headers}
        url = urllib.urlencode(url)
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # Resolve a movie from the site's search results ('cluetip' items),
    # allowing the listed year to be off by one either way.
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(title)
        # accepted years: exact, +/-1, or '0' (no year shown on the site)
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item'})
        r = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'cluetip'}, req='href'), dom_parser.parse_dom(i, 'div', attrs={'class': 'description'})) for i in r]
        r = [(i[0][0].attrs['href'], dom_parser.parse_dom(i[1], 'h3', attrs={'class': 'text-nowrap'}), dom_parser.parse_dom(i[1], 'div', attrs={'class': 'meta'})) for i in r if i[0] and i[1]]
        r = [(i[0], i[1][0].content, dom_parser.parse_dom(i[2], 'span', attrs={'class': 'pull-left'})) for i in r if i[0] and i[1] and i[2]]
        # reduce the meta span to just its digits (the year, or '' -> skip)
        r = [(i[0], i[1], re.sub('[^\d]+', '', i[2][0].content)) for i in r if i[2]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
        # exact cleaned-title match; IndexError (no match) hits except
        r = [i[0] for i in r if cleantitle.get(i[1]) == t and i[2] in y][0]
        return source_utils.strip_domain(r)
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # Resolve a movie on a cookie-protected site: fetch session cookies
    # first, then match 'Title (YYYY' in the 'cell_container' results.
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        # site requires a cookie/header handshake before searching
        c, h = self.__get_cookies(query)
        t = cleantitle.get(title)
        r = client.request(query, headers=h, cookie=c)
        r = client.parseDOM(r, 'div', attrs={'class': 'cell_container'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # split the anchor title into (name, year) via 'Name (YYYY'
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        # exact cleaned-title + year match; IndexError (no match) hits except
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # keep only the path portion of the url
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    # Search the site ('ml-item' result cards) for a movie: prefer a
    # year-matching result, otherwise the first alias match, then append
    # '/watch/' to form the playback url.
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        # (href, label, [year found in '(YYYY' of the label]) per result
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            # no year-qualified hit: fall through to alias-only matching below
            url = None
            pass
        if (url == None):
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        # this site serves playback at '<result>/watch/'
        url = '%s/watch/' % url
        return url
    except:
        return
def __search(self, title, season):
    # Search a French-language site for '<title> saison <N>' and return the
    # path of the matching result.
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(title)
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
        r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # split '<name> saison <N>' into (name, season); fall back to season '0'
        r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        # prefer a parenthesised alternative title when present: 'X (Y)' -> 'Y'
        r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
        r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
        # exact cleaned-title + season match; IndexError (no match) hits except
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(i[2]) == int(season)][0]
        # keep only the path portion of the url
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def __search(self, title, year):
    """Search the site and return the domain-stripped path of the result
    whose cleaned title matches `title` and whose year is within +/-1 of
    `year` (or missing). Returns None on failure."""
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(title)
        # accepted years: exact, +/-1, or '0' (no year shown on the site)
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_single'})
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'unfilm'}, req='href')
        # fix: parse the title/year out of each individual result `i`, not the
        # whole result list `r` — the original re-parsed the full list per
        # iteration, stamping the first result's title/year onto every link.
        r = [(i.attrs['href'], dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}), dom_parser.parse_dom(i, 'span', attrs={'class': 'post-year'})) for i in r]
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1][0].content), i[2][0].content if i[2] else '0') for i in r if i[1]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
        # exact cleaned-title match; IndexError (no match) hits except
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and i[2] in y][0]
        return source_utils.strip_domain(r)
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # Resolve a movie via the site's JSON autocomplete ('suggestions'),
    # allowing the year embedded in the suggestion text to be off by one.
    try:
        query = self.moviesearch_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = json.loads(result)
        result = [i for i in result['suggestions'] if len(i) > 0]
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
        # each suggestion: 'data' is the url/path, 'value' is the display text
        result = [(i['data'].encode('utf8'), i['value'].encode('utf8')) for i in result]
        result = [i for i in result if cleantitle.get(title) in cleantitle.get(i[1])]
        # first suggestion whose text contains an acceptable year
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        # keep only the path portion if 'data' was a full url
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Resolve a TV show on a Polish site: results carry both a Polish
    # ('title-pl') and English ('title-en') title; match against their join.
    try:
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = client.parseDOM(result, 'div', attrs={'class': 'movie clearfix'})
        result = [(client.parseDOM(i, 'a', ret='href'),
                   client.parseDOM(i, 'span', attrs={'class': 'title-pl'}),
                   client.parseDOM(i, 'span', attrs={'class': 'title-en'}),
                   client.parseDOM(i, 'img', ret='src'),
                   client.parseDOM(i, 'p'),
                   client.parseDOM(i, 'p', attrs={'class': 'plot'})) for i in result ]
        # (href, 'pl-title en-title', years found in the first <p>)
        result = [(i[0][0], u" ".join(i[1] + i[2]), re.findall('(\d{4})', i[4][0])) for i in result]
        # only TV entries (their urls contain 'serial')
        result = [i for i in result if 'serial' in i[0]]
        result = [i for i in result if cleantitle.get(tvshowtitle) in cleantitle.get(i[1])]
        # accept the exact year or one off either way
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        url = result
        return url
    except:
        return
def do_search(self, title, year, video_type):
    # Search the site and return the absolute url of the first result of
    # the requested type ('typepost') whose names and year match.
    try:
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % urllib.quote_plus(cleantitle.query(title))
        result = client.request(url)
        result = client.parseDOM(result, 'div', attrs={'class': 'item'})
        for row in result:
            # skip results of the wrong media type (movie vs episode etc.)
            row_type = client.parseDOM(row, 'div', attrs={'class': 'typepost'})[0]
            if row_type != video_type:
                continue
            # 'tt' span may hold several alternative names separated by '/'
            names = client.parseDOM(row, 'span', attrs={'class': 'tt'})[0]
            names = names.split('/')
            year_found = client.parseDOM(row, 'span', attrs={'class': 'year'})
            # a missing year span counts as a match (len == 0)
            if self.name_matches(names, title, year) and (len(year_found) == 0 or year_found[0] == year):
                url = client.parseDOM(row, 'a', ret='href')[0]
                return urlparse.urljoin(self.base_link, url)
        # no matching row: implicitly returns None
    except :
        return
def __search(self, titles, year, season='0'):
    # Search the site's 'featured' block for any of `titles`, matching
    # year (+/-1) and, for TV, the season number parsed from the label.
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        # cleaned forms of every candidate title/alias
        t = [cleantitle.get(i) for i in set(titles) if i]
        # accepted years: exact, +/-1, or '0' (no year found)
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'featured'})
        r = dom_parser.parse_dom(r, 'h2')
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        # split '<name> (YYYY' into (name, year); fall back to year '0'
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        # split '<name> season <N>' (or '<name> sN') into (name, season)
        r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:season|s)\s*(\d+)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        # when a season is wanted but none was parsed, assume season 1
        r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
        # title + year + season must all match; IndexError hits except
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles):
    """Search the site and return the domain-stripped href of the first
    <article> entry-title link whose cleaned text matches any of `titles`.
    Returns None when nothing matches or the request fails."""
    try:
        search_url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query(titles[0])))
        # cleaned forms of every candidate title/alias
        wanted = [cleantitle.get(name) for name in set(titles) if name]
        html = client.request(search_url)
        articles = dom_parser.parse_dom(html, 'article')
        headings = dom_parser.parse_dom(articles, 'h2', attrs={'class': 'entry-title'})
        anchors = dom_parser.parse_dom(headings, 'a', req='href')
        hits = [a.attrs['href'] for a in anchors if cleantitle.get(a.content) in wanted]
        # IndexError (no match) is caught below and yields None
        return source_utils.strip_domain(hits[0])
    except:
        return
def __search(self, titles):
    # Search the site's category listing and return the domain-stripped
    # href of the first entry whose title matches any of `titles`.
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)
        # cleaned forms of every candidate title/alias
        t = [cleantitle.get(i) for i in set(titles) if i]
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'container-search'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie-cat'})
        r = dom_parser.parse_dom(r, 'h4', attrs={'class': 'title'})
        r = dom_parser.parse_dom(r, 'a', req=['title', 'href'])
        r = [(i.attrs['href'], i.attrs['title']) for i in r]
        # first title match; IndexError (no match) hits except
        r = [(i[0]) for i in r if cleantitle.get(i[1]) in t][0]
        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles):
    """XHR JSON search: return the domain-stripped 'url' of the first
    entry whose cleaned 'name' matches any of `titles`, else None."""
    try:
        search_url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query(titles[0])))
        # cleaned forms of every candidate title/alias
        wanted = [cleantitle.get(name) for name in set(titles) if name]
        entries = json.loads(client.request(search_url, XHR=True))
        hits = [entry.get('url') for entry in entries if cleantitle.get(entry.get('name')) in wanted]
        # IndexError (no match) is caught below and yields None
        return source_utils.strip_domain(hits[0])
    except:
        return
def __search(self, titles, year):
    # XHR search against the site; match any of `titles` with the year
    # parsed from the label (+/-1 tolerated, '0' when absent).
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        # cleaned forms of every candidate title/alias
        t = [cleantitle.get(i) for i in set(titles) if i]
        # accepted years: exact, +/-1, or '0' (no year found)
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query, XHR=True, referer=self.base_link)
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        # split '<name> (YYYY' into (name, year); fall back to year '0'
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
        # title + year match; IndexError (no match) hits except
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles, year):
    # Search a drama-listing site ('list-drama' media rows); the year is
    # read from the row's 'pull-right' small tag (+/-1 tolerated).
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        # cleaned forms of every candidate title/alias
        t = [cleantitle.get(i) for i in set(titles) if i]
        # accepted years: exact, +/-1, or '0' (no year found)
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'list-drama'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'media'})
        r = [dom_parser.parse_dom(i, 'div', attrs={'class': 'media-body'}) for i in r]
        r = [(dom_parser.parse_dom(i[0], 'a', req='href'), dom_parser.parse_dom(i[0], 'small', attrs={'class': 'pull-right'})) for i in r if i]
        # (href, link text, year text stripped of markup)
        r = [(i[0][0].attrs['href'], i[0][0].content, re.sub('<.+?>|</.+?>', '', i[1][0].content)) for i in r if i[0] and i[1]]
        r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0], i[1], i[2][0] if i[2] else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
        # title + year match; IndexError (no match) hits except
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles):
    """JSON autocomplete search: return the 'id' of the first entry whose
    cleaned 'value' matches any of `titles`, else None."""
    try:
        search_url = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(titles[0])))
        # cleaned forms of every candidate title/alias
        wanted = [cleantitle.get(name) for name in set(titles) if name]
        entries = json.loads(client.request(search_url))
        hits = [entry.get('id') for entry in entries if cleantitle.get(entry.get('value')) in wanted]
        # IndexError (no match) is caught below and yields None
        return hits[0]
    except:
        return
def __search(self, titles, year):
    # XHR JSON search; tolerate a bare object response by wrapping it in a
    # list, then match title + year (+/-1) and strip the 'serien/' prefix.
    try:
        query = self.search_link % (urllib.quote_plus(titles[0]))
        query = urlparse.urljoin(self.base_link, query)
        # cleaned forms of every candidate title/alias
        t = [cleantitle.get(i) for i in set(titles) if i]
        # accepted years: exact, +/-1, or '0' (no year found)
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query, XHR=True)
        # a single-object response '{...}' is wrapped so json.loads yields a list
        if r and r.startswith('{'): r = '[%s]' % r
        r = json.loads(r)
        r = [(i['url'], i['name']) for i in r if 'name' in i and 'url' in i]
        # split '<name> (YYYY)' into (name, year); fall back to year '0'
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})?\)*$', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
        # title + year match; IndexError (no match) hits except
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
        url = source_utils.strip_domain(r)
        # the site's show pages live under the root, not under 'serien/'
        url = url.replace('serien/', '')
        return url
    except:
        return