def call_api(self, method, endpoint, payload):
    # Build the full URL from the configured base and the endpoint path.
    url = urlparse.urljoin(self.api_base_url, endpoint)
    method = method.upper()  # normalize casing; the original mixed 'POST' with 'delete'/'put'
    if method == 'POST':
        response = requests.post(url, data=payload)
    elif method == 'DELETE':
        response = requests.delete(url)
    elif method == 'PUT':
        response = requests.put(url, data=payload)
    else:
        # GET: send the payload as query parameters, adding the API token if configured.
        if self.api_key:
            payload.update({'api_token': self.api_key})
        response = requests.get(url, params=payload)
    content = json.loads(response.content)
    return content
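A minimal sketch of how such a wrapper might be invoked, assuming it is bound as a method of a client class exposing the api_base_url and api_key attributes used above (ApiClient and the endpoint are illustrative, and the `import json, requests, urlparse` the function relies on are assumed):

class ApiClient(object):
    def __init__(self, api_base_url, api_key=None):
        self.api_base_url = api_base_url
        self.api_key = api_key

    call_api = call_api  # bind the module-level function above as a method

api = ApiClient('https://api.example.com/', api_key='secret')
# GET requests pick up the api_token query parameter automatically.
items = api.call_api('GET', 'v1/items', {'page': 1})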
Python urljoin() usage examples

All snippets below are Python 2 (`urlparse.urljoin`, `urllib.quote_plus`) and assume module-level imports of urlparse, urllib, re, and json, plus project helpers (client, cleantitle, dom_parser, proxy, source_utils, tvmaze) defined elsewhere in their source add-ons.
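As a quick reference, these are urljoin's resolution rules, which every snippet below relies on (standalone, Python 2 stdlib; in Python 3 the same function lives in urllib.parse):

import urlparse  # Python 3: from urllib.parse import urljoin

# A relative path is resolved against the base URL's directory.
print(urlparse.urljoin('http://example.com/a/b.html', 'c.html'))   # http://example.com/a/c.html
# A root-relative path replaces everything after the host.
print(urlparse.urljoin('http://example.com/a/b.html', '/c.html'))  # http://example.com/c.html
# An absolute URL replaces the base entirely.
print(urlparse.urljoin('http://example.com/a/', 'http://other.org/x'))  # http://other.org/x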
def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
    if 'location' in headers:
        newurl = headers['location']
    elif 'uri' in headers:
        newurl = headers['uri']
    else:
        return
    void = fp.read()
    fp.close()

    # In case the server sent a relative URL, join with original:
    newurl = basejoin(self.type + ":" + url, newurl)

    # For security reasons we do not allow redirects to protocols
    # other than HTTP, HTTPS or FTP.
    newurl_lower = newurl.lower()
    if not (newurl_lower.startswith('http://') or
            newurl_lower.startswith('https://') or
            newurl_lower.startswith('ftp://')):
        raise IOError('redirect error', errcode,
                      errmsg + " - Redirection to url '%s' is not allowed" % newurl,
                      headers)

    return self.open(newurl)
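The prefix check above comes from Python 2's urllib. The same allowlist can be written against the parsed scheme instead of a raw string prefix; a minimal sketch (is_safe_redirect is an illustrative name):

import urlparse

ALLOWED_SCHEMES = ('http', 'https', 'ftp')

def is_safe_redirect(newurl):
    # urlparse lowercases the scheme, so no manual .lower() is needed here.
    return urlparse.urlparse(newurl).scheme in ALLOWED_SCHEMES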
def get(self, netloc, ua, timeout):
    try:
        headers = {'User-Agent': ua, 'Referer': netloc}
        result = _basic_request(netloc, headers=headers, timeout=timeout)

        # Pull the challenge URL out of the page's inline XHR call.
        match = re.findall(r'xhr\.open\("GET","([^,]+),', result)
        if not match:
            return False

        url_parts = match[0].split('"')
        url_parts[1] = '1680'
        url = urlparse.urljoin(netloc, ''.join(url_parts))

        match = re.findall(r'rid=([0-9a-zA-Z]+)', url_parts[0])
        if not match:
            return False

        headers['Cookie'] = 'rcksid=%s' % match[0]
        result = _basic_request(url, headers=headers, timeout=timeout)
        return self.getCookieString(result, headers['Cookie'])
    except:
        return

# not very robust, but laziness...
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        # Resolve the localized title via IMDb, then search the site for it.
        t = 'http://www.imdb.com/title/%s' % imdb
        t = client.request(t, headers={'Accept-Language': 'es-AR'})
        t = client.parseDOM(t, 'title')[0]
        t = re.sub(r'(?:\(|\s)\d{4}.+', '', t).strip().encode('utf-8')

        q = self.search_link % urllib.quote_plus(t)
        q = urlparse.urljoin(self.base_link, q)

        r = client.request(q)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tt'}), client.parseDOM(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # Keep the first result whose cleaned title and year both match.
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
        try:
            # Prefer an exact alias-and-year match.
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            # Fall back to the first alias match regardless of year.
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'item-detail'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = '%s/watch' % url
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        t = cleantitle.get(title)

        p = self.post_link % urllib.quote_plus(cleantitle.query(title))
        q = urlparse.urljoin(self.base_link, self.search_link)

        r = proxy.request(q, 'playing top', post=p, XHR=True)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        r = [(i[0], re.findall(r'(.+?)\((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i for i in r if t == cleantitle.get(i[1]) and str(year) == i[2]]

        url = proxy.parse(r[0][0])
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = '%s/watch' % url
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        data = urlparse.parse_qs(url)
        # Flatten parse_qs' single-item lists into plain string values.
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        query = urlparse.urljoin(self.base_link, self.search_link)
        query = query % urllib.quote_plus(data['tvshowtitle'])
        t = cleantitle.get(data['tvshowtitle'])

        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'thumb'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall(r'(\d{4})', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]

        url = [i[0] for i in r if t in cleantitle.get(i[1]) and ('Season %s' % season) in i[1]][0]
        url += '?episode=%01d' % int(episode)
        return url
    except:
        return
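The parse_qs/dict idiom at the top of episode() (and of several methods below) flattens the one-element lists that parse_qs returns; a standalone sketch with an illustrative query string:

import urlparse  # Python 3: from urllib import parse as urlparse

data = urlparse.parse_qs('tvshowtitle=Some+Show&year=2014')
# parse_qs maps every key to a list: {'tvshowtitle': ['Some Show'], 'year': ['2014']}
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print(data['tvshowtitle'])  # Some Show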
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        query = self.search_mv_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query, XHR=True)
        r = json.loads(r)

        t = cleantitle.get(title)
        r = [(i['slug'], i['title'], i['year']) for i in r]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == str(i[2])][0]

        url = r.encode('utf-8')
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        query = self.search_tv_link % urllib.quote_plus(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query, XHR=True)
        r = json.loads(r)

        t = cleantitle.get(tvshowtitle)
        r = [(i['slug'], i['title'], i['year']) for i in r]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == str(i[2])][0]

        url = r.encode('utf-8')
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None:
            return sources

        url = self.movie_link % url
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)  # response unused

        sources.append({'source': 'einthusan', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(title))
        ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'
        headers = {'Referer': url, 'User-Agent': ua}

        # Accumulate session cookies over several requests before the real search.
        cookie = client.request(url, headers=headers, output='cookie')
        cookie += client.request(url, headers=headers, cookie=cookie, output='cookie')
        client.request(url, headers=headers, cookie=cookie)
        if 'Cookie' in headers:
            cookie += '; ' + headers['Cookie']

        headers = {'Referer': url, 'Cookie': cookie, 'User-Agent': ua}

        r = client.request(url, headers=headers)
        r = client.parseDOM(r, 'div', attrs={'class': 'title'})
        r = [zip(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [i[0] for i in r]
        r = [i[0] for i in r if cleantitle.get(title) in cleantitle.get(i[1])][0]

        url = {'imdb': imdb, 'title': title, 'year': year, 'url': r, 'headers': headers}
        url = urllib.urlencode(url)
        return url
    except:
        return
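The manual cookie accumulation above is essentially what requests.Session does automatically; a minimal sketch with an illustrative URL:

import requests

session = requests.Session()
session.headers.update({'User-Agent': 'Mozilla/5.0', 'Referer': 'http://example.com/'})
# Each response's Set-Cookie headers are folded back into the session.
session.get('http://example.com/search?q=title')
print(session.cookies.get_dict())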
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        headers = eval(data['headers'])
        aliases = eval(data['aliases'])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.getsearch(title)

        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query, headers=headers, timeout='30', mobile=True)
        match = re.compile(r'alias=(.+?)\'">(.+?)</a>').findall(r)
        r = [(i[0], re.findall(r'(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in match]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if self.matchAlias(i[1], aliases) and int(season) == int(i[2])][0]

        url = {'type': 'tvshow', 'id': r, 'episode': episode, 'season': season, 'headers': headers}
        url = urllib.urlencode(url)
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if self.user == '' or self.password == '':
            raise Exception()
        if url is None:
            return

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url, headers=self.headers)
        r = json.loads(r)['episodes']
        r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]

        # Match by season/episode number first, then by air date as a fallback.
        url = [i for i in r if season == '%01d' % int(i[1]) and episode == '%01d' % int(i[2])]
        url += [i for i in r if premiered == i[3]]

        url = self.episode_link % url[0][0]
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None:
            return sources
        if self.user == '' or self.password == '':
            raise Exception()

        url = urlparse.urljoin(self.base_link, url)
        url = client.request(url, headers=self.headers)
        url = json.loads(url)['url']

        sources.append({'source': 'ororo', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)
        y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item'})
        r = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'cluetip'}, req='href'), dom_parser.parse_dom(i, 'div', attrs={'class': 'description'})) for i in r]
        r = [(i[0][0].attrs['href'], dom_parser.parse_dom(i[1], 'h3', attrs={'class': 'text-nowrap'}), dom_parser.parse_dom(i[1], 'div', attrs={'class': 'meta'})) for i in r if i[0] and i[1]]
        r = [(i[0], i[1][0].content, dom_parser.parse_dom(i[2], 'span', attrs={'class': 'pull-left'})) for i in r if i[0] and i[1] and i[2]]
        r = [(i[0], i[1], re.sub(r'[^\d]+', '', i[2][0].content)) for i in r if i[2]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # entries with a year sort before those without
        r = [i[0] for i in r if cleantitle.get(i[1]) == t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        year = re.findall(r'(\d{4})', premiered)[0]
        if int(year) >= 2016:
            raise Exception()

        url = re.sub('[^A-Za-z0-9]', '-', data['tvshowtitle']).lower()
        url = self.tvsearch_link % (url, data['year'], '%01d' % int(season), '%01d' % int(episode))

        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, output='geturl')
        if data['year'] not in r:
            raise Exception()

        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return

        # Translate season/episode into the show's absolute episode number.
        tv_maze = tvmaze.tvMaze()
        num = tv_maze.episodeAbsoluteNumber(tvdb, int(season), int(episode))
        num = str(num)

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        r = client.parseDOM(r, 'tr', attrs={'class': ''})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs={'class': 'epnum'})) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [i[0] for i in r if num == i[1]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def sezonlukdizi_tvcache(self):
    try:
        url = urlparse.urljoin(self.base_link, self.search_link)
        result = client.request(url, redirect=False)

        if not result:
            # The list moved; locate the dizi JS file referenced by the homepage.
            r = client.request(self.base_link)
            r = dom_parser.parse_dom(r, 'script', attrs={'type': 'text/javascript', 'src': re.compile('.*/js/dizi.*')}, req='src')[0]
            url = urlparse.urljoin(self.base_link, r.attrs['src'])
            result = client.request(url)

        result = re.compile('{(.+?)}').findall(result)
        result = [(re.findall(r'u\s*:\s*(?:\'|\")(.+?)(?:\'|\")', i), re.findall(r'd\s*:\s*(?:\'|\")(.+?)(?:\',|\")', i)) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(re.compile(r'/diziler(/.+?)(?://|\.|$)').findall(i[0]), re.sub(r'&#\d*;', '', i[1])) for i in result]
        result = [(i[0][0] + '/', cleantitle.query(i[1])) for i in result if len(i[0]) > 0]
        return result
    except:
        return []
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        c, h = self.__get_cookies(query)
        t = cleantitle.get(title)

        r = client.request(query, headers=h, cookie=c)
        r = client.parseDOM(r, 'div', attrs={'class': 'cell_container'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = self.__search([data['tvshowtitle']] + source_utils.aliases_to_array(eval(data['aliases'])), data['year'], season)
        if not url:
            return

        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ep_link'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content) for i in r if i]
        r = [(i[0], re.findall(r'^(?:episode)\s*(\d+)$', i[1], re.I)) for i in r]
        r = [(i[0], i[1][0] if i[1] else '0') for i in r]
        r = [i[0] for i in r if int(i[1]) == int(episode)][0]

        return source_utils.strip_domain(r)
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = '%s/watch/' % url
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title.replace('\'', '-')))
        r = client.request(url, timeout='10', headers=headers)
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        try:
            match = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
        except:
            match = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
        url = re.findall('(?://.+?|)(/.+)', match)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
def __search(self, title, localtitle, year, content_type):
    try:
        t = cleantitle.get(title)
        tq = cleantitle.get(localtitle)
        y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']

        query = urlparse.urljoin(self.base_link, self.search_link)
        # URL-encode the localized title as the search term.
        post = urllib.urlencode({'k': tq})

        r = client.request(query, post=post)
        r = json.loads(r)
        r = [i.get('result') for i in r if i.get('type', '').encode('utf-8') == content_type]
        r = [(i.get('url'), i.get('originalTitle'), i.get('title'), i.get('anneeProduction', 0), i.get('dateStart', 0)) for i in r]
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1] if i[1] else ''), re.sub('<.+?>|</.+?>', '', i[2] if i[2] else ''), i[3] if i[3] else re.findall(r'(\d{4})', i[4])[0]) for i in r if i[3] or i[4]]
        r = sorted(r, key=lambda i: int(i[3]), reverse=True)  # entries with a year sort before those without
        r = [i[0] for i in r if i[3] in y and (t.lower() == cleantitle.get(i[1].lower()) or tq.lower() == cleantitle.query(i[2].lower()))][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
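For reference, urlencode quotes the value itself, which is why the dict should carry the real search term rather than a '%s' placeholder (Python 2 urllib shown; the term is illustrative):

import urllib
print(urllib.urlencode({'k': "l'empire"}))  # k=l%27empire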
def __search(self, titles, year, season='0'):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']

        r = client.request(query, post={'do': 'search', 'subaction': 'search', 'search_start': 0, 'full_search': 0, 'result_from': 1, 'story': cleantitle.query(titles[0])})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'fullstream'})
        r = [(dom_parser.parse_dom(i, 'h3', attrs={'class': 'mov-title'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'fullmask'})) for i in r]
        r = [(dom_parser.parse_dom(i[0], 'a', req='href'), dom_parser.parse_dom(i[1], 'a', attrs={'href': re.compile(r'.*/year/\d+')})) for i in r]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].content if i[1] else '0') for i in r if i[0]]
        r = [(i[0], i[1], i[2], re.findall(r'(.+?)\s+(?:\s*-\s*saison)\s+(\d+)', i[1], re.I)) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # entries with a year sort before those without
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

        return source_utils.strip_domain(r)
    except:
        return
def __search(self, title, season):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)

        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
        r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], i[1], re.findall(r'(.+?)\s+(?:saison)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [(i[0], i[1], re.findall(r'\((.+?)\)$', i[1]), i[2]) for i in r]
        r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(i[2]) == int(season)][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def __search(self, title, year):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)
        y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_single'})
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'unfilm'}, req='href')
        # Extract the title and year from each result link.
        r = [(i.attrs['href'], dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}), dom_parser.parse_dom(i, 'span', attrs={'class': 'post-year'})) for i in r]
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1][0].content), i[2][0].content if i[2] else '0') for i in r if i[1]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # entries with a year sort before those without
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return