def gethtml(url):
with open('cookies') as f:
cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
session = requests.session()
session.cookies = cookies
del session.cookies['c_visitor']
if not forceusa and localizecookies:
session.cookies['c_locale']={u'Español (Espana)' : 'esES', u'Français (France)' : 'frFR', u'Português (Brasil)' : 'ptBR',
u'English' : 'enUS', u'Español' : 'esLA', u'Türkçe' : 'enUS', u'Italiano' : 'itIT',
u'???????' : 'arME' , u'Deutsch' : 'deDE'}[lang]
if forceusa:
try:
session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
except:
sleep(10) # sleep so we don't overload crunblocker
session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
parts = urlparse.urlsplit(url)
if not parts.scheme or not parts.netloc:
print 'Apparently not a URL'
sys.exit()
data = {'Referer': 'http://crunchyroll.com/', 'Host': 'www.crunchyroll.com',
'User-Agent': 'Mozilla/5.0 Windows NT 6.1; rv:26.0 Gecko/20100101 Firefox/26.0'}
res = session.get(url, params=data)
res.encoding = 'UTF-8'
return res.text
# (stray page-scrape artifacts, not code — commented out so the file parses)
# 评论列表  (comment list)
# 文章目录  (article table of contents)