import requests
from bs4 import BeautifulSoup


def location(url):
    # Request headers captured from a browser session. The hard-coded
    # Content-Length header is dropped (requests computes it), and the Cookie
    # value is session-specific and may need to be refreshed.
    fdata = {'Accept': '*/*',
             'Accept-Encoding': 'gzip, deflate',
             'Accept-Language': 'en-US,en;q=0.8',
             'Connection': 'keep-alive',
             'Content-type': 'application/x-www-form-urlencoded',
             'Cookie': 'PHPSESSID=hisbu0rrh09nssn99vckkqr740; __utma=103585558.1324897437.1443987736.1443987736.1443987736.1; __utmb=103585558.2.10.1443987736; __utmc=103585558; __utmz=103585558.1443987736.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)',
             'Host': 'get-site-ip.com',
             'Origin': 'http://get-site-ip.com',
             'Referer': 'http://get-site-ip.com/',
             'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'}
    # Submit the lookup form; dnsNakeLookUp_In is the field name the site expects.
    response = requests.post('http://get-site-ip.com/_pages/_moduler/ajaxSkrivUtIpPaNamn.php',
                             headers=fdata,
                             data={'dnsNakeLookUp_In': url})
    # print response.content
    soup = BeautifulSoup(response.content, "lxml")
    # The "response" div holds a '-'-separated string; return its third field
    # (the location) with spaces stripped.
    for i in soup.find_all("div", {"class": "response"}):
        # print i.get_text()
        # print i.get_text().split('-')[2].replace(' ', '')
        return i.get_text().split('-')[2].replace(' ', '')
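
# A minimal usage sketch, assuming get-site-ip.com still serves this form and
# markup; 'github.com' is just an example hostname, not from the original code.
if __name__ == '__main__':
    print(location('github.com'))  # prints the parsed location field, or None if no "response" div is found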
#Finds number of special characters