def get(self, url, proxy=None):
    if proxy:
        proxy = urllib2.ProxyHandler({'http': proxy})
        opener = urllib2.build_opener(proxy)
        urllib2.install_opener(opener)
    try:
        response = urllib2.urlopen(url)
    except HTTPError, e:
        resp = e.read()
        self.status_code = e.code
    except URLError, e:
        # URLError carries no response body or HTTP status code, only a reason
        resp = str(e.reason)
        self.status_code = None
    else:
        self.status_code = response.code
        resp = response.read()
    return resp
Example source code using the Python urllib2 URLError() class
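The examples collected below all follow the same basic shape: build a urllib2 request, call urlopen, and treat HTTPError (the server answered with an error status, so .code and a response body are available) differently from URLError (no usable response at all, only .reason). A minimal, self-contained sketch of that pattern is shown here; the fetch name and the example.com URL are only placeholders, not taken from any of the snippets below:

import urllib2

def fetch(url, timeout=10):
    """Return the response body, or None if the request fails."""
    try:
        response = urllib2.urlopen(url, timeout=timeout)
        return response.read()
    except urllib2.HTTPError as e:
        # The server responded with an error status; e.code and e.read() are available.
        print 'HTTP error %d for %s' % (e.code, url)
    except urllib2.URLError as e:
        # No HTTP response at all (DNS failure, refused connection, timeout); only e.reason is set.
        print 'URL error for %s: %s' % (url, e.reason)
    return None

print fetch('http://example.com/')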
def run(self):
    data = self.getData()
    value = {
        data: {
            "type": self.data_type
        }
    }
    json_data = json.dumps(value)
    post_data = json_data.encode('utf-8')
    headers = {'Content-Type': 'application/json'}
    try:
        request = urllib2.Request('{}/hippocampe/api/v1.0/{}'.format(self.url, self.service), post_data, headers)
        response = urllib2.urlopen(request)
        report = json.loads(response.read())
        self.report(report)
    except urllib2.HTTPError:
        self.error("Hippocampe: " + str(sys.exc_info()[1]))
    except urllib2.URLError:
        self.error("Hippocampe: service is not available")
    except Exception as e:
        self.unexpectedError(e)
def downloadFilesSave(links, fileFormat):  # main function
    if (links == 'EMPTY'):  # if the links list is empty
        return ' NO LINKS FOUND !'
    for link in links:
        name = random.randint(0, 10000001)
        if (str(name) + '.' + fileFormat in os.listdir(os.getcwd())):  # pick a new random name on collision
            name = random.randint(0, 10000001)
        if (fileFormat not in ['zip', 'png', 'jpg', 'jpeg', 'tiff', 'bmp', 'svg', 'gif']):
            try:
                saveFile = open(str(name) + '.' + fileFormat, 'w')
                saveFile.write(urllib2.urlopen(link).read())
                saveFile.close()
            except urllib2.URLError:
                pass
        else:
            try:
                saveFile = open(str(name) + '.' + fileFormat, 'wb')
                saveFile.write(urllib2.urlopen(link).read())
                saveFile.close()
            except urllib2.URLError:
                pass
    return ' {} DOWNLOADS SUCCESSFUL YET !'.format(len(os.listdir(os.getcwd())))
def send_result(email, result, title, urn):
    """
    Args:
        email (str): address to send the results
        result (obj): results to send
        title (str):
        urn (str): uniform resource name
    Returns:
        str: response from endpoint
    """
    url = 'https://mongoaud.it/results'
    headers = {'Content-type': 'application/json',
               'Accept': 'application/json'}
    values = {'email': email, 'result': result, 'title': title, 'urn': urn, 'date': get_date()}
    try:
        req = urllib2.Request(url, json.dumps(values), headers)
        response = urllib2.urlopen(req)
        return response.read()
    except (urllib2.HTTPError, urllib2.URLError) as exc:
        return "Sadly enough, we are having technical difficulties at the moment, " \
               "please try again later.\n\n%s" % str(exc)
def check_version(version):
    # if application is binary then check for latest version
    if getattr(sys, 'frozen', False):
        try:
            url = "https://api.github.com/repos/stampery/mongoaudit/releases/latest"
            req = urllib2.urlopen(url)
            releases = json.loads(req.read())
            latest = releases["tag_name"]
            if version < latest:
                print("mongoaudit version " + version)
                print("There's a new version " + latest)
                _upgrade(releases)
        except (urllib2.HTTPError, urllib2.URLError):
            print("Couldn't check for upgrades")
        except os.error:
            print("Couldn't write mongoaudit binary")
def TestSite(url):
    protocheck(url)
    print "Trying: " + url
    try:
        urllib2.urlopen(url, timeout=3)
    except urllib2.HTTPError, e:
        if e.code == 405:
            print url + " found!"
            print "Now the brute force will begin! >:)"
        if e.code == 404:
            printout(str(e), YELLOW)
            print " - XMLRPC has been moved, removed, or blocked"
            sys.exit()
    except urllib2.URLError, g:
        printout("Could not identify XMLRPC. Please verify the domain.\n", YELLOW)
        sys.exit()
    except socket.timeout as e:
        print type(e)
        printout("The socket timed out, try it again.", YELLOW)
        sys.exit()
def compute_dependencies(self, filename=REQUIRES):
    text = Utils.readf(filename)
    data = safe_urlencode([('text', text)])
    if '--offline' in sys.argv:
        self.constraints = self.local_resolve(text)
    else:
        req = Request(get_resolve_url(), data)
        try:
            response = urlopen(req, timeout=TIMEOUT)
        except URLError as e:
            Logs.warn('The package server is down! %r' % e)
            self.constraints = self.local_resolve(text)
        else:
            ret = response.read()
            try:
                ret = ret.decode('utf-8')
            except Exception:
                pass
            self.trace(ret)
            self.constraints = parse_constraints(ret)
    self.check_errors()
def search(self, url, offset=1, maxoffset=0, title=""):
    current_offset = 0
    data = ""
    self.p.reset(title=title)
    while current_offset <= maxoffset:
        self.p.rotate()
        temp_url = re.sub(r'\[\[OFFSET\]\]', str(current_offset), url)
        try:
            headers = {'User-Agent': self.user_agent}
            req = urllib2.Request(temp_url, None, headers)
            data += urllib2.urlopen(req).read()
        except urllib2.URLError as e:
            self.display.error("Could not access [%s]" % (title))
            return data
        except Exception as e:
            print e
        current_offset += offset
    self.p.done()
    return data
def getURLContents(self, url, data=None):
    "Returns the contents of the given URL as a Unicode string"
    s = ""
    success = False
    req = Request(url, data, {'User-agent': self.useragent})
    try:
        f = urlopen(req)
        s = f.read()
        f.close()
        success = True
    except HTTPError, e:
        print 'Server error: ', e.code
        if (self.verbose and BaseHTTPRequestHandler.responses.has_key(e.code)):
            title, msg = BaseHTTPRequestHandler.responses[e.code]
            print title + ": " + msg
    except URLError, e:
        print 'Connection error: ', e.reason
    dammit = UnicodeDammit(s)
    return (success, dammit.unicode)
def getCookie(self):
    """
    This method is the first to be called when initializing a
    Google dorking object through this library. It is used to
    retrieve the Google session cookie needed to perform
    further searches.
    """
    try:
        conn = self.opener.open("http://www.google.com/ncr")
        headers = conn.info()
    except urllib2.HTTPError, e:
        headers = e.info()
    except urllib2.URLError, e:
        errMsg = "unable to connect to Google"
        raise sqlmapConnectionException, errMsg
def _api_call(url, opener):
    """
    Makes a REST call against the Couchbase API.
    Args:
        url (str): The URL to get, including endpoint
    Returns:
        list: The JSON response
    """
    try:
        urllib2.install_opener(opener)
        resp = urllib2.urlopen(url, timeout=http_timeout)
    except (urllib2.HTTPError, urllib2.URLError) as e:
        collectd.error("Error making API call (%s) %s" % (e, url))
        return None
    try:
        return json.load(resp)
    except ValueError, e:
        collectd.error("Error parsing JSON for API call (%s) %s" % (e, url))
        return None
def _woxikon_de_url_handler(target):
    '''
    Query woxikon for synonyms
    '''
    time_out_choice = float(get_variable(
        'tq_online_backends_timeout', _timeout_period_default))
    try:
        response = urlopen(fixurl(u'http://synonyms.woxikon.com/de/{0}'.format(target)).decode('ASCII'), timeout=time_out_choice)
        web_content = StringIO(unescape(decode_utf_8(response.read())))
        response.close()
    except HTTPError:
        return 1
    except URLError as err:
        if isinstance(err.reason, socket.timeout):  # timeout error?
            return 1
        return -1  # other error
    except socket.timeout:  # timeout error failed to be captured by URLError
        return 1
    return web_content
def _jeck_ru_url_handler(target):
    '''
    Query jeck.ru for synonyms
    '''
    time_out_choice = float(get_variable(
        'tq_online_backends_timeout', _timeout_period_default))
    try:
        response = urlopen(fixurl(u'http://jeck.ru/tools/SynonymsDictionary/{0}'.format(target)).decode('ASCII'), timeout=time_out_choice)
        web_content = StringIO(decode_utf_8(response.read()))
        response.close()
    except HTTPError:
        return 1
    except URLError as err:
        if isinstance(err.reason, socket.timeout):  # timeout error?
            return 1
        return -1  # any other error
    except socket.timeout:  # if timeout error not captured by URLError
        return 1
    return web_content
def get_target():
    global client, db
    cursor = db.Shodita.find({"bot": "Shizuka"})
    for document in cursor:
        if check_domain_mongodb(document["ip"], document["dominio"]):
            print colores.verde + "[INFO] Domain: " + document["dominio"] + " already scanned" + colores.normal
            pass
        else:
            url = "http://" + document["dominio"]
            headers = {'User-Agent': 'Mozilla 5.10'}
            request = Request(url, None, headers)
            try:
                response = urlopen(request, timeout=10)
                if response.code == 200 or response.code == "OK":
                    html = response.read()
                    if detect_wp(html, document["dominio"]) == True:
                        insert_mongodb("WordPress", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is WordPress" + colores.normal
                    if detect_joomla(html):
                        insert_mongodb("Joomla", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is Joomla" + colores.normal
                    if detect_drupal(html):
                        insert_mongodb("Drupal", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is Drupal" + colores.normal
            except URLError, e:
                continue
            except httplib.BadStatusLine:
                continue
            except:
                continue
def mm_heartbeat(self):
    # Check whether we are shutting down; otherwise schedule the next heartbeat timer
    if self.shutdown:
        return
    threading.Timer(self.hb_timer, self.mm_heartbeat).start()
    address = ("http://" + self.mm_host + ":" + self.mm_port + "/alexapi?action=AVSHB")
    logger.debug("Sending MM Heartbeat")
    try:
        response = urlopen(address).read()
    except URLError as err:
        logger.error("URLError: %s", err.reason)
        return
    logger.debug("Response: " + response)
def send_remote_shutdown_command(self):
    try:
        from urllib import request as url_request
        URLError = url_request.URLError
    except ImportError:
        import urllib2 as url_request
        import urllib2
        URLError = urllib2.URLError
    try:
        url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
    except URLError:
        return
    count = 0
    while self.is_connectable():
        if count == 30:
            break
        count += 1
        time.sleep(1)
def request_url(url, referer='http://www.google.com'):
    common.plugin.log('request_url : %s' % url)
    req = urllib2.Request(url)
    # set Referer and User-Agent headers on the request
    req.add_header('Referer', referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.3) Gecko/20100101 Firefox/11.0 ( .NET CLR 3.5.30729)')
    try:
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
    except urllib2.URLError as e:
        common.plugin.log_error("Remote request error for URL %s: %r" % (url, e))
        return
    except socket.timeout, e:
        common.plugin.log_error("Remote request error for URL %s: %r" % (url, e))
        return
    return data
def basic_auth(server="http://127.0.0.1"):
    """
    to use basic login with a different server
    from gluon.contrib.login_methods.basic_auth import basic_auth
    auth.settings.login_methods.append(basic_auth('http://server'))
    """
    def basic_login_aux(username,
                        password,
                        server=server):
        key = base64.b64encode(username + ':' + password)
        headers = {'Authorization': 'Basic ' + key}
        request = urllib2.Request(server, None, headers)
        try:
            urllib2.urlopen(request)
            return True
        except (urllib2.URLError, urllib2.HTTPError):
            return False
    return basic_login_aux
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
    "Blocks until server at urlstring can respond to http requests"
    server_ready = False
    t_elapsed = 0
    while not server_ready and t_elapsed < timeout:
        try:
            sys.stdout.write('.')
            sys.stdout.flush()
            req = urllib2.Request(urlstring)
            response = urllib2.urlopen(req)
            #if response.code == 200:
            server_ready = True
        except urllib2.URLError:
            pass
        time.sleep(wait_time)
        t_elapsed += wait_time
def run(self):
    if len(self.__update_rates) == 0:
        return
    # wait up to 120 seconds, to get some distortion
    self.__stop_event.wait(randint(0, 120))
    while not self.__stop_event.is_set():
        start = time.time()
        for update in self.__update_rates:
            rate = update[0]
            now = time.time()
            time_to_wait = round(start - now + rate / 1000, 0)
            interrupt = self.__stop_event.wait(time_to_wait)
            if interrupt:
                return
            try:
                self.start_calculation(update[1])
            except URLError as e:
                logging.getLogger(__name__).error("Could not connect to InfluxDB: " + str(e))
            except:
                logging.getLogger(__name__).error("Job execution failed", exc_info=True)
def download_and_import(self, repo):
    try:
        response = urllib2.urlopen(GITHUB_LINK.format(repo))
        response_sio = StringIO.StringIO(response.read())
        with zipfile.ZipFile(response_sio) as repo_zip:
            repo_zip.extractall(tempfile.tempdir)
        deck_base_name = repo.split("/")[-1]
        deck_directory_wb = Path(tempfile.tempdir).joinpath(deck_base_name + "-" + BRANCH_NAME)
        deck_directory = Path(tempfile.tempdir).joinpath(deck_base_name)
        utils.fs_remove(deck_directory)
        deck_directory_wb.rename(deck_directory)
        # Todo progressbar on download
        AnkiJsonImporter.import_deck(self.collection, deck_directory)
    except (urllib2.URLError, urllib2.HTTPError, OSError) as error:
        aqt.utils.showWarning("Error while trying to get deck from Github: {}".format(error))
        raise
def check_for_update():
    if os.path.exists(FILE_UPDATE):
        mtime = os.path.getmtime(FILE_UPDATE)
        last = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
        today = datetime.utcnow().strftime('%Y-%m-%d')
        if last == today:
            return
    try:
        with open(FILE_UPDATE, 'a'):
            os.utime(FILE_UPDATE, None)
        request = urllib2.Request(
            CORE_VERSION_URL,
            urllib.urlencode({'version': main.__version__}),
        )
        response = urllib2.urlopen(request)
        with open(FILE_UPDATE, 'w') as update_json:
            update_json.write(response.read())
    except (urllib2.HTTPError, urllib2.URLError):
        pass
def get_page(self, url, data=None):
    handlers = [PoolHTTPHandler]
    opener = urllib2.build_opener(*handlers)
    if data:
        data = urllib.urlencode(data)
    request = urllib2.Request(url, data, self.headers)
    try:
        response = opener.open(request)
        return response.read()
    except (urllib2.HTTPError, urllib2.URLError), e:
        raise BrowserError(url, str(e))
    except (socket.error, socket.sslerror), msg:
        raise BrowserError(url, msg)
    except socket.timeout, e:
        raise BrowserError(url, "timeout")
    except KeyboardInterrupt:
        raise
    except:
        raise BrowserError(url, "unknown error")
def postInfo(self, requestParams):
    logging.info("About to phone home to [%s].", self.url)
    req = urllib2.Request(self.url)
    req.add_header('Content-Type', 'application/json')
    resp = None
    try:
        resp = urllib2.urlopen(req, json.dumps(requestParams), timeout=30, **self.kwargs)
        resp = resp.read()
    except urllib2.HTTPError, e:
        logging.error("HTTPError: %s", str(e.code))
    except urllib2.URLError, e:
        logging.error("URLError: %s", str(e.reason))
    except httplib.HTTPException, e:
        logging.error("HTTPException: %s", str(e))
    except Exception, e:
        logging.exception("Unexpected error: %s", str(e))
    return resp
def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                html = download(url, headers, proxy, num_retries - 1, data)
        else:
            code = None
    return html
def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                return download(url, headers, proxy, num_retries - 1, data)
        else:
            code = None
    return html
def download5(url, user_agent='wswp', proxy=None, num_retries=2):
    """Download function with support for proxies"""
    print 'Downloading:', url
    headers = {'User-agent': user_agent}
    request = urllib2.Request(url, headers=headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        html = opener.open(request).read()
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download5(url, user_agent, proxy, num_retries - 1)
    return html
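Every example on this page targets the Python 2 urllib2 module. Under Python 3 the same exceptions live in urllib.error and the request machinery in urllib.request; as a rough sketch only (the download3 name is invented here for comparison, not taken from any snippet above), the retry-on-5XX pattern of the last example might be written like this:

from urllib import request, error, parse

def download3(url, user_agent='wswp', proxy=None, num_retries=2):
    """Python 3 sketch of the same proxy-aware download with 5XX retries."""
    headers = {'User-agent': user_agent}
    req = request.Request(url, headers=headers)
    opener = request.build_opener()
    if proxy:
        proxy_params = {parse.urlparse(url).scheme: proxy}
        opener.add_handler(request.ProxyHandler(proxy_params))
    try:
        html = opener.open(req).read()
    except error.URLError as e:
        # HTTPError subclasses URLError, so both cases land here; only HTTPError has .code
        print('Download error:', getattr(e, 'reason', e))
        html = None
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            # retry 5XX HTTP errors
            html = download3(url, user_agent, proxy, num_retries - 1)
    return html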