def DownloadSetting(url):
list = []
try:
req = urllib2.Request(url)
req.add_header('User-Agent', 'VAS')
response = urllib2.urlopen(req)
link = response.read()
response.close()
xx = re.compile('<td><a href="(.+?)">(.+?)</a></td>.*?<td>(.+?)</td>', re.DOTALL).findall(link)
for link, name, date in xx:
print link, name, date
prelink = ''
if not link.startswith("http://"):
prelink = url.replace('asd.php','')
list.append((date, name, prelink + link))
except:
print"ERROR DownloadSetting %s" %(url)
return list
# Example source code using the Request() class (collected snippets)
def _call_ACIS(self, kwargs, **moreKwargs):
    '''
    Core method for calling the ACIS services.

    Merges *moreKwargs* into *kwargs*, strips None values, POSTs the
    JSON-encoded parameters to the service URL, and returns the
    de-serialized JSON response as a python dictionary.
    '''
    kwargs.update(moreKwargs)
    self._input_dict = self._stripNoneValues(kwargs)
    self.url = self.baseURL + self.webServiceSource
    if pyVersion == 2:    # python 2.x
        params = urllib.urlencode({'params': json.dumps(self._input_dict)})
        request = urllib2.Request(self.url, params, {'Accept': 'application/json'})
        response = urllib2.urlopen(request)
        try:
            jsonData = response.read()
        finally:
            response.close()  # don't leak the connection
    elif pyVersion == 3:  # python 3.x
        params = urllib.parse.urlencode({'params': json.dumps(self._input_dict)})
        params = params.encode('utf-8')
        # Send the same Accept header as the python 2 branch for consistency.
        request = urllib.request.Request(self.url, data=params,
                                         headers={'Accept': 'application/json'})
        response = urllib.request.urlopen(request)
        try:
            jsonData = response.read().decode()
        finally:
            response.close()
    else:
        # Previously fell through with `jsonData` undefined (NameError).
        raise RuntimeError('unsupported python version: %r' % (pyVersion,))
    return json.loads(jsonData)
def get_target():
    # Scan every domain assigned to the "Shizuka" bot and fingerprint its CMS
    # (WordPress / Joomla / Drupal), recording hits in MongoDB.
    global client, db
    cursor = db.Shodita.find({"bot":"Shizuka"})
    for document in cursor:
        if check_domain_mongodb(document["ip"], document["dominio"]):
            # Already scanned -- just log and move on.
            print colores.verde + "[INFO] Domain: " + document["dominio"] + " already scanned" + colores.normal
            pass
        else:
            url = "http://" + document["dominio"]
            headers = {'User-Agent' : 'Mozilla 5.10'}
            request = Request(url, None, headers)
            try:
                response = urlopen(request, timeout=10)
                # NOTE(review): comparing .code against the string "OK" looks
                # redundant -- urllib response codes are ints; confirm.
                if response.code == 200 or response.code == "OK":
                    html = response.read()
                    # A page can match several detectors; each hit is recorded.
                    if detect_wp(html, document["dominio"]) == True:
                        insert_mongodb("WordPress", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is WordPress" + colores.normal
                    if detect_joomla(html):
                        insert_mongodb("Joomla", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is Joomla" + colores.normal
                    if detect_drupal(html):
                        insert_mongodb("Drupal", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is Drupal" + colores.normal
            except URLError, e:
                # Host unreachable -- skip this domain.
                continue
            except httplib.BadStatusLine:
                continue
            except:
                # Best-effort scanner: any other failure also skips the domain.
                continue
def for_rc():
    """Query the MoeGirl MediaWiki API for main-namespace edits and new pages
    from the last hour and return the list of recentchanges entries."""
    apiurl = "https://zh.moegirl.org/api.php"
    fmt = "%Y%m%d%H%M%S"  # renamed from `format`: avoid shadowing the builtin
    utc = datetime.datetime.utcnow()
    rcstart = (utc - datetime.timedelta(hours=1)).strftime(fmt)
    rcend = utc.strftime(fmt)
    parmas = urllib.urlencode({'format':'json','action':'query','list':'recentchanges','rcstart':rcstart,'rcend':rcend,'rcdir':'newer','rcnamespace':'0','rctoponly':'','rctype':'edit|new','continue':'','rcprop':'title|sizes'})
    req = urllib2.Request(url=apiurl, data=parmas)
    res_data = urllib2.urlopen(req)
    try:
        ori = res_data.read()
    finally:
        res_data.close()  # previously leaked
    rcc = json.loads(ori, object_hook=_decode_dict)
    rcc = OrderedDict(rcc)
    # The only key under 'query' is 'recentchanges' (py2 .keys() is a list).
    key = rcc['query'].keys()[0]
    lists = rcc['query'][key]
    return lists
def query(self, f_table, f_keyword, f_type = None, f_netmask = None):
    """Build the keyword-lookup API path, attach auth headers, and dispatch
    the request with retries.

    :param f_table: table name to search
    :param f_keyword: keyword to look up
    :param f_type: optional record type, mapped through FLINT_TYPES
    :param f_netmask: optional netmask filter
    :return: result of self._do_query
    """
    path = "/api/%s/keyword/%s/" % (f_table, f_keyword)
    if f_type is not None:
        path += "rtype/%s/" % FLINT_TYPES[f_type]
    if f_netmask is not None:
        path += "mask/%s/" % str(f_netmask)
    # Prefix the scheme only when the configured endpoint lacks one.
    if self.api.startswith("http://"):
        full_url = self.api + path
    else:
        full_url = "http://" + self.api + path
    request = self.setup_header(urllib2.Request(full_url), path)
    return self._do_query(request, max_retry = self.MAX_RETRY)
def send_log():
ldir = options['logsfolder']
dirs = [d for d in os.listdir(ldir) if os.path.isdir(os.path.join(ldir, d))]
dirs = [os.path.join(ldir, d) for d in dirs]
latest_subdir = max(dirs, key=os.path.getmtime)
logfolder = latest_subdir
logfile = os.path.join(ldir, 'compressedlogs')
shutil.make_archive(logfile, 'zip', logfolder)
logfile = logfile + '.zip'
log_content = open(logfile, 'rb').read()
encoded_log = base64.b64encode(bytes(log_content))
data = {'encoded_log': encoded_log, 'sample_hash': options['sample_hash']}
request = urllib2.Request(options['log-server-url'])
request.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(request, json.dumps(data))
if response.getcode() != 200:
print 'Unable to send data'
def process_record(self, record, dbm):
    # Build and POST one weewx archive record to InfluxDB's /write endpoint.
    if self.augment_record and dbm:
        record = self.get_record(record, dbm)
    # Convert to the configured unit system before serializing.
    if self.unit_system is not None:
        record = weewx.units.to_std_system(record, self.unit_system)
    url = '%s/write?db=%s' % (self.server_url, self.database)
    data = self.get_data(record)
    if weewx.debug >= 2:
        logdbg('url: %s' % url)
        logdbg('data: %s' % data)
    # Dry-run mode: abort after logging, before any network traffic.
    if self.skip_upload:
        raise AbortedPost()
    req = urllib2.Request(url, data)
    req.add_header("User-Agent", "weewx/%s" % weewx.__version__)
    if self.username is not None:
        # HTTP Basic auth; strip the newline base64.encodestring appends.
        b64s = base64.encodestring(
            '%s:%s' % (self.username, self.password)).replace('\n', '')
        req.add_header("Authorization", "Basic %s" % b64s)
    # Force POST even though a data-less request would default to GET.
    req.get_method = lambda: 'POST'
    self.post_with_retries(req)
def getHtml(url,post_data=''):
'''
??url??Html??
:param url: ??url
:param post_data: post??
:return: html??
'''
if post_data and isinstance(post_data,dict):
data = urllib.urlencode(post_data)
req = urllib2.Request(url,post_data=data)
else:
req = urllib2.Request(url)
try:
res = urllib2.urlopen(req).read()
return res
except Exception,e:
print( Exception,":",e)
def contact_zabbix_server(self, payload):
    """
    Method used to contact the Zabbix server.

    :param payload: refers to the json message to send to Zabbix
    :return: returns the response from the Zabbix API
    """
    endpoint = 'http://' + self.zabbix_host + '/zabbix/api_jsonrpc.php'
    request = urllib2.Request(endpoint,
                              json.dumps(payload),
                              {'Content-Type': 'application/json'})
    handle = urllib2.urlopen(request)
    try:
        return json.loads(handle.read())
    finally:
        handle.close()
def pointer_to_json(dl_url):
    """Download a git-lfs pointer file and build the LFS batch-API JSON
    request body from its `oid sha256:` and `size` fields."""
    response = urllib2.urlopen(urllib2.Request(dl_url))
    pointer_text = response.read()
    response.close()
    # A pointer file contains one `oid sha256:<hex>` and one `size <n>` line.
    oid = re.search('(?m)^oid sha256:([a-z0-9]+)$', pointer_text).group(1)
    size = re.search('(?m)^size ([0-9]+)$', pointer_text).group(1)
    return ('{"operation": "download", '
            '"transfers": ["basic"], '
            '"objects": [{"oid": "%s", "size": %s}]}' % (oid, size))
# the get_lfs_url function makes a request to the LFS API of the GitHub repo,
# receives a JSON response, then extracts the download URL from that response
# and returns it.
def get_lfs_url(json_input, lfs_url):
    """POST *json_input* to the git-lfs batch endpoint *lfs_url* and return
    the download href of the first object in the JSON response."""
    request = urllib2.Request(lfs_url, json_input)
    for header, value in (("Accept", "application/vnd.git-lfs+json"),
                          ("Content-Type", "application/vnd.git-lfs+json")):
        request.add_header(header, value)
    response = urllib2.urlopen(request)
    try:
        parsed = json.load(response)
    finally:
        response.close()
    return parsed['objects'][0]['actions']['download']['href']
# --- section 3: actually doing stuff! --------------------- #
# now the fun bit: we actually get to do stuff!
# ---------------------------------------------------------- #
# if the local directory doesn't exist, we make it.
def remove_device(self, serial):
# (url, access_token, api_token) = self.get_api_conf("conf/stf.conf", "renguoliang")
api_url = self.url + "/api/v1/user/devices/%s" % serial
print api_url
token = self.access_token + " " + self.api_token
request = urllib2.Request(api_url)
request.add_header('Authorization', token)
request.get_method = lambda: 'DELETE'
try:
urllib2.urlopen(request)
except Exception, e:
print e.code
print e.read()
# NOTE: original comment lost to mis-encoding (mojibake).
def _query(self, path, before=None, after=None):
    """Stream results from the lookup endpoint, yielding one decoded JSON
    object per response line.

    :param path: lookup path fragment appended to the server URL
    :param before: optional upper time bound
    :param after: optional lower time bound
    """
    url = '%s/lookup/%s' % (self.server, path)
    params = {}
    if self.limit:
        params['limit'] = self.limit
    # A fully bounded window uses first_after/last_before; otherwise each
    # bound maps to first_before / last_after independently.
    if before and after:
        params['time_first_after'] = after
        params['time_last_before'] = before
    else:
        if before:
            params['time_first_before'] = before
        if after:
            params['time_last_after'] = after
    if params:
        url = '%s?%s' % (url, urllib.urlencode(params))
    request = urllib2.Request(url)
    request.add_header('Accept', 'application/json')
    request.add_header('X-Api-Key', self.apikey)
    stream = urllib2.urlopen(request)
    for line in iter(stream.readline, ''):
        yield json.loads(line)
def LoadPage(self, myUrl):
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
headers = {'User-Agent': user_agent, 'Accept': accept}
print self.base_url + myUrl
req = urllib2.Request(self.base_url+myUrl, headers=headers)
myResponse = urllib2.urlopen(req)
myPage = myResponse.read()
#print myPage
# encode?????unicode?????????????
# decode????????????????unicode??
unicodePage = myPage.decode("gb2312", 'ignore').encode('utf-8', 'ignore')
# print unicodePage
return unicodePage
def linksExtractor(url, fileFormat='png'):
    """Scrape *url* and return every link/source URL ending in *fileFormat*.

    Image formats are collected from <img src=...>, anything else from
    <a href=...>.  Returns an error string on connection failure, the
    string 'EMPTY' when nothing matched, otherwise a list of URLs.
    """
    tag = 'a'
    attr = 'href'
    if (fileFormat in ['png', 'jpg', 'jpeg', 'tiff', 'bmp', 'svg', 'gif']):
        tag = 'img'
        attr = 'src'
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}
        req = urllib2.Request(url, None, headers)
        htmlDoc = urllib2.urlopen(req).read()
    except urllib2.HTTPError as err:
        # BUG FIX: HTTPError.code is an int attribute, not a method --
        # `err.code()` raised TypeError instead of reporting the status.
        print("Server Response : " + str(err.code))
        return "Server refused to connect!"
    except urllib2.URLError:
        return 'Invalid URL!'
    page = BeautifulSoup(htmlDoc, 'html.parser')
    page.prettify()
    res = []
    for link in page.find_all(tag):
        pre = str(link.get(attr))
        # Keep only entries whose suffix matches the requested format.
        if (pre[-len(fileFormat):] == fileFormat):
            res.append(pre)
    if (len(res) < 1):
        return 'EMPTY'
    return res
def SendMessage(Token,message):
url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=%s' %Token
values = {
"touser": "",
"toparty": "2",
"totag": "",
"msgtype": "text",
"agentid": "2",
"text": {
"content": message
},
"safe":"0"
}
print values
data = json.dumps(values,ensure_ascii=False)
req = urllib2.Request(url, data)
req.add_header('Content-Type', 'application/json')
req.add_header('encoding', 'utf-8')
response = urllib2.urlopen(req)
result = response.read().strip()
print result
result = json.loads(result)
if result['errmsg'] == 'ok':
return 'ok'
else:
return 'Error'
def SendMessage(Token,message):
url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=%s' %Token
values = {
"touser": "",
"toparty": "2",
"totag": "",
"msgtype": "text",
"agentid": "2",
"text": {
"content": message
},
"safe":"0"
}
print values
data = json.dumps(values,ensure_ascii=False)
req = urllib2.Request(url, data)
req.add_header('Content-Type', 'application/json')
req.add_header('encoding', 'utf-8')
response = urllib2.urlopen(req)
result = response.read().strip()
print result
result = json.loads(result)
if result['errmsg'] == 'ok':
return 'ok'
else:
return 'Error'
def SendMessage(Token, message):
    # Send *message* as a WeChat-Work ("qyapi") text message using access
    # token *Token*.  Returns 'ok' on API success, 'Error' otherwise.
    url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=%s' %Token
    # Fixed recipients: party 2, agent 2, unencrypted ("safe": "0").
    values = {
        "touser": "",
        "toparty": "2",
        "totag": "",
        "msgtype": "text",
        "agentid": "2",
        "text": {
            "content": message
        },
        "safe":"0"
    }
    print values
    # ensure_ascii=False keeps non-ascii message text readable in the JSON.
    data = json.dumps(values, ensure_ascii=False)
    req = urllib2.Request(url, data)
    req.add_header('Content-Type', 'application/json')
    req.add_header('encoding', 'utf-8')
    response = urllib2.urlopen(req)
    result = response.read().strip()
    print result
    result = json.loads(result)
    if result['errmsg'] == 'ok':
        return 'ok'
    else:
        return 'Error'
def files(self):
    # Lazily fetch and cache the torrent's file list from the
    # ajax_details_filelist.php endpoint; returns {name: size} strings.
    if not self._files:
        path = '/ajax_details_filelist.php'
        url = self.url.path(path).query_param('id', self.id)
        request = urllib.request.Request(
            url, headers={'User-Agent': "Magic Browser"})
        response = urllib.request.urlopen(request).read()
        root = html.document_fromstring(response)
        rows = root.findall('.//tr')
        # A single row whose <td> spans 2 columns means "no files".
        if len(rows) == 1 and rows[0].find('td').get('colspan') == str(2):
            self._files = {}
        else:
            for row in rows:
                # NOTE(review): `unicode` is a python-2 builtin although the
                # urllib.request calls above are python-3 style -- confirm
                # which interpreter this actually targets.
                name, size = [unicode(v.text_content())
                              for v in row.findall('.//td')]
                # \xa0 is the non-breaking space used in the size column.
                self._files[name] = size.replace('\xa0', ' ')
    return self._files
def run(self, suppress = False):
    """
    Blocks until our request is complete then provides the descriptors. If we
    haven't yet started our request then this does so.

    :param bool suppress: avoids raising exceptions if **True**

    :returns: list for the requested :class:`~stem.descriptor.__init__.Descriptor` instances

    :raises:
      Using the iterator can fail with the following if **suppress** is
      **False**...

        * **ValueError** if the descriptor contents is malformed
        * **socket.timeout** if our request timed out
        * **urllib2.URLError** for most request failures

      Note that the urllib2 module may fail with other exception types, in
      which case we'll pass it along.
    """
    descriptors = self._run(suppress)
    return [descriptor for descriptor in descriptors]
def _pick_url(self, use_authority = False):
"""
Provides a url that can be queried. If we have multiple endpoints then one
will be picked randomly.
:param bool use_authority: ignores our endpoints and uses a directory
authority instead
:returns: **str** for the url being queried by this request
"""
if use_authority or not self.endpoints:
authority = random.choice(filter(HAS_V3IDENT, get_authorities().values()))
address, dirport = authority.address, authority.dir_port
else:
address, dirport = random.choice(self.endpoints)
return 'http://%s:%i/%s' % (address, dirport, self.resource.lstrip('/'))
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
return data
def run(self, suppress = False):
    """
    Blocks until our request is complete then provides the descriptors. If we
    haven't yet started our request then this does so.

    :param bool suppress: avoids raising exceptions if **True**

    :returns: list for the requested :class:`~stem.descriptor.__init__.Descriptor` instances

    :raises:
      Using the iterator can fail with the following if **suppress** is
      **False**...

        * **ValueError** if the descriptor contents is malformed
        * **socket.timeout** if our request timed out
        * **urllib2.URLError** for most request failures

      Note that the urllib2 module may fail with other exception types, in
      which case we'll pass it along.
    """
    collected = []
    for descriptor in self._run(suppress):
        collected.append(descriptor)
    return collected
def _pick_url(self, use_authority = False):
"""
Provides a url that can be queried. If we have multiple endpoints then one
will be picked randomly.
:param bool use_authority: ignores our endpoints and uses a directory
authority instead
:returns: **str** for the url being queried by this request
"""
if use_authority or not self.endpoints:
authority = random.choice(filter(HAS_V3IDENT, get_authorities().values()))
address, dirport = authority.address, authority.dir_port
else:
address, dirport = random.choice(self.endpoints)
return 'http://%s:%i/%s' % (address, dirport, self.resource.lstrip('/'))
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
return data
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def _make_request(self, opener, request):
"""Make the API call and return the response. This is separated into
it's own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:return: urllib response
"""
try:
return opener.open(request)
except HTTPError as err:
exc = handle_error(err)
exc.__cause__ = None
raise exc
def make_request(self,
                 method,
                 request_body=None,
                 query_params=None,
                 request_headers=None):
    # Build and send a single HTTP request with an arbitrary verb, storing
    # the response on the instance via _set_response.
    # NOTE(review): `urllib.build_opener` / `urllib.Request` do not exist in
    # the stock python-2 `urllib` module -- presumably `urllib` is a
    # compatibility alias (e.g. for urllib2) set up at import time; confirm.
    method = method.upper()
    if request_headers:
        self._set_headers(request_headers)
    # Serialize the body to JSON only when one was supplied.
    request_body = json.dumps(request_body) if request_body else None
    query_params = query_params if query_params else None
    opener = urllib.build_opener()
    request = urllib.Request(self._build_url(query_params),
                             data=request_body)
    # iteritems(): python-2 dict iteration.
    for key, value in self.request_headers.iteritems():
        request.add_header(key, value)
    # Force the HTTP verb (urllib natively only does GET/POST).
    request.get_method = lambda: method
    self._response = opener.open(request)
    self._set_response(self._response)
    self._reset()
def run(self, suppress = False):
    """
    Blocks until our request is complete then provides the descriptors. If we
    haven't yet started our request then this does so.

    :param bool suppress: avoids raising exceptions if **True**

    :returns: list for the requested :class:`~stem.descriptor.__init__.Descriptor` instances

    :raises:
      Using the iterator can fail with the following if **suppress** is
      **False**...

        * **ValueError** if the descriptor contents is malformed
        * **socket.timeout** if our request timed out
        * **urllib2.URLError** for most request failures

      Note that the urllib2 module may fail with other exception types, in
      which case we'll pass it along.
    """
    return [d for d in self._run(suppress)]