def keep_redirecting(r, my_pub):
    # don't read r.content unless we have to, because it will cause us to download the whole thing instead of just the headers
# 10.5762/kais.2016.17.5.316
if ("content-length" in r.headers):
# manually follow javascript if that's all that's in the payload
file_size = int(r.headers["content-length"])
if file_size < 500:
            matches = re.findall(r"<script>location.href='(.*)'</script>", r.content, re.IGNORECASE)
if matches:
redirect_url = matches[0]
if redirect_url.startswith(u"/"):
redirect_url = get_link_target(redirect_url, r.url)
return redirect_url
# 10.1097/00003643-201406001-00238
if my_pub and my_pub.is_same_publisher("Ovid Technologies (Wolters Kluwer Health)"):
        matches = re.findall(r"OvidAN = '(.*?)';", r.content, re.IGNORECASE)
if matches:
an_number = matches[0]
redirect_url = "http://content.wkhealth.com/linkback/openurl?an={}".format(an_number)
return redirect_url
return None
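# Hedged sketch of the javascript-redirect branch above, run against a
# made-up payload rather than a live response object:
import re

_body = "<script>location.href='/action/fullText?id=12345'</script>"
_matches = re.findall(r"<script>location.href='(.*)'</script>", _body, re.IGNORECASE)
assert _matches == ["/action/fullText?id=12345"]
# a root-relative result like this would then be resolved against r.url
# via get_link_target() before being returned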
def _maybe_update_line_package(self, line, package):
original_line = line
pattern = r'\b{package}(?:\[\w*\])?=={old_version}\b'.format(
package=re.escape(package['name']),
old_version=re.escape(str(package['current_version'])))
if re.search(pattern, line, flags=re.IGNORECASE):
line = line.replace(
'=={}'.format(package['current_version']),
'=={}'.format(package['latest_version'])
)
if line != original_line:
self.upgraded_packages.append(package)
if self.dry_run: # pragma: nocover
print('[Dry Run]: skipping requirements replacement:',
original_line.replace('\n', ''), ' / ',
line.replace('\n', ''))
return original_line
return line
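# Hedged sketch of the pinning pattern above, with a made-up package dict of
# the shape the method expects:
import re

_pkg = {'name': 'requests', 'current_version': '2.19.1', 'latest_version': '2.20.0'}
_line = 'Requests[security]==2.19.1\n'
_pattern = r'\b{package}(?:\[\w*\])?=={old_version}\b'.format(
    package=re.escape(_pkg['name']),
    old_version=re.escape(str(_pkg['current_version'])))
# IGNORECASE lets 'Requests' in the file match the canonical 'requests' name
assert re.search(_pattern, _line, flags=re.IGNORECASE)
assert _line.replace('==2.19.1', '==2.20.0') == 'Requests[security]==2.20.0\n'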
def _do_match(self, text):
if self.use_regex:
try:
flags = re.UNICODE
if not self.case_sensitive:
flags |= re.IGNORECASE
return bool(re.findall(self.pattern, text, flags=flags))
except Exception as ex:
logger.warning('Regular expression match failed', exc_info=True)
raise self.BadPatternException(str(ex))
else:
if self.case_sensitive:
pattern = self.pattern
else:
pattern = self.pattern.lower()
text = text.lower()
return pattern in text
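# Illustrative checks of both branches above, run standalone (the real method
# reads use_regex, case_sensitive, and pattern off its instance):
import re

_flags = re.UNICODE | re.IGNORECASE
assert bool(re.findall(r'error \d+', 'ERROR 42 occurred', flags=_flags))
# non-regex branch: case-insensitive containment via lower()
assert 'error 42' in 'Fatal ERROR 42 occurred'.lower()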
def find_templates():
"""
Load python modules from templates directory and get templates list
:return: list of tuples (pairs):
[(compiled regex, lambda regex_match: return message_data)]
"""
templates = []
    templates_directory = os.path.join(
        os.path.dirname(inspect.getsourcefile(lambda: 0)), 'templates')
template_files = os.listdir(templates_directory)
for template_file in template_files:
if template_file.startswith('.') or not template_file.endswith('.py'):
continue
        # Hack for dev checkouts vs. distutils installs
        # (rstrip('.py') was a bug here: it strips a trailing character set,
        # not a suffix, so slice the extension off instead; see the check
        # after this function)
        module_name = template_file[:-3]
        try:
            template_module = importlib.import_module('templates.{}'.format(module_name))
        except ImportError:
            template_module = importlib.import_module('ross.templates.{}'.format(module_name))
        # Iterate through the items in the template module. If a variable's
        # name ends with 'templates', extend the templates list with its
        # contents.
for (name, content) in template_module.__dict__.items():
if name.endswith('templates'):
for (regex_text, data_func) in content:
templates.append((re.compile(regex_text, re.IGNORECASE), data_func))
return templates
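# Why the rstrip() calls were replaced above: str.rstrip strips a trailing
# character set, not a suffix, so it can eat too much:
assert 'happy.py'.rstrip('.py') == 'ha'  # also ate the 'ppy' of 'happy'
assert 'happy.py'[:-3] == 'happy'        # slicing removes exactly '.py'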
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
    return p.match(url) is not None
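# The upper-case character classes above rely on IGNORECASE to accept
# lower-case hostnames; a minimal demonstration of that dependency:
import re

assert re.match(r'[A-Z0-9]+', 'example', re.IGNORECASE)
assert not re.match(r'[A-Z0-9]+', 'example')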
def printable_usage(doc):
    # re.split() can't take flags=re.IGNORECASE in Python < 2.7, hence the
    # character-class spelling below
usage_split = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', doc)
if len(usage_split) < 3:
raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
if len(usage_split) > 3:
raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
return re.split(r'\n\s*\n', ''.join(usage_split[1:]))[0].strip()
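# Hedged example of what the splitter above extracts, on a made-up docstring:
import re

_doc = """Naval Fate.

Usage:
  naval_fate ship new <name>
  naval_fate ship move <x> <y>

Options:
  -h --help  Show this screen.
"""
_parts = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', _doc)
# joining _parts[1:] keeps 'Usage:' plus everything after it; the first blank
# line then terminates the usage section
_usage = re.split(r'\n\s*\n', ''.join(_parts[1:]))[0].strip()
assert _usage.splitlines()[0] == 'Usage:'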
def _search_for_query(self, query):
if query in self._search_pattern_cache:
return self._search_pattern_cache[query]
# Build pattern: include all characters
pattern = []
for c in query:
# pattern.append('[^{0}]*{0}'.format(re.escape(c)))
pattern.append('.*?{0}'.format(re.escape(c)))
pattern = ''.join(pattern)
search = re.compile(pattern, re.IGNORECASE).search
self._search_pattern_cache[query] = search
return search
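# The builder above produces a lazy "subsequence" matcher; the same
# construction standalone, with a made-up query:
import re

_query = 'fbr'
_pattern = ''.join('.*?{0}'.format(re.escape(c)) for c in _query)
_search = re.compile(_pattern, re.IGNORECASE).search
assert _search('FooBar')   # 'f', 'b', 'r' appear in order, case-insensitively
assert not _search('rbf')  # right letters, wrong order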
def get_enlisted_regex_matches(formatted_comment):
"""
Gets a regex match for enlisted AFSCs.
Note: Enlisted matching is NOT case sensitive.
:param formatted_comment: string not including any quoted text
:return: regex matches
"""
enlisted_AFSC_search = re.compile(ENLISTED_AFSC_REGEX, re.IGNORECASE)
matched_comments_enlisted = enlisted_AFSC_search.finditer(formatted_comment)
return matched_comments_enlisted
def checkFactorDB(n):
"""See if the modulus is already factored on factordb.com,
and if so get the factors"""
# Factordb gives id's of numbers, which act as links for full number
# follow the id's and get the actual numbers
r = requests.get('http://www.factordb.com/index.php?query=%s' % str(n))
regex = re.compile("index\.php\?id\=([0-9]+)", re.IGNORECASE)
ids = regex.findall(r.text)
# These give you ID's to the actual number
p_id = ids[1]
q_id = ids[2]
# follow ID's
regex = re.compile("value=\"([0-9]+)\"", re.IGNORECASE)
r_1 = requests.get('http://www.factordb.com/index.php?id=%s' % p_id)
r_2 = requests.get('http://www.factordb.com/index.php?id=%s' % q_id)
# Get numbers
p = int(regex.findall(r_1.text)[0])
print(p)
    ans = 1
    n = int(n)
    p = int(p)
    print(n)
    while n % p == 0:
        ans *= p
        n //= p  # floor division keeps n an int on Python 3
    print(ans, n, ans * n)
return (ans, n)
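# An offline check of the trial-division tail above (the loop splits n into
# p**k times a p-free remainder):
_n, _p, _ans = 360, 2, 1
while _n % _p == 0:
    _ans *= _p
    _n //= _p
assert (_ans, _n) == (8, 45)  # 360 == 2**3 * 45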
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
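# Hedged sketch of the challenge-parsing step above, on a made-up
# WWW-Authenticate value (parse_dict_header comes from requests.utils):
import re
from requests.utils import parse_dict_header

_s_auth = 'Digest realm="me@example.com", nonce="abc123", qop="auth"'
_pat = re.compile(r'digest ', flags=re.IGNORECASE)
_chal = parse_dict_header(_pat.sub('', _s_auth, count=1))
assert _chal == {'realm': 'me@example.com', 'nonce': 'abc123', 'qop': 'auth'}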
def _adjust_header(cls, type_, orig_header):
"""
    Make sure 'pythonw' is used for gui scripts and 'python' is used for
    console scripts (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
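# Illustration of the console branch above on a made-up script header
# (gui scripts get the swap in the other direction, to pythonw.exe):
import re

_header = '#!"C:\\Python\\pythonw.exe"\n'
_ob = re.compile(re.escape('pythonw.exe'), re.IGNORECASE)
assert _ob.sub(string=_header, repl='python.exe') == '#!"C:\\Python\\python.exe"\n'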
def _get_device_counters(self, device):
r = re.compile('/ctr[0-9]+$', flags=re.IGNORECASE)
co_phys_chan_names = [c.name for c in device.co_physical_chans]
return list(filter(r.search, co_phys_chan_names))
def _get_device_pfi_lines(self, device):
r = re.compile('/PFI[0-9]+$', flags=re.IGNORECASE)
return list(filter(r.search, device.terminals))
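# Both helpers above use the same filter idiom; a standalone run with
# fabricated terminal names (not real NI-DAQmx device data):
import re

_r = re.compile('/PFI[0-9]+$', flags=re.IGNORECASE)
_terminals = ['/Dev1/PFI0', '/Dev1/pfi12', '/Dev1/ai0']
assert list(filter(_r.search, _terminals)) == ['/Dev1/PFI0', '/Dev1/pfi12']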
def checkMatch(self, fieldValue, acListEntry):
pattern = re.compile(re.escape(fieldValue) + '.*', re.IGNORECASE)
return re.match(pattern, acListEntry)
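# The prefix match above in isolation: escape the typed value, then let
# re.match anchor it at the start of each autocomplete entry:
import re

_pattern = re.compile(re.escape('new y') + '.*', re.IGNORECASE)
assert re.match(_pattern, 'New York')
assert not re.match(_pattern, 'Newark')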
def setup(bot=None):
global url_finder
# TODO figure out why this is needed, and get rid of it, because really?
if not bot:
return
bot.config.define_section('url', UrlSection)
if bot.config.url.exclude:
regexes = [re.compile(s) for s in bot.config.url.exclude]
else:
regexes = []
    # We're keeping these in their own list, rather than putting them in the
    # callbacks list because 1) it's easier to deal with modules that are still
    # using this list and not the newer callbacks list, and 2) having a lambda
    # just to pass is kinda ugly.
if not bot.memory.contains('url_exclude'):
bot.memory['url_exclude'] = regexes
else:
exclude = bot.memory['url_exclude']
if regexes:
exclude.extend(regexes)
bot.memory['url_exclude'] = exclude
# Ensure that url_callbacks and last_seen_url are in memory
if not bot.memory.contains('url_callbacks'):
bot.memory['url_callbacks'] = tools.SopelMemory()
if not bot.memory.contains('last_seen_url'):
bot.memory['last_seen_url'] = tools.SopelMemory()
    url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\S+))' %
                            (bot.config.url.exclusion_char), re.IGNORECASE)
def is_color(string):
"""Checks if the given string is a valid color.
Arguments:
string -- the string to check
"""
if string in NAMED_COLORS:
return True
if re.match(r"^#?[0-9a-f]{3}([0-9a-f]{3})?$", string, re.IGNORECASE):
return True
return False
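# Hedged check of the hex branch above (NAMED_COLORS is assumed to be defined
# elsewhere in the module):
import re

_hex = r"^#?[0-9a-f]{3}([0-9a-f]{3})?$"
for _s in ('#fff', 'FFF', '#1A2b3C'):
    assert re.match(_hex, _s, re.IGNORECASE)
assert not re.match(_hex, '#12345', re.IGNORECASE)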
def update_handler(d):
global flag
global variable
    # In this example, we just inspect the update object itself
    d = str(d)
    # testChannel: pull the numeric message id out of the update's repr
    re1 = '( id: )(?:[0-9][0-9]+)(,)'
    rg = re.compile(re1, re.IGNORECASE | re.DOTALL)
    m = rg.search(d)
    if m:
        # strip the ' id: ' prefix and the trailing comma, leaving the digits
        idd = int(m.group(0).replace(' id: ', '').replace(',', ''))
peer1 = InputPeerSelf()
#INPUT YOUR KEYWORDS BELOW
word_list = ["#DCR", "#LTC", "#NAUT", "#NXT", "#XCP", "#GRC", "#REP", "#PPC", "#RIC", "#STRAT", "#GAME", "#BTM", "#CLAM", "#ARDR", "#BLK", "#OMNI", "#SJCX", "#FLDC", "#BCH", "#POT", "#VRC", "#ETH", "#PINK", "#NOTE", "#BTS", "#AMP", "#NAV", "#BELA", "#ETC", "#FLO", "#VIA", "#XBC", "#XPM", "#DASH", "#XVC", "#GNO", "#NMC", "#RADS", "#VTC", "#XEM", "#FCT", "#XRP", "#NXC", "#STEEM", "#SBD", "#BURST", "#XMR", "#DGB", "#LBC", "#BCY", "#PASC", "#LSK", "#EXP", "#MAID", "#BTCD", "#SYS", "#GNT", "#HUC", "#EMC2", "#NEOS", "#ZEC", "#STR"]
regex_string = "(?<=\W)(%s)(?=\W)" % "|".join(word_list)
finder = re.compile(regex_string)
string_to_be_searched = d
results = finder.findall(" %s " % string_to_be_searched)
result_set = set(results)
print(idd)
for word in word_list:
if word in result_set:
                try:
                    variable = word.replace('#', '') + '-BTC'
                    if os.path.isfile(pid):
                        print('Waiting on current process to finish... If you experience errors, delete process.run')
                    else:
                        sell = 'notready'
                        proc = multiprocessing.Process(target=runitt, args=())
                        proc.start()
                    client(ForwardMessageRequest(peer=peer1, id=idd, random_id=generate_random_long()))
except Exception as e:
print(e)
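# The keyword scan above in isolation, with a toy word list; the lookaround
# pair requires a non-word character on each side, hence the padding spaces
# added before searching:
import re

_word_list = ["#DCR", "#LTC"]
_finder = re.compile(r"(?<=\W)(%s)(?=\W)" % "|".join(_word_list))
assert set(_finder.findall(" big #LTC pump ")) == {"#LTC"}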
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
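# The pattern compiled above drives string.Template substitution; a small
# demonstration with a custom delimiter (IGNORECASE widens idpattern matching,
# VERBOSE permits the whitespace in the class-level pattern source):
from string import Template

class PercentTemplate(Template):
    delimiter = '%'

assert PercentTemplate('%who likes %what').substitute(who='we', what='re') == 'we likes re'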