def sync_one(cls, external_id, last_error=None):
    """Fetch one entity from the remote API and persist it locally.

    On an IntegrityError caused by a missing related row (a foreign-key
    violation), sync the missing related object first, then retry once per
    distinct error message. Raises UnableToSyncException when the API call
    itself fails, and SameErrorTwiceSyncException when the identical
    IntegrityError repeats.
    """
    post_data = cls.pipedrive_api_client.get_instance(external_id)
    # The API reports failure inside the payload, not via an exception.
    if not post_data[u'success']:
        logging.error(post_data)
        raise UnableToSyncException(cls, external_id)
    try:
        return cls.update_or_create_entity_from_api_post(post_data[u'data'])
    except IntegrityError as e:
        logging.warning(e)
        # The same error twice in a row means retrying cannot make progress.
        if e.message == last_error:
            raise SameErrorTwiceSyncException(cls, external_id, e.message)
        # FK violations carry "... Key (<field>)=(<id>) ..." in the message;
        # pull out the field and the missing related id.
        match = re.search('.*Key \((.*)\)=\((.*)\).*', e.message)
        if not match:
            raise Exception("Could not handle error message")
        related_model = cls.field_model_map(match.group(1))
        # Sync the missing dependency, then retry this entity once,
        # remembering the error so a repeat aborts instead of looping.
        related_model.sync_one(match.group(2))
        return cls.sync_one(external_id, e.message)
# Example source code for Python's re.search()
def addToCart(self):
    """Scrape the product page for its size options and POST the one
    matching ``self.user_size`` to the cart endpoint.

    NOTE(review): Python 2 code (``print`` statements). Assumes ``self``
    carries a configured requests session, URLs, header dicts and the
    ``post_data_addToCart`` template -- set up elsewhere in the class.
    """
    print '\nADD TO CART -----------------'
    # Fetch the product page with the persistent session (keeps cookies).
    session_get = self.user_session.get(self.URL_product_url, headers=self.get_headers)
    #print session_get.content
    soup = BeautifulSoup(session_get.content, 'lxml')
    # The size drop-down; assumes the page has at least one
    # <select class="size-select"> -- TODO confirm against live markup.
    results = soup.find_all('select', class_='size-select')
    #print results
    for item in results[0].select('option'):
        # Normalize the visible label before comparing against user_size.
        re_result = re.sub(self.sub_pattern, '', item.string)
        #print re_result
        # Whole-string match of the requested size against the option label.
        matchObj = re.search(r"^%s+$" % self.user_size, re_result)
        if matchObj:
            # The option's value is the product id; the master id is the
            # part before the first underscore.
            self.post_data_addToCart['pid'] = item['value']
            self.post_data_addToCart['masterPID'] = item['value'].partition("_")[0]
            print self.post_data_addToCart
            break
    session_post = self.user_session.post(url=self.URL_cart_post_url, headers=self.post_headers, data=self.post_data_addToCart)
    print 'Add To Cart Status: ' + str(session_post.status_code)
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    :param versionfile_abs: absolute path to a _version.py-style file.
    :returns: dict with keys "refnames" and/or "full" when found; empty
        dict if the file is missing or unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read raises;
        # the original open()/close() pair leaked the handle on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    :param versionfile_abs: absolute path to a _version.py-style file.
    :returns: dict with keys "refnames" and/or "full" when found; empty
        dict if the file is missing or unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read raises;
        # the original open()/close() pair leaked the handle on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    :param versionfile_abs: absolute path to a _version.py-style file.
    :returns: dict with keys "refnames" and/or "full" when found; empty
        dict if the file is missing or unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read raises;
        # the original open()/close() pair leaked the handle on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    :param versionfile_abs: absolute path to a _version.py-style file.
    :returns: dict with keys "refnames" and/or "full" when found; empty
        dict if the file is missing or unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read raises;
        # the original open()/close() pair leaked the handle on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
def get_sqls(self):
    """Extract SELECT statements from a MySQL general log file.

    Yields:
        :class:`SQL` instances pairing the session's current database with
        the statement text, e.g.
        SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id')

    Note: this is a generator; the original docstring wrongly claimed it
    returned a list, and an unused ``sqls = []`` accumulator was removed.
    """
    general_log = open(self.log_path)
    log = GeneralQueryLog(general_log)
    # Map session_id -> database currently selected by that connection.
    session_db_map = {}
    for entry in log:
        if entry['command'] == 'Connect':
            # "Connect ... on <db>" carries the initial database, if any.
            m = re.search(r'\s+on\s(?P<name>\w+)', entry['argument'])
            if m:
                session_db_map[entry['session_id']] = m.groupdict()['name'].strip()
        elif entry['command'] == 'Init DB':
            # "USE <db>" equivalent: the session switched databases.
            session_db_map[entry['session_id']] = entry['argument'].strip()
        elif entry['command'] == 'Query':
            sql = entry['argument']
            # Only SELECT statements are of interest.
            if sql.strip()[:6].lower() == 'select':
                yield SQL(session_db_map.get(entry['session_id'], ''), sql)
def git_get_keywords(versionfile_abs):
    """Extract version keywords from the given _version.py-style file.

    :param versionfile_abs: absolute path to the file to scan.
    :returns: dict with keys "refnames" and/or "full" when found; empty
        dict if the file is missing or unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read raises;
        # the original open()/close() pair leaked the handle on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
def git_get_keywords(versionfile_abs):
    """Extract version keywords from the given _version.py-style file.

    :param versionfile_abs: absolute path to the file to scan.
    :returns: dict with keys "refnames" and/or "full" when found; empty
        dict if the file is missing or unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read raises;
        # the original open()/close() pair leaked the handle on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
def scan_thread(keyword, catalog_json):
    """Return the numbers of threads whose title ('sub') or OP body ('com')
    contains *keyword* as a whole word (case-insensitive).

    :param keyword: plain-text word to look for.
    :param catalog_json: 4chan-style catalog: list of pages, each a dict
        with a "threads" list of thread dicts.
    :returns: list of thread numbers, at most one entry per thread.
    """
    # Compile once instead of per-thread; re.escape keeps regex
    # metacharacters in the keyword from being treated as syntax
    # (the original interpolated the raw keyword into the pattern).
    pattern = re.compile(r'\b{0}\b'.format(re.escape(keyword)), re.IGNORECASE)
    matched_threads = []
    for page in catalog_json:
        for thread in page["threads"]:
            # elif ensures a thread matching both title and body is
            # reported once (the original appended it twice).
            if 'sub' in thread and pattern.search(str(thread["sub"])):
                matched_threads.append(thread["no"])
            elif 'com' in thread and pattern.search(str(thread["com"])):
                matched_threads.append(thread["no"])
    return matched_threads
def alter_table(self, alter_string):
    """Apply PRIMARY KEY / FOREIGN KEY clauses from a batch of ALTER TABLE
    statements to the in-memory table objects.

    :param alter_string: one or more semicolon-separated ALTER TABLE
        statements (newlines allowed).
    """
    # Flatten newlines so each regex sees a statement on a single line.
    statements = alter_string.replace('\n', ' ').split(';')
    for statement in statements:
        if 'PRIMARY KEY' in statement:
            table_name = re.search("TABLE `(\w+)`", statement).group(1)
            target = self.get_table_by_name(table_name)
            # Register every backtick-quoted primary-key column.
            for pk_column in re.findall("PRIMARY KEY \(`(\w+)`\)", statement):
                target.add_primary_key(pk_column)
        elif 'FOREIGN KEY' in statement:
            table_name = re.search("TABLE `(\w+)`", statement).group(1)
            target = self.get_table_by_name(table_name)
            fk_specs = re.findall("FOREIGN KEY \(`(\w+)`\) REFERENCES `(\w+)` \(`(\w+)`\)", statement)
            # Each spec is (local column, referenced table, referenced column).
            for column, ref_table, ref_column in fk_specs:
                target.add_foreign_key(column, ref_table, ref_column)
def allocate(self, ip_addr, name, platform, cpus, memory, disk):
    """When a node is found, scheduler calls this method with IP address,
    name, CPUs, memory and disk available on that node. This method should
    return a number indicating number of CPUs to use. If return value is 0,
    the node is not used; if the return value is < 0, this allocation is
    ignored (next allocation in the 'node_allocations' list, if any, is
    applied).
    """
    # Node must match the configured IP pattern, else defer to the next
    # allocation in the list.
    if re.match(self.ip_rex, ip_addr) is None:
        return -1
    # Same for the platform filter, when one is configured.
    if self.platform and re.search(self.platform, platform) is None:
        return -1
    # Reject nodes that cannot satisfy the memory / disk requirements.
    too_little_memory = self.memory and memory and self.memory > memory
    too_little_disk = self.disk and disk and self.disk > disk
    if too_little_memory or too_little_disk:
        return 0
    if self.cpus > 0:
        # Fixed request: use exactly self.cpus, or skip the node.
        return self.cpus if cpus >= self.cpus else 0
    if self.cpus == 0:
        return 0
    # Negative self.cpus means "leave that many CPUs unused".
    remaining = cpus + self.cpus
    return remaining if remaining >= 0 else 0
def allocate(self, ip_addr, name, platform, cpus, memory, disk):
    """When a node is found, scheduler calls this method with IP address,
    name, CPUs, memory and disk available on that node. This method should
    return a number indicating number of CPUs to use. If return value is 0,
    the node is not used; if the return value is < 0, this allocation is
    ignored (next allocation in the 'node_allocations' list, if any, is
    applied).
    """
    # IP and (optional) platform filters: a miss defers to the next
    # allocation rather than rejecting the node outright.
    if re.match(self.ip_rex, ip_addr) is None:
        return -1
    if self.platform and re.search(self.platform, platform) is None:
        return -1
    # Skip nodes that fall short of the memory or disk requirement.
    lacks_memory = self.memory and memory and self.memory > memory
    lacks_disk = self.disk and disk and self.disk > disk
    if lacks_memory or lacks_disk:
        return 0
    if self.cpus > 0:
        # Fixed request: take exactly self.cpus when available, else skip.
        return self.cpus if cpus >= self.cpus else 0
    if self.cpus == 0:
        return 0
    # self.cpus < 0: reserve that many CPUs for other work on the node.
    usable = cpus + self.cpus
    return usable if usable >= 0 else 0
def Resolver(obj, path, full=False):
    """Resolve a '#/<section>/<name>' JSON-pointer-style reference.

    :param obj: mapping to resolve against.
    :param path: reference string of the form '#/<section>/<name>'.
    :param full: when True, return obj[<section>][<name>]; otherwise just
        the <name> component of the path.
    """
    parts = re.search('#/(.*)/(.*)', path)
    if full:
        # Look the name up inside the referenced section of obj.
        section = obj[parts.group(1)]
        return section[parts.group(2)]
    # Shallow mode: only the final path component.
    return parts.group(2)
def list_nics(nic_type=None):
    """Return a list of nics of given type(s)"""
    # Accept either a single interface-type string (e.g. "eth") or an
    # iterable of them; None means "list everything".
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type
    interfaces = []
    if nic_type:
        # Filtered mode: query `ip addr` per type using a label glob
        # (e.g. "eth*") and keep names that start with the type prefix.
        for int_type in int_types:
            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
            ip_output = subprocess.check_output(cmd).decode('UTF-8')
            ip_output = ip_output.split('\n')
            ip_output = (line for line in ip_output if line)
            for line in ip_output:
                # Field 1 of an `ip addr` header line is the interface name
                # (with a trailing colon) -- TODO confirm on target distro.
                if line.split()[1].startswith(int_type):
                    # VLAN devices show as "<type><n>.<vlan>@<parent>";
                    # capture the "<type><n>.<vlan>" part.
                    matched = re.search('.*: (' + int_type +
                                        r'[0-9]+\.[0-9]+)@.*', line)
                    if matched:
                        iface = matched.groups()[0]
                    else:
                        # Plain interface: strip the trailing colon.
                        iface = line.split()[1].replace(":", "")
                    if iface not in interfaces:
                        interfaces.append(iface)
    else:
        # Unfiltered mode: parse every "<idx>: <name>:" header from `ip a`.
        cmd = ['ip', 'a']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        ip_output = (line.strip() for line in ip_output if line)
        key = re.compile('^[0-9]+:\s+(.+):')
        for line in ip_output:
            matched = re.search(key, line)
            if matched:
                iface = matched.group(1)
                # Drop any "@parent" suffix (e.g. "eth0.100@eth0").
                iface = iface.partition("@")[0]
                if iface not in interfaces:
                    interfaces.append(iface)
    return interfaces
def is_device_mounted(device):
    '''Given a device path, return True if that device is mounted, and False
    if it isn't.

    :param device: str: Full path of the device to check.
    :returns: boolean: True if the path represents a mounted device, False if
        it doesn't.
    '''
    try:
        out = check_output(['lsblk', '-P', device]).decode('UTF-8')
    except Exception:
        # lsblk missing or the device does not exist -- treat as unmounted.
        # (Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt.)
        return False
    # lsblk -P emits KEY="value" pairs; a non-empty MOUNTPOINT means mounted.
    return bool(re.search(r'MOUNTPOINT=".+"', out))
# Source file: linux-soft-exploit-suggester.py
# Project: linux-soft-exploit-suggester
# Author: belane
# Views: 30, Bookmarks: 0, Likes: 0, Comments: 0
def parseDebian(packages_file):
    """ Parse debian package list to dict (name:version) """
    result = {}
    # With --clean input there is no "ii " status column, so the package
    # name sits in field 0; raw `dpkg -l` output puts it in field 1.
    if args.clean==True: first_field = 0
    else: first_field = 1
    for line in packages_file:
        # Only installed packages ("ii") are considered in dpkg mode.
        if args.clean==True or line[:2] == 'ii':
            fields = line.split()
            if len(fields) < 2 + first_field: continue
            # Software name
            # Strip an architecture qualifier such as "pkg:amd64".
            search = fields[first_field].find(':')
            if search != -1:
                soft_name = cleanName(fields[first_field][:search])
            else:
                soft_name = cleanName(fields[first_field])
            # Version
            # Cut the version at the first revision/build separator
            # ('-', '+' or '~'), e.g. "1.2.3-4ubuntu1" -> "1.2.3".
            search = re.search(r"-|\+|~", fields[first_field + 1])
            if search:
                soft_version = fields[first_field + 1][:search.span()[0]]
            else:
                soft_version = fields[first_field + 1]
            # Drop a leading epoch, e.g. "1:2.3" -> "2.3".
            search = soft_version.find(':')
            if search != -1:
                soft_version = soft_version[search + 1:]
            soft_version = purgeVersionString(soft_version)
            # Format check
            if not soft_name or not soft_version: continue
            # Intense package name split
            # Optionally index each hyphen-separated name fragment so
            # "libfoo-bar" also matches exploits filed under "bar".
            if args.intense and '-' in soft_name:
                for sub_package in soft_name.split('-'):
                    if len(sub_package)>2 and '.' not in sub_package and sub_package not in badpackages: result[sub_package] = soft_version
            else:
                if soft_name not in badpackages: result[soft_name] = soft_version
    return result
# Source file: linux-soft-exploit-suggester.py
# Project: linux-soft-exploit-suggester
# Author: belane
# Views: 37, Bookmarks: 0, Likes: 0, Comments: 0
def purgeVersionString(version_string):
    """ Eliminate invalid characters and last dot from version string """
    # Keep only the leading run of digits and dots ("1.2a3" -> "1.2").
    invalid = re.search(r'[^0-9.]', version_string)
    cleaned = version_string if invalid is None else version_string[:invalid.start()]
    # Drop a dangling trailing dot ("1.2." -> "1.2").
    if cleaned.endswith('.'):
        cleaned = cleaned[:-1]
    return cleaned
def _search_for_query(self, query):
if query in self._search_pattern_cache:
return self._search_pattern_cache[query]
# Build pattern: include all characters
pattern = []
for c in query:
# pattern.append('[^{0}]*{0}'.format(re.escape(c)))
pattern.append('.*?{0}'.format(re.escape(c)))
pattern = ''.join(pattern)
search = re.compile(pattern, re.IGNORECASE).search
self._search_pattern_cache[query] = search
return search
def get_password(self, account, service=None):
    """Retrieve the password saved at ``service/account``.

    Raise :class:`PasswordNotFound` exception if password doesn't exist.

    :param account: name of the account the password is for, e.g.
        "Pinboard"
    :type account: ``unicode``
    :param service: Name of the service. By default, this is the workflow's
        bundle ID
    :type service: ``unicode``
    :returns: account password
    :rtype: ``unicode``
    """
    if not service:
        service = self.bundleid
    # Query the macOS Keychain via the `security` CLI; -g prints the
    # password on stderr/stdout as "password: ..." for parsing below.
    output = self._call_security('find-generic-password', service,
                                 account, '-g')
    # Parsing of `security` output is adapted from python-keyring
    # by Jason R. Coombs
    # https://pypi.python.org/pypi/keyring
    m = re.search(
        r'password:\s*(?:0x(?P<hex>[0-9A-F]+)\s*)?(?:"(?P<pw>.*)")?',
        output)
    if m:
        groups = m.groupdict()
        h = groups.get('hex')
        password = groups.get('pw')
        if h:
            # Non-ASCII passwords come back hex-encoded; decode to text.
            # NOTE(review): Python 2 only -- uses the ``unicode`` builtin.
            password = unicode(binascii.unhexlify(h), 'utf-8')
        self.logger.debug('Got password : %s:%s', service, account)
        return password
def swizzleUrl(self, url, includeToken=False):
    """Strip the scheme and host from *url* and rebuild it via
    ``self.buildUrl``; a URL with no path component is passed through
    unchanged.
    """
    # Fix: the original called re.Search, which does not exist and raised
    # AttributeError on every call; re.search is the correct function.
    m = re.search(r"^\w+:\/\/.+?(\/.+)", url)
    newUrl = m and m.group(1) or None
    return self.buildUrl(newUrl or url, includeToken)