def query(self, cls, filters, limit=None, order_by=None):
    """Run a filtered query for *cls* against the backing database.

    Lazily connects first; raises NotImplementedError if no connection can
    be established. Returns an object lister over the parsed response.
    """
    if not self.connection:
        self._connect()
    if not self.connection:
        raise NotImplementedError("Can't query without a database connection")
    from urllib import urlencode  # Python 2 location, imported lazily as before
    query_string = str(self._build_query(cls, filters, limit, order_by))
    if query_string:
        url = "/%s?%s" % (self.db_name, urlencode({"query": query_string}))
    else:
        url = "/%s" % self.db_name
    resp = self._make_request('GET', url)
    # Anything other than HTTP 200 is treated as a hard failure.
    if resp.status != 200:
        raise Exception("Error: %s" % resp.status)
    return self._object_lister(cls, parse(resp))
# Example source snippets of Python parse() usage (aggregated from multiple projects)
def save_object(self, obj, expected_value=None):
    """Marshal *obj* to XML, PUT it to the database, and sync server-side
    values (including the generated id) back onto *obj*.

    Returns the same *obj* instance, updated in place.
    """
    doc = self.marshal_object(obj)
    # PUT to the object's own URL when it already has an id, else to the db root.
    url = "/%s/%s" % (self.db_name, obj.id) if obj.id else "/%s" % (self.db_name)
    resp = self._make_request("PUT", url, body=doc.toxml())
    new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))
    obj.id = new_obj.id
    for prop in obj.properties():
        # Properties without a usable name are skipped.
        propname = getattr(prop, "name", None)
        if propname:
            value = getattr(new_obj, propname)
            if value:
                setattr(obj, propname, value)
    return obj
def _ParseConfigFile(config_path):
  """Parse a lint-suppressions XML file into {issue_id: _Issue}.

  Each <issue> element carries an 'id' attribute, an optional 'severity'
  attribute, and any number of <ignore> children whose 'path'/'regexp'
  attributes populate the suppression sets.
  """
  # print() is valid in both Python 2 and 3; the original statement form was 2-only.
  print('Parsing %s' % config_path)
  issues_dict = {}
  dom = minidom.parse(config_path)
  for issue in dom.getElementsByTagName('issue'):
    issue_id = issue.attributes['id'].value
    severity = issue.getAttribute('severity')
    # Fetch the <ignore> children once (the original walked them twice).
    ignores = issue.getElementsByTagName('ignore')
    paths = set(
        p.value for p in (i.attributes.get('path') for i in ignores) if p)
    regexps = set(
        r.value for r in (i.attributes.get('regexp') for i in ignores) if r)
    issues_dict[issue_id] = _Issue(severity, paths, regexps)
  return issues_dict
def generate_parses(causal_tree):
    """Expand *causal_tree* into all of its partial causal parses.

    A childless node is its own (single) parse. An "or"/"root" node yields
    one parse per parse of each child; an "and" node yields the cartesian
    product of its children's parses. Unknown node types raise.
    """
    node_type = causal_tree["node_type"]
    if "children" not in causal_tree:
        return (causal_tree,)
    # Template keeps every field of the node except its children
    # (symbol_type, symbol, energy, node_type, ...).
    template = causal_tree.copy()
    template.pop("children")
    parses = []
    if node_type in ("or", "root"):
        # Each alternative child parse becomes a separate single-child parse.
        for child in causal_tree["children"]:
            for child_parse in generate_parses(child):
                template["children"] = (child_parse,)
                parses.append(template.copy())
    elif node_type == "and":
        # Cartesian product of the children's parse sets: every combination
        # is one complete set of children for this node.
        per_child = [generate_parses(child) for child in causal_tree["children"]]
        for combo in itertools.product(*per_child):
            template["children"] = combo
            parses.append(template.copy())
    else:
        raise Exception("UNKNOWN NODE TYPE: {}".format(node_type))
    return parses
def parse_can_jump_from(parse, prev_parse):
    """Return True when a "timer" match in *prev_parse* licenses a "jump" to *parse*.

    BUG FIX: get_symbol_matches_from_parse returns a *list* of matching
    nodes, but the original indexed that list with string keys
    (timer["alternate"]), raising TypeError whenever both lists were
    non-empty. We now test every (timer, jump) pair of matches.
    """
    # "timer" -> "jump"
    timers = get_symbol_matches_from_parse("timer", prev_parse)
    jumps = get_symbol_matches_from_parse("jump", prev_parse)
    for timer in timers:
        for jump in jumps:
            if timer["alternate"] == parse["symbol"] and timer["symbol"] == jump["symbol"]:
                return True
    return False
def get_symbol_matches_from_parse(symbol, parse):
    """Collect every node in *parse* (depth-first, preorder) whose
    "symbol_type" equals *symbol*.
    """
    found = []
    if "symbol_type" in parse and parse["symbol_type"] == symbol:
        found.append(parse)
    for child in parse.get("children", ()):
        found.extend(get_symbol_matches_from_parse(symbol, child))
    return found
def get_actions_used(parse, first=True):
    """Return the set of "event" symbols appearing anywhere in *parse*.

    The *first* flag is threaded through recursive calls but never read;
    it is kept for interface compatibility.
    """
    used = set()
    if "symbol_type" in parse and parse["symbol_type"] == "event":
        used.add(parse["symbol"])
        #print("EVENT USED: {}".format(causal_tree['symbol']))
    for child in parse.get("children", ()):
        used |= get_actions_used(child, False)
    return used
def complete_parse_tree(active_parse_tree, fluent_hash, event_hash, frame, completions, source, event_timeouts):
    """Record a completed parse tree in *completions*, keyed fluent -> frame.

    For each event key the parse references, looks up the best-energy recent
    event to collect responsible agents, folds a trailing "_on"/"_off" suffix
    out of the fluent name, and appends one completion record per unique
    parse id for this frame. Mutates *completions* in place; returns None.
    """
    # we have a winner! let's show them what they've won, bob!
    global debug_calculate_energy
    #debug_calculate_energy = True
    ### don't need this energy = calculate_energy(active_parse_tree, get_energies(fluent_hash, event_hash))
    debug_calculate_energy = False
    fluent = active_parse_tree["symbol"]
    agents_responsible = []
    # if there are agents in the parse, print out who they were
    keys = get_fluent_and_event_keys_we_care_about((active_parse_tree,))
    # WARNING: if we have two event types in the same parse, we can wind up adding the same parse multiple times.
    # BACKLOG: make sure the "if not found" solution below doesn't break anything else when it solves the above
    for event_key in keys["events"]:
        # Only events newer than (frame - timeout) are considered for agency.
        event = get_best_energy_event(event_hash[event_key]['energies'],newerthan=(frame - event_timeouts[event_key]))
        agent = event["agent"]
        if agent:
            agents_responsible.append(agent,)
    # "door_on" / "door_off" are both filed under the base fluent name "door".
    if "_" in fluent:
        prefix, postfix = fluent.rsplit("_",1)
        if postfix in ("on","off",):
            fluent = prefix
    if fluent not in completions:
        completions[fluent] = {}
    completion = completions[fluent]
    if frame not in completion:
        completion[frame] = []
    completion_frame = completion[frame]
    # De-duplicate: skip if this parse id was already completed at this frame.
    found = False
    for item in completion_frame:
        if item['parse']['id'] == active_parse_tree['id']:
            found = True
            break
    if not found:
        #completion_frame.append({"frame": frame, "fluent": fluent, "energy": energy, "parse": active_parse_tree, "agents": agents_responsible, "sum": fluent_hash[active_parse_tree['symbol']]['energy'], 'source': source})
        completion_frame.append({"frame": frame, "fluent": fluent, "parse": active_parse_tree, "agents": agents_responsible, "sum": fluent_hash[active_parse_tree['symbol']]['energy'], 'source': source})
        #print("{}".format("\t".join([str(fluent),str(frame),"{:g}".format(energy),str(make_tree_like_lisp(active_parse_tree)),str(agents_responsible)])))
        #print("{} PARSE TREE {} COMPLETED at {}: energy({}) BY {}\n{}\n***{}***".format(fluent,active_parse_tree['id'],frame,energy,source,make_tree_like_lisp(active_parse_tree),active_parse_tree))
        #print("Agents responsible: {}".format(agents_responsible))
    if kDebugEnergies:
        debug_energies(fluent_hash, event_hash)
def add_missing_parses(fluent, fluent_hash, event_hash, frame, completions):
    """Append a 'missing' completion record for every parse of this fluent
    (and its inverse) that is not already represented in *completions*.

    Mutates and returns *completions*.
    """
    ## here we're just getting the completions for one specific frame
    ## we want to go through all the possible parses for that fluent
    ## and make sure they're spoken for in completions
    #print "ADDING MISSING PARSES"
    # NOTE(review): only the first completion's symbol is inspected — this
    # assumes every entry in *completions* shares one symbol; confirm callers.
    for symbol in (completions[0]['parse']['symbol'],):
        parse_ids_completed = []
        for completion in completions:
            parse_ids_completed.append(completion['parse']['id'])
        #print("IDS: {}".format(parse_ids_completed))
        anti_symbol = invert_name(symbol)
        possible_trees = fluent_hash[symbol]['trees']
        unpossible_trees = fluent_hash[anti_symbol]['trees']
        for possible_tree in possible_trees + unpossible_trees:
            # if this tree is a "primary" for this symbol
            if possible_tree['symbol'] in (symbol,anti_symbol):
                other_parses = possible_tree['parses']
                for other_parse in other_parses:
                    if other_parse['id'] not in parse_ids_completed:
                        parse_ids_completed.append(other_parse['id'])
                        #print("ADDING ID: {}".format(other_parse['id']))
                        #complete_parse_tree(other_parse, fluent_hash, event_hash, effective_frames[symbol], completions, 'missing') ### what is this 'effective frames' thing?
                        #complete_parse_tree(other_parse, fluent_hash, event_hash, frame, completions, 'missing')
                        # we have a winner! let's show them what they've won, bob!
                        #### don't need this energy = calculate_energy(other_parse, get_energies(fluent_hash, event_hash))
                        # Missing parses carry no agents and are tagged 'missing'.
                        agents_responsible = []
                        source = 'missing'
                        #completions.append({"frame": frame, "fluent": fluent, "energy": energy, "parse": other_parse, "agents": agents_responsible, "sum": fluent_hash[other_parse['symbol']]['energy'], 'source': source})
                        completions.append({"frame": frame, "fluent": fluent, "parse": other_parse, "agents": agents_responsible, "sum": fluent_hash[other_parse['symbol']]['energy'], 'source': source})
                        #print("{}".format("\t".join([str(fluent),str(frame),"{:g}".format(energy),str(make_tree_like_lisp(other_parse)),str(agents_responsible)])))
                        #print("{} PARSE TREE {} COMPLETED at {}: energy({}) BY {}\n{}\n***{}***".format(fluent,other_parse['id'],frame,energy,source,make_tree_like_lisp(other_parse),other_parse))
                        #print("Agents responsible: {}".format(agents_responsible))
                        if kDebugEnergies:
                            debug_energies(fluent_hash, event_hash)
    #print "---"
    return completions
# clears out any parses that have not been touched within N frames, printing out any over reporting_threshold_energy
def get_actions_from_parse(parse):
    """List every "event" symbol in *parse*, depth-first preorder,
    preserving duplicates (unlike get_actions_used, which returns a set).
    """
    collected = []
    if "symbol_type" in parse and parse["symbol_type"] == "event":
        #tmp_event, tmp_event_value = parse["symbol"].rsplit("_",1)
        collected.append(parse["symbol"])
    for child in parse.get("children", ()):
        collected.extend(get_actions_from_parse(child))
    return collected
# Source snippet: load_lowe_examples_into_db_details.py
# Project: ochem_predict_nn — author: connorcoley
# (aggregator metadata: reads 22, favorites 0, likes 0, comments 0)
def main(db_fpath, N = 15):
    '''Read up to N reactions from Lowe's patent reaction SMILES dump at
    *db_fpath*, parse each reaction XML record, and insert the resulting
    dicts into the module-level `collection` in batches of 1000.

    Stops early (still returning True) on KeyboardInterrupt.
    NOTE(review): relies on module globals `get_reaction_file`, `doc_to_dic`,
    `random`, `minidom`, and `collection`; not runnable standalone.
    '''
    try:
        # Open file
        file_generator = get_reaction_file(db_fpath)
        print(file_generator)
        documents = []
        for i, rxn in enumerate(file_generator):
            if i == N:
                break
            print('~~~~~~~ {} ~~~~~~'.format(i))
            print('{}: {}'.format(i, rxn))
            # Each rxn is parseable by minidom — presumably a path or file-like
            # XML record; confirm against get_reaction_file.
            document = minidom.parse(rxn)
            try:
                dic = doc_to_dic(document)
                dic['random'] = random()
                documents.append(dic)
            except ValueError as e:
                print(e)
            # Report progress and insert every 1000
            if ((i+1) % 1000) == 0:
                print('{}/{}'.format(i+1, N))
                result = collection.insert(documents)
                documents = []
        # Flush any remainder that never reached the 1000-record threshold.
        if documents: result = collection.insert(documents)
    except KeyboardInterrupt:
        print('Stopped early!')
    print('Created {} database entries'.format(collection.find().count()))
    return True
def test_DomainManagerId(self):
    """Verify the running DomainManager reports the identifier declared in
    its DomainManager.dmd.xml descriptor (SR:213/SR:214), and that the id
    is a DCE UUID per SCA section D.8.1.
    """
    self.assertNotEqual(self._domMgr, None)
    # Load the ID from the XML file
    dom = minidom.parse(os.path.join(scatest.getSdrPath(), "dom/domain/DomainManager.dmd.xml"))
    expectedId = dom.getElementsByTagName("domainmanagerconfiguration")[0].getAttribute("id")
    providedId = self._domMgr._get_identifier()
    self.assertEqual(providedId, expectedId, msg="Violation of SR:213 and/or SR:214")
    # According to SCA section D.8.1, the id is supposed to be a DCE UUID
    self.assertIsDceUUID(expectedId, msg="Violation of SCA D.8.1")
def xml(self, name):
    """Return a parsed DOM for the named XML member of the archive.

    Results are cached in self.xmlFiles. Returns None when the member does
    not exist in the zip. Fixes vs. original: `has_key` (Python-2-only)
    replaced with `in`; the bare `except:` that swallowed every error now
    catches only the KeyError zipfile raises for a missing member; the
    member file handle is closed after parsing (was leaked).
    """
    if name in self.xmlFiles:
        return self.xmlFiles[name]
    try:
        f = self.zf.open(name)
    except KeyError:
        # Missing archive member — preserve the original best-effort None.
        return None
    try:
        dom = parse(f)
    finally:
        f.close()
    if USE_EL_TREE:
        dom = dom.getroot()
    self.xmlFiles[name] = dom
    return dom
def makeRoot(cls, xmlFileName):
    """Parse *xmlFileName* with minidom and wrap the DOM in a new cls instance."""
    document = minidom.parse(xmlFileName)
    return cls(document)
def bingSearch(query, index):
    """Query the Bing web-search XML API and append each result URL to the
    module-level BING_URLS list. *index* is the result offset.
    """
    global BING_URLS
    url = ('http://api.search.live.net/xml.aspx?Appid=' + BING_APP_ID
           + '&query=' + urllib.quote(query)
           + '&sources=web&market=en-us&web.count=50&web.offset=' + str(index))
    dom = minidom.parse(urllib2.urlopen(url))
    for node in dom.getElementsByTagName('web:Url'):
        BING_URLS.append(node.childNodes[0].data)
# Checks the urls for errors
def getExif(photo_id_):
    """Fetch EXIF data for the given photo id via flickr.photos.getExif."""
    response = _doget('flickr.photos.getExif', photo_id=photo_id_)
    return Exif.parse(response.rsp.photo)
def parse(photo):
    """Build an Exif from a photo node: camera name (default '') plus a
    list of ExifTag entries parsed from photo.exif (which may be a single
    node or a list).
    """
    camera = getattr(photo, 'camera', '')
    if not hasattr(photo, 'exif'):
        return Exif(camera, [])
    exif = photo.exif
    if isinstance(exif, list):
        tags = [ExifTag.parse(entry) for entry in exif]
    else:
        tags = [ExifTag.parse(exif)]
    return Exif(camera, tags)
def parse(exif):
    """Build an ExifTag from a raw exif node; raw/clean text default to ''."""
    raw = exif.raw.text if hasattr(exif, 'raw') else ''
    clean = exif.clean.text if hasattr(exif, 'clean') else ''
    return ExifTag(exif.tagspace, exif.tagspaceid, exif.tag, exif.label,
                   raw, clean)
def _doget(method, auth=False, **params):
    """Issue a GET for the given flickr API *method* and return the parsed data.

    Builds the REST URL from module globals (HOST, API, API_KEY), appending
    an auth suffix when the method requires it, then fetches and parses the
    XML response. NOTE(review): Python 2 only (print statement below).
    """
    #uncomment to check you aren't killing the flickr server
    #print "***** do get %s" % method
    params = _prepare_params(params)
    url = '%s%s/?api_key=%s&method=%s&%s%s'% \
          (HOST, API, API_KEY, method, urlencode(params),
           _get_auth_url_suffix(method, auth, params))
    #another useful debug print statement
    if debug:
        print "_doget", url
    return _get_data(minidom.parse(urlopen(url)))
def __init__(self, filepath):
    """Parse *filepath* as XML and cache the <comp> elements found under
    the document's first <components> element.
    """
    self.filepath = filepath
    self.xmldoc = minidom.parse(filepath)
    components_root = self.xmldoc.getElementsByTagName('components')[0]
    self.components = components_root.getElementsByTagName('comp')