def _build_rdf(self, data=None):
'''
Parse incoming RDF, pinning the original as self.rdf._orig_graph and creating a working copy at self.rdf.graph.
Args:
    data (str|bytes, optional): payload from GET request; expected RDF content in one of several serialization formats
Returns:
    None
'''
# recreate rdf data
self.rdf = SimpleNamespace()
self.rdf.data = data
self.rdf.prefixes = SimpleNamespace()
self.rdf.uris = SimpleNamespace()
# populate prefixes
for prefix,uri in self.repo.context.items():
setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))
# graph
self._parse_graph()
def add_namespace(self, ns_prefix, ns_uri):
'''
The preferred method is to instantiate with namespaces under the repository's 'context',
but prefixes / namespaces can also be added for an individual Resource instance.
Adds to self.rdf.prefixes, which endures through create / update / refresh
and gets bound back to the parsed graph's namespaces.
Args:
    ns_prefix (str): prefix for namespace, e.g. 'dc', 'foaf'
    ns_uri (str): namespace / ontology URI, e.g. 'http://purl.org/dc/elements/1.1/', 'http://xmlns.com/foaf/0.1/'
Returns:
    None: binds this new prefix:namespace pair to self.rdf.prefixes for use, and to self.rdf.graph for serialization
'''
# add to prefixes
setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
# bind to graph
self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
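# Usage sketch (assumptions: `resource` is an instance of the class above and
# `resource.rdf.graph` is already populated; the bibo URI is just an example):
resource.add_namespace('bibo', 'http://purl.org/ontology/bibo/')
# the new prefix is now available for object-style access...
isbn_predicate = resource.rdf.prefixes.bibo.isbn
# ...and will be bound when the graph is serialized
print(resource.rdf.graph.serialize(format='turtle'))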
def verify_rdf(rdf_output):
ids_ns = Namespace("http://foo.example.org/CSV/People-IDs/")
ages_ns = Namespace("http://foo.example.org/CSV/People-Ages/")
g = ConjunctiveGraph()
g.parse(data=rdf_output, format="turtle")
all_subjects = {x for x in g.subjects()}
assert len(all_subjects) == 2
bob_subj = ids_ns['1']
joe_subj = ids_ns['2']
assert bob_subj in all_subjects
assert joe_subj in all_subjects
# Bob's details (materialize each generator with list(); wrapping it in a
# one-element list would make len(...) == 1 vacuously true)
assert len(list(g.triples((bob_subj, ids_ns.id, Literal(1))))) == 1
assert len(list(g.triples((bob_subj, ids_ns.name, Literal("Bob"))))) == 1
assert len(list(g.triples((bob_subj, ages_ns.age, Literal(34))))) == 1
# Joe's details
assert len(list(g.triples((joe_subj, ids_ns.id, Literal(2))))) == 1
assert len(list(g.triples((joe_subj, ids_ns.name, Literal("Joe"))))) == 1
assert len(list(g.triples((joe_subj, ages_ns.age, Literal(54))))) == 1
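# For orientation, a minimal Turtle document that satisfies these assertions
# (reconstructed from the namespaces and literals above; illustrative, not the
# project's actual fixture):
SAMPLE_TURTLE = """
@prefix ids: <http://foo.example.org/CSV/People-IDs/> .
@prefix ages: <http://foo.example.org/CSV/People-Ages/> .
ids:1 ids:id 1 ; ids:name "Bob" ; ages:age 34 .
ids:2 ids:id 2 ; ids:name "Joe" ; ages:age 54 .
"""
verify_rdf(SAMPLE_TURTLE)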
def populate_entity(self, bf_class, existing_uri=None):
"""Takes a BIBFRAME graph and MODS XML, extracts info for each
entity's property and adds to graph.
Args:
bf_class(rdflib.URIRef): Namespace URI
Returns:
rdflib.URIRef: URI of new entity
"""
if existing_uri:
entity_uri = existing_uri
else:
# Check for custom IRIPattern
entity_uri = self.__pattern_uri__(bf_class)
# Finally generate an IRI from the default patterns
if not entity_uri:
entity_uri = self.__generate_uri__()
self.graph.add((entity_uri, rdflib.RDF.type, bf_class))
self.update_linked_classes(bf_class, entity_uri)
self.update_direct_properties(bf_class, entity_uri)
self.update_ordered_linked_classes(bf_class, entity_uri)
self.add_admin_metadata(entity_uri)
self.clean_rdf_types()
return entity_uri
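# Usage sketch (`generator` is a hypothetical instance of the class defining
# populate_entity; the BIBFRAME namespace URI is written out as an assumption):
BF = rdflib.Namespace("http://id.loc.gov/ontologies/bibframe/")
work_uri = generator.populate_entity(BF.Work)
# or reuse a previously minted IRI:
same_uri = generator.populate_entity(BF.Work, existing_uri=work_uri)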
def serialize(self, add, delete):
commit = Namespace("urn:commit:" + str(uuid.uuid1()) + ":")
eccrev = Namespace("https://vocab.eccenca.com/revision/")
g = ConjunctiveGraph()
namespace_manager = NamespaceManager(g)
namespace_manager.bind('eccrev', eccrev, override=False)
g.add((commit.term(""), RDF.type, eccrev.Commit))
graphUris = set(delete.keys()) | set(add.keys())
for graphUri in graphUris:
if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
revision = Namespace("urn:revision:" + str(uuid.uuid1()) + ":")
g.add((commit.term(""), eccrev.hasRevision, revision.term("")))
g.add((revision.term(""), RDF.type, eccrev.Revision))
if str(graphUri) != 'http://quitdiff.default/':
g.add((revision.term(""), eccrev.hasRevisionGraph, graphUri))
if graphUri in delete.keys() and len(delete[graphUri]) > 0:
deleteGraphName = revision.term(":delete")
g.add((revision.term(""), eccrev.deltaDelete, deleteGraphName))
for triple in delete[graphUri]:
g.add(triple + (deleteGraphName,))
if graphUri in add.keys() and len(add[graphUri]) > 0:
insertGraphName = revision.term(":insert")
g.add((revision.term(""), eccrev.deltaInsert, insertGraphName))
for triple in add[graphUri]:
g.add(triple + (insertGraphName,))
return g.serialize(format="trig").decode("utf-8")
def serialize(self, add, delete):
diff = Namespace("http://topbraid.org/diff#")
g = ConjunctiveGraph()
namespace_manager = NamespaceManager(g)
namespace_manager.bind('diff', diff, override=False)
namespace_manager.bind('owl', OWL, override=False)
graphUris = set(delete.keys()) | set(add.keys())
for graphUri in graphUris:
if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
changeset = Namespace("urn:diff:" + str(uuid.uuid1()))
graphTerm = changeset.term("")
if str(graphUri) != 'http://quitdiff.default/':
g.add((graphTerm, OWL.imports, graphUri, graphTerm))
g.add((graphTerm, RDF.type, OWL.Ontology, graphTerm))
g.add((graphTerm, OWL.imports, diff.term(""), graphTerm))
if graphUri in delete.keys() and len(delete[graphUri]) > 0:
i = 0
for triple in delete[graphUri]:
deleteStatementName = BNode()
g.add((deleteStatementName, RDF.type, diff.DeletedTripleDiff, graphTerm))
g.add((deleteStatementName, RDF.subject, triple[0], graphTerm))
g.add((deleteStatementName, RDF.predicate, triple[1], graphTerm))
g.add((deleteStatementName, RDF.object, triple[2], graphTerm))
i += 1
if graphUri in add.keys() and len(add[graphUri]) > 0:
i = 0
for triple in add[graphUri]:
insertGraphName = BNode()
g.add((insertGraphName, RDF.type, diff.AddedTripleDiff, graphTerm))
g.add((insertGraphName, RDF.subject, triple[0], graphTerm))
g.add((insertGraphName, RDF.predicate, triple[1], graphTerm))
g.add((insertGraphName, RDF.object, triple[2], graphTerm))
i += 1
return g.serialize(format="trig").decode("utf-8")
def serialize(self, add, delete):
changeset = Namespace("http://purl.org/vocab/changeset/schema#")
g = ConjunctiveGraph()
namespace_manager = NamespaceManager(g)
namespace_manager.bind('changeset', changeset, override=False)
graphUris = set(delete.keys()) | set(add.keys())
for graphUri in graphUris:
if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
diff = Namespace("urn:changeset:" + str(uuid.uuid1()))
graphTerm = diff.term("")
g.add((graphTerm, RDF.type, changeset.ChangeSet))
if str(graphUri) != 'http://quitdiff.default/':
g.add((graphTerm, changeset.subjectOfChange, graphUri))
if graphUri in delete.keys() and len(delete[graphUri]) > 0:
i = 0
for triple in delete[graphUri]:
deleteStatementName = BNode()
g.add((graphTerm, changeset.removal, deleteStatementName))
g.add((deleteStatementName, RDF.type, RDF.Statement))
g.add((deleteStatementName, RDF.subject, triple[0]))
g.add((deleteStatementName, RDF.predicate, triple[1]))
g.add((deleteStatementName, RDF.object, triple[2]))
i += 1
if graphUri in add.keys() and len(add[graphUri]) > 0:
i = 0
for triple in add[graphUri]:
insertGraphName = BNode()
g.add((graphTerm, changeset.addition, insertGraphName))
g.add((insertGraphName, RDF.type, RDF.Statement))
g.add((insertGraphName, RDF.subject, triple[0]))
g.add((insertGraphName, RDF.predicate, triple[1]))
g.add((insertGraphName, RDF.object, triple[2]))
i += 1
return g.serialize(format="turtle").decode("utf-8")
def rdf_from_sources(self, names, outputFormat = "pretty-xml", rdfOutput = False) :
"""
Extract and RDF graph from a list of RDFa sources and serialize them in one graph. The sources are parsed, the RDF
extracted, and serialization is done in the specified format.
@param names: list of sources, each can be a URI, a file name, or a file-like object
@keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml" and "pretty-xml", as well as "turtle" and "n3" are synonyms.
@return: a serialized RDF Graph
@rtype: string
"""
try:
    from pyRdfaExtras import MyGraph
    graph = MyGraph()
except ImportError:
    graph = Graph()
for prefix, uri in _bindings.items():
    graph.bind(prefix, Namespace(uri))
# the value of rdfOutput determines the reaction on exceptions...
for name in names:
    self.graph_from_source(name, graph, rdfOutput)
return graph.serialize(format=outputFormat)
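# Hypothetical call (`extractor` stands for whatever object defines this method):
turtle_text = extractor.rdf_from_sources(
    ["http://example.org/page.html", "local_copy.html"],
    outputFormat="turtle",
    rdfOutput=True)  # rdfOutput=True folds parse errors into the processor graph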
def test_concurrent2():
dns = Namespace(u"http://www.example.com/")
store = plugin.get("IOMemory", Store)()
g1 = Graph(store=store)
g2 = Graph(store=store)
g1.add((dns.Name, dns.prop, Literal(u"test")))
g1.add((dns.Name, dns.prop, Literal(u"test2")))
g1.add((dns.Name, dns.prop, Literal(u"test3")))
n = len(g1)
i = 0
for t in g1.triples((None, None, None)):
i+=1
g2.add(t)
# next line causes problems because it adds a new Subject that needs
# to be indexed in __subjectIndex dictionary in IOMemory Store.
# which invalidates the iterator used to iterate over g1
g2.add((dns.Name1, dns.prop1, Literal(u"test")))
g2.add((dns.Name1, dns.prop, Literal(u"test")))
g2.add((dns.Name, dns.prop, Literal(u"test4")))
assert i == n
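# A safe variant of the copy loop above (sketch): materializing the triples
# with list() snapshots them, so concurrent adds to the shared IOMemory store
# cannot invalidate the iterator.
for t in list(g1.triples((None, None, None))):
    g2.add(t)
    g2.add((dns.Name1, dns.prop1, Literal(u"test")))  # now safe inside the loop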
def test_ns_localname_roundtrip():
XNS = rdflib.Namespace('http://example.net/fs')
g = rdflib.Graph()
g.bind('xns', str(XNS))
g.add((
rdflib.URIRef('http://example.com/thingy'),
XNS['lowecase.xxx-xxx_xxx'], # <- not round trippable
rdflib.Literal("Junk")))
turtledump = g.serialize(format="turtle").decode('utf-8')
xmldump = g.serialize().decode('utf-8')
g1 = rdflib.Graph()
g1.parse(data=xmldump)
g1.parse(data=turtledump, format="turtle")
def _parse_graph(self):
'''
use Content-Type from headers to determine parsing method
Args:
    None
Returns:
    None: sets self.rdf.graph by parsing data from the GET request, or a blank graph if the resource does not yet exist
'''
# if resource exists, parse self.rdf.data
if self.exists:
self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)
# else, create empty graph
else:
self.rdf.graph = rdflib.Graph()
# bind any additional namespaces from repo instance, but do not override
self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)
for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
# conversely, add namespaces from parsed graph to self.rdf.prefixes
for ns_prefix, ns_uri in self.rdf.graph.namespaces():
setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)
# pin old graph to resource, create copy graph for modifications
self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)
# parse triples for object-like access
self.parse_object_like_triples()
def __init__(
self,
configmode=None,
configfile='config.ttl',
repository=None,
targetdir=None,
versioning=True
):
"""The init method.
This method checks if the config file is given and reads the config file.
If the config file is missing, it will be generated after analyzing the
file structure.
"""
logger = logging.getLogger('quit.conf.QuitConfiguration')
logger.debug('Initializing configuration object.')
self.configchanged = False
self.sysconf = Graph()
self.graphconf = None
self.versioning = versioning
self.origin = None
self.graphs = {}
self.files = {}
self.quit = Namespace('http://quit.aksw.org/')
self.nsMngrSysconf = NamespaceManager(self.sysconf)
self.nsMngrSysconf.bind('', 'http://quit.aksw.org/', override=False)
self.nsMngrGraphconf = NamespaceManager(self.sysconf)
self.nsMngrGraphconf.bind('', 'http://quit.aksw.org/', override=False)
try:
self.__initstoreconfig(
repository=repository,
targetdir=targetdir,
configfile=configfile,
configmode=configmode
)
except InvalidConfigurationError as e:
logger.error(e)
raise e
return
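# Hypothetical instantiation (paths are placeholders; QuitConfiguration is the
# class this __init__ belongs to, per the logger name above):
config = QuitConfiguration(
    configfile='config.ttl',
    targetdir='/tmp/quit-store',
    versioning=True)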
def agent_relationship_inv_lod(request, agent_assoc_id):
aa = AgentAssociation.objects.filter(id=agent_assoc_id)
if not aa:
return HttpResponse({}, content_type='application/json')
else:
agent_association = aa[0]
from rdflib import Graph, Literal, BNode
from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
from rdflib.serializer import Serializer
from rdflib import Namespace, URIRef
path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()
ref = URIRef(instance_abbrv + ":agent-relationship-inv-lod/" + str(agent_association.id) + "/")
inv_ref = URIRef(instance_abbrv + ":agent-relationship-lod/" + str(agent_association.id) + "/")
ref_object = URIRef(instance_abbrv + ":agent-lod/" + str(agent_association.is_associate.id) + "/")
ref_subject = URIRef(instance_abbrv + ":agent-lod/" + str(agent_association.has_associate.id) + "/")
property_name = camelcase_lower(agent_association.association_type.inverse_label)
ref_relationship = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + property_name)
store.add((ref, RDF.type, vf_ns["Relationship"]))
store.add((ref, vf_ns["subject"], ref_subject))
store.add((ref, vf_ns["object"], ref_object))
store.add((ref, vf_ns["relationship"], ref_relationship))
store.add((ref, OWL.inverseOf, inv_ref))
ser = store.serialize(format='json-ld', context=context, indent=4)
return HttpResponse(ser, content_type='application/json')
#return render_to_response("valueaccounting/agent_association.html", {
# "agent_association": agent_association,
#}, context_instance=RequestContext(request))
def agent_type_lod(request, agent_type_name):
ats = AgentType.objects.all()
agent_type = None
#import pdb; pdb.set_trace()
for at in ats:
if camelcase(at.name) == agent_type_name:
agent_type = at
if not agent_type:
return HttpResponse({}, content_type='application/json')
from rdflib import Graph, Literal, BNode
from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
from rdflib.serializer import Serializer
from rdflib import Namespace, URIRef
path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()
if agent_type.name != "Person" and agent_type.name != "Group" and agent_type.name != "Individual":
class_name = camelcase(agent_type.name)
ref = URIRef(instance_abbrv + ":agent-type-lod/" +class_name)
store.add((ref, RDF.type, OWL.Class))
store.add((ref, SKOS.prefLabel, Literal(class_name, lang="en")))
if agent_type.party_type == "individual":
store.add((ref, RDFS.subClassOf, vf_ns.Person))
else:
store.add((ref, RDFS.subClassOf, vf_ns.Group))
ser = store.serialize(format='json-ld', context=context, indent=4)
return HttpResponse(ser, content_type='application/json')
#return render_to_response("valueaccounting/agent_type.html", {
# "agent_type": agent_type,
#}, context_instance=RequestContext(request))
def agent_relationship_type_lod(request, agent_assoc_type_name):
#import pdb; pdb.set_trace()
aats = AgentAssociationType.objects.all()
agent_assoc_type = None
for aat in aats:
if camelcase_lower(aat.label) == agent_assoc_type_name:
agent_assoc_type = aat
inverse = False
elif camelcase_lower(aat.inverse_label) == agent_assoc_type_name:
agent_assoc_type = aat
inverse = True
if not agent_assoc_type:
return HttpResponse({}, content_type='application/json')
from rdflib import Graph, Literal, BNode
from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
from rdflib.serializer import Serializer
from rdflib import Namespace, URIRef
path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()
if inverse:
property_name = camelcase_lower(agent_assoc_type.inverse_label)
inverse_property_name = camelcase_lower(agent_assoc_type.label)
label = agent_assoc_type.inverse_label
else:
property_name = camelcase_lower(agent_assoc_type.label)
inverse_property_name = camelcase_lower(agent_assoc_type.inverse_label)
label = agent_assoc_type.label
ref = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + property_name)
inv_ref = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + inverse_property_name)
store.add((ref, RDF.type, RDF.Property))
store.add((ref, SKOS.prefLabel, Literal(label, lang="en")))
store.add((ref, OWL.inverseOf, inv_ref))
ser = store.serialize(format='json-ld', context=context, indent=4)
return HttpResponse(ser, content_type='application/json')
#return render_to_response("valueaccounting/agent_assoc_type.html", {
# "agent_assoc_type": agent_assoc_type,
#}, context_instance=RequestContext(request))
def build(self):
ds = self.graph
self.context = {"ce":
"https://raw.githubusercontent.com/Vocamp/ComputationalActivity/master/pattern/ComputationalEnvironment.jsonld"}
CE = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalEnvironment#")
CA = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalActivity#")
DOCKER = Namespace("http://w3id.org/daspos/docker#")
info = cpuinfo.get_cpu_info()
# ISSUES: We want the architecture URIs to be created only once, on
# build or initial commit. Otherwise, we want to re-read the URIs
# from the original graph.
ds.bind("ce", CE)
ceuri = URIRef(str(uuid.uuid4()))
ds.add((ceuri, RDF.type, CE.ComputationalEnvironment))
osUri = URIRef(str(uuid.uuid4()))
ds.add((ceuri, CE.hasOperatingSystem, osUri))
ds.add((osUri, RDFS.label, Literal("linux")))
processorUri = URIRef(str(uuid.uuid4()))
ds.add((ceuri, CE.hasHardware, processorUri))
archUri = URIRef(str(uuid.uuid4()))
ds.add((processorUri, CE.hasArchitecture, archUri))
ds.add((archUri, RDFS.label, Literal("amd64")))
ds.add((processorUri, CE.hasNumberOfCores,
Literal("4", datatype=XSD.nonNegativeInteger)))
# :hasArchitecture
# :hasNumberOfCores
# :hasOperatingSystem
# :hasSize Memory or HD
# :isAvailable
# :VirtualMACAddress
def build(self):
self.context = {"prov": "http://www.w3.org/ns/prov#"}
PROV = Namespace("http://www.w3.org/ns/prov#")
chuckORIDchuck = URIRef("http://orcid.org/000-0003-4901-6059")
self.graph.add((chuckORIDchuck, RDF.type, PROV.Person))
def build(self):
self.context = {"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#"}
UUIDNS = Namespace("urn:uuid:")
self.graph.bind("uuidns", UUIDNS)
self.graph.add((UUIDNS[tstuuid], RDFS.label, Literal(
"Docker: https://www.docker.com/")))
def setUp(self):
self.processor = processor.Processor(
rml_rules=os.path.join(FIXURES_PATH,
"rml-basic.ttl"))
self.rr = rdflib.Namespace("http://www.w3.org/ns/r2rml#")
self.test_map = SimpleNamespace()
self.test_map.reference = None
def __init__(self, rml_rules):
self.rml = rdflib.Graph()
if isinstance(rml_rules, list):
for rule in rml_rules:
# First check if rule exists on the filesystem
if os.path.exists(rule):
with open(rule) as file_obj:
raw_rule = file_obj.read()
else:
raw_rule = get_map(rule).decode()
self.rml.parse(data=raw_rule,
format='turtle')
elif isinstance(rml_rules, (rdflib.Graph, rdflib.ConjunctiveGraph)):
self.rml = rml_rules
elif os.path.exists(rml_rules):
self.rml.parse(rml_rules, format='turtle')
else:
self.rml.parse(data=get_map(rml_rules).decode(), format='turtle')
# Populate Namespaces Manager
for prefix, namespace in self.rml.namespaces():
setattr(NS_MGR, prefix, rdflib.Namespace(namespace))
self.output, self.source, self.triplestore_url = None, None, None
self.parents = set()
self.constants = dict(version=__version__)
self.triple_maps = dict()
for row in self.rml.query(GET_TRIPLE_MAPS):
triple_map_iri = row[0]
map_key = str(triple_map_iri)
self.triple_maps[map_key] = SimpleNamespace()
self.triple_maps[map_key].logicalSource = \
self.__logical_source__(triple_map_iri)
self.triple_maps[map_key].subjectMap = \
self.__subject_map__(triple_map_iri)
self.triple_maps[map_key].predicateObjectMap = \
self.__predicate_object_map__(triple_map_iri)
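# Usage sketch, based on the setUp above: Processor accepts a rule file path,
# a list of paths, an rdflib graph, or a packaged map name (the file name here
# is a placeholder):
proc = processor.Processor(rml_rules="rml-basic.ttl")
for iri, triple_map in proc.triple_maps.items():
    print(iri, triple_map.logicalSource, triple_map.subjectMap)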
def test_marmotta_server(self):
slug = ''.join(random.choice(string.ascii_letters) for _ in range(random.randint(3, 10)))
self.assertIsInstance(self.marmotta.server, Namespace)
self.assertIsInstance(self.marmotta.server[slug], URIRef)
self.assertEqual(str(self.marmotta.server[slug]), self.host+"/"+slug)
def __init__(self):
self.ns = Namespace("http://www.w3.org/ns/ldp#")
def __init__(self, srv):
    self.server = Namespace(srv) if srv.endswith("/") else Namespace(srv + "/")

    def ldp(slug=None):
        # build a URI under <server>ldp/, normalizing leading and trailing slashes
        if slug is None:
            return self.server.ldp
        if slug.endswith("/"):
            slug = slug[:-1]
        if not slug.startswith("/"):
            slug = "/" + slug
        return self.server["ldp" + slug]

    self.ldp = ldp
    self.sparql = Struct(select=self.server["sparql/select"], update=self.server["sparql/update"])
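# Behavior sketch for the ldp() helper above (the class name is assumed from
# the test_marmotta_server test; the host is a placeholder):
api = Marmotta("http://localhost:8080")
assert str(api.ldp("foo")) == "http://localhost:8080/ldp/foo"
assert str(api.ldp("/foo/")) == "http://localhost:8080/ldp/foo"
assert str(api.sparql.select) == "http://localhost:8080/sparql/select"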
def define_namespace(self):
"""
Method used to set standard names (dbr stands for dbpediaresource, dbp for dbpediaproperty,
dbo for dbpediaontology)
:return:
"""
if self.chapter != 'en':
self.dbr = rdflib.Namespace("http://" + self.chapter + ".dbpedia.org/resource/")
else:
self.dbr = rdflib.Namespace("http://dbpedia.org/resource/")
self.dbo = rdflib.Namespace("http://dbpedia.org/ontology/")
self.dbp = rdflib.Namespace("http://dbpedia.org/property/")
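# Illustrative use (assumption: `mapper` is an instance whose chapter is 'it'):
mapper.define_namespace()
print(mapper.dbr["Roma"])   # http://it.dbpedia.org/resource/Roma
print(mapper.dbo.abstract)  # http://dbpedia.org/ontology/abstract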
def __init__(self, state, top_level) :
"""
@param state: the state behind this term mapping
@type state: L{state.ExecutionContext}
@param top_level : whether this is the top node of the DOM tree (the only place where initial contexts are handled)
@type top_level : boolean
"""
self.state = state
# This is to store the local terms
self.terms = {}
# This is to store the local Namespaces (a.k.a. prefixes)
self.ns = {}
# Default vocabulary
self.vocabulary = None
if state.rdfa_version < "1.1" or not top_level :
return
from .initialcontext import initial_context as context_data
from .host import initial_contexts as context_ids
from .host import default_vocabulary
for id in context_ids[state.options.host_language] :
# This gives the id of an initial context, valid for this media type:
data = context_data[id]
# Merge the context data with the overall definition
if state.options.host_language in default_vocabulary :
self.vocabulary = default_vocabulary[state.options.host_language]
elif data.vocabulary != "" :
self.vocabulary = data.vocabulary
for key in data.terms :
self.terms[key] = URIRef(data.terms[key])
for key in data.ns :
self.ns[key] = (Namespace(data.ns[key]),False)
##################################################################################################################
def rdf_from_sources(self, names, outputFormat = "turtle", rdfOutput = False) :
"""
Extract an RDF graph from a list of RDFa sources and serialize it as one graph. The sources are parsed, the RDF
extracted, and serialization is done in the specified format.
@param names: list of sources, each can be a URI, a file name, or a file-like object
@keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml", "pretty-xml", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
@keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible handling it; otherwise a graph is returned with an error message included in the processor graph
@type rdfOutput: boolean
@return: a serialized RDF Graph
@rtype: string
"""
# This is better because it gives access to the various, non-standard serializations
# If it does not work because the extras are not installed, fall back to the standard
# rdflib distribution...
try:
    from pyRdfaExtras import MyGraph
    graph = MyGraph()
except ImportError:
    graph = Graph()
# graph.bind("xsd", Namespace('http://www.w3.org/2001/XMLSchema#'))
# the value of rdfOutput determines the reaction on exceptions...
for name in names :
self.graph_from_source(name, graph, rdfOutput)
retval = graph.serialize(format=outputFormat)
return retval
def contexts(self, triple=None):
"""
Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.
Returns instances of this store with the SPARQL wrapper
object updated via addNamedGraph(?NAME).
This causes a named-graph-uri key / value pair to be sent over
the protocol.
Please note that some SPARQL endpoints are not able to find empty named
graphs.
"""
self.resetQuery()
if triple:
nts = self.node_to_sparql
s, p, o = triple
params = (nts(s if s else Variable('s')),
nts(p if p else Variable('p')),
nts(o if o else Variable('o')))
self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
else:
self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')
doc = ElementTree.parse(SPARQLWrapper.query(self).response)
return (
rt.get(Variable("name"))
for rt, vars in _traverse_sparql_result_dom(
doc, as_dictionary=True, node_from_result=self.node_from_result)
)
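# Sketch of listing named graphs via this method (endpoint URL is a
# placeholder; rdflib's SPARQLStore exposes a compatible contexts()):
from rdflib import URIRef
from rdflib.plugins.stores.sparqlstore import SPARQLStore
store = SPARQLStore("http://localhost:3030/ds/query")
for name in store.contexts((URIRef("http://example.org/s"), None, None)):
    print(name)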
# Namespace persistence interface implementation