def test_collection_render(self):
    """Build an RDF Collection plus surrounding triples and check that the
    resulting graph is isomorphic to the pre-parsed target1 graph.

    The commented-out tail documents a known failure: wrapping the list in
    a foo:Wrapper node (target2) breaks the pretty-xml round-trip.
    """
    foo = Namespace('http://www.example.org/foo/ns/')
    ex = Namespace('http://www.example.org/example/foo/')
    rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
    # Works: x a rdf:List, a foo:Other ;
    # Fails: y a foo:Wrapper, foo:wraps x; x a rdf:List, a foo:Other ;
    target1 = ConjunctiveGraph()
    target1.parse(data=target1xml)
    target2 = ConjunctiveGraph()
    target2.parse(data=target2xml)
    g = ConjunctiveGraph()
    bits = [ex['a'], ex['b'], ex['c']]
    # Collection() mutates `g` as a side effect; the returned handle is not
    # needed afterwards, so don't keep it (the old `l` also shadowed a
    # hard-to-read single-letter name).
    Collection(g, ex['thing'], bits)
    g.add((ex['thing'], rdf['type'], foo['Other']))
    g.add((ex['thing'], foo['property'], Literal('Some Value')))
    for b in bits:
        g.add((b, rdf['type'], foo['Item']))
    self.assertEqual(g.isomorphic(target1), True)
    # g.add((ex['wrapper'], rdf['type'], foo['Wrapper']))
    # g.add((ex['wrapper'], foo['wraps'], ex['thing']))
    # # resn3 = g.serialize(format="n3")
    # # print(resn3)
    # resxml = g.serialize(format="pretty-xml")
    # # print(resxml)
    # self.assertEqual(g.isomorphic(target2), True)
# Example source code using instances of the Python class Namespace()
def test_03_get_value(self):
    """Check that entity E10009 carries foaf:name "Arco Publications"."""
    # Target triple (in graph http://bibliographica.org/entity/E10009):
    #   <http://bibliographica.org/entity/E10009>
    #       <http://xmlns.com/foaf/0.1/name> "Arco Publications"
    graph = self._load_example()
    foaf = Namespace("http://xmlns.com/foaf/0.1/")
    entity = URIRef("http://bibliographica.org/entity/E10009")
    name = graph.value(entity, foaf.name)
    self.assertTrue(name.eq("Arco Publications"))
def __init__(self, state, top_level) :
    """Initialize the local term/prefix mapping for one processing context.

    @param state: the state behind this term mapping
    @type state: L{state.ExecutionContext}
    @param top_level : whether this is the top node of the DOM tree (the only place where initial contexts are handled)
    @type top_level : boolean
    """
    self.state = state
    # Local term mapping: term name -> URIRef
    self.terms = {}
    # Local namespaces (a.k.a. prefixes): prefix -> (Namespace, False) pairs
    self.ns = {}
    # Default vocabulary; stays None unless set from an initial context below
    self.vocabulary = None
    # Initial contexts only apply for RDFa 1.1 and only at the top of the tree
    if state.rdfa_version < "1.1" or top_level == False :
        return
    from .initialcontext import initial_context as context_data
    from .host import initial_contexts as context_ids
    from .host import default_vocabulary
    for id in context_ids[state.options.host_language] :
        # This gives the id of a initial context, valid for this media type:
        data = context_data[id]
        # Merge the context data with the overall definition.
        # NOTE(review): the host-language default vocabulary always wins over
        # the context's own vocabulary; this check repeats per iteration.
        if state.options.host_language in default_vocabulary :
            self.vocabulary = default_vocabulary[state.options.host_language]
        elif data.vocabulary != "" :
            self.vocabulary = data.vocabulary
        for key in data.terms :
            self.terms[key] = URIRef(data.terms[key])
        for key in data.ns :
            # The False flag is stored alongside each Namespace; presumably a
            # "prefix actually used" marker maintained elsewhere — TODO confirm
            self.ns[key] = (Namespace(data.ns[key]),False)
##################################################################################################################
def rdf_from_sources(self, names, outputFormat = "turtle", rdfOutput = False) :
    """
    Extract an RDF graph from a list of RDFa sources and serialize them in one graph. The sources are parsed, the RDF
    extracted, and serialization is done in the specified format.
    @param names: list of sources, each can be a URI, a file name, or a file-like object
    @keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml", "pretty-xml", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
    @keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible handling it; otherwise a graph is returned with an error message included in the processor graph
    @type rdfOutput: boolean
    @return: a serialized RDF Graph
    @rtype: string
    """
    # Prefer MyGraph: it gives access to the various, non-standard
    # serializations. If the pyRdfaExtras package is not installed, fall
    # back to the standard rdflib Graph. The except clause is narrowed to
    # ImportError so that unrelated failures are not silently swallowed.
    try :
        from pyRdfaExtras import MyGraph
        graph = MyGraph()
    except ImportError :
        graph = Graph()
    # graph.bind("xsd", Namespace('http://www.w3.org/2001/XMLSchema#'))
    # the value of rdfOutput determines the reaction on exceptions...
    for name in names :
        self.graph_from_source(name, graph, rdfOutput)
    return graph.serialize(format=outputFormat)
def contexts(self, triple=None):
    """
    Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
    or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.
    Returns instances of this store with the SPARQL wrapper
    object updated via addNamedGraph(?NAME).
    This causes a named-graph-uri key / value pair to be sent over
    the protocol.
    Please note that some SPARQL endpoints are not able to find empty named
    graphs.
    """
    self.resetQuery()
    if triple:
        # Substitute bound terms into the pattern; unbound positions become
        # SPARQL variables (?s ?p ?o), serialized via .n3().
        s, p, o = triple
        params = ((s if s else Variable('s')).n3(),
                  (p if p else Variable('p')).n3(),
                  (o if o else Variable('o')).n3())
        self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
    else:
        self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')
    # Parse the SPARQL XML results document straight off the HTTP response.
    doc = ElementTree.parse(SPARQLWrapper.query(self).response)
    # Lazily yield the ?name binding from each result row.
    return (
        rt.get(Variable("name"))
        for rt, vars in _traverse_sparql_result_dom(
            doc, as_dictionary=True, node_from_result=self.node_from_result)
    )
# Namespace persistence interface implementation
def _frament_fill_meta(self, tpq, fragment, last_result, total_nb_triples, nb_triple_per_page, request, tpf_url):
    """Fill the fragment's metadata graph with Hydra/VoID descriptors for
    the Twitter Triple Pattern Fragment endpoint.

    Args:
        tpq: triple-pattern query object (uses .subject, .predicate, .obj, .page).
        fragment: collects quads, graphs and prefixes for the response.
        last_result: True when this page is the last one (suppresses "next").
        total_nb_triples: total number of triples matching the pattern.
        nb_triple_per_page: page size advertised via hydra:itemsPerPage.
        request: HTTP request; its absolute URI becomes the view's source URI.
        tpf_url: base URL of this TPF service.
    """
    # NOTE(review): "_frament" is presumably a typo for "_fragment" — renaming
    # would break callers, so the name is kept as-is.
    meta_graph = self._tpf_uri(tpf_url, 'metadata')
    fragment.add_graph(meta_graph)
    dataset_base = self._tpf_uri(tpf_url)
    source = URIRef(request.build_absolute_uri())
    # URI template advertised for triple-pattern lookups.
    dataset_template = Literal('%s%s' % (dataset_base, '{?subject,predicate,object}'))
    data_graph = self._tpf_uri(tpf_url, 'dataset')
    tp_node = BNode('triplePattern')
    subject_node = BNode('subject')
    predicate_node = BNode('predicate')
    object_node = BNode('object')
    HYDRA = Namespace("http://www.w3.org/ns/hydra/core#")
    VOID = Namespace("http://rdfs.org/ns/void#")
    FOAF = Namespace("http://xmlns.com/foaf/0.1/")
    DCTERMS = Namespace("http://purl.org/dc/terms/")
    # Dataset-level description (VoID dataset + Hydra collection).
    fragment.add_meta_quad(meta_graph, FOAF['primaryTopic'], dataset_base, meta_graph)
    fragment.add_meta_quad(data_graph, HYDRA['member'], data_graph, meta_graph)
    fragment.add_meta_quad(data_graph, RDF.type, VOID['Dataset'], meta_graph)
    fragment.add_meta_quad(data_graph, RDF.type, HYDRA['Collection'], meta_graph)
    fragment.add_meta_quad(data_graph, VOID['subset'], source, meta_graph)
    fragment.add_meta_quad(data_graph, VOID['uriLookupEndpoint'], dataset_template, meta_graph)
    # Hydra search template mapping {?subject,predicate,object} variables.
    fragment.add_meta_quad(data_graph, HYDRA['search'], tp_node, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['template'], dataset_template, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['variableRepresentation'], HYDRA['ExplicitRepresentation'], meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['mapping'], subject_node, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['mapping'], predicate_node, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['mapping'], object_node, meta_graph)
    fragment.add_meta_quad(subject_node, HYDRA['variable'], Literal("subject"), meta_graph)
    fragment.add_meta_quad(subject_node, HYDRA['property'], RDF.subject, meta_graph)
    fragment.add_meta_quad(predicate_node, HYDRA['variable'], Literal("predicate"), meta_graph)
    fragment.add_meta_quad(predicate_node, HYDRA['property'], RDF.predicate, meta_graph)
    fragment.add_meta_quad(object_node, HYDRA['variable'], Literal("object"), meta_graph)
    fragment.add_meta_quad(object_node, HYDRA['property'], RDF.object, meta_graph)
    # Description of the current page (partial collection view).
    fragment.add_meta_quad(dataset_base, VOID['subset'], source, meta_graph)
    fragment.add_meta_quad(source, RDF.type, HYDRA['PartialCollectionView'], meta_graph)
    fragment.add_meta_quad(source, DCTERMS['title'], Literal("TPF Twitter search API 1.1"), meta_graph)
    fragment.add_meta_quad(source, DCTERMS['description'], Literal("Triple Pattern from the twitter api matching the pattern {?s=%s, ?p=%s, ?o=%s}" % (tpq.subject, tpq.predicate, tpq.obj)), meta_graph)
    fragment.add_meta_quad(source, DCTERMS['source'], data_graph, meta_graph)
    fragment.add_meta_quad(source, HYDRA['totalItems'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
    fragment.add_meta_quad(source, VOID['triples'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
    fragment.add_meta_quad(source, HYDRA['itemsPerPage'], Literal(nb_triple_per_page, datatype=XSD.int), meta_graph)
    # Hydra paging controls: first always, previous/next conditionally.
    fragment.add_meta_quad(source, HYDRA['first'], self._tpf_url(dataset_base, 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
    if tpq.page > 1:
        fragment.add_meta_quad(source, HYDRA['previous'], self._tpf_url(dataset_base, tpq.page - 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
    if not last_result:
        fragment.add_meta_quad(source, HYDRA['next'], self._tpf_url(dataset_base, tpq.page + 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
    # Prefixes used when serializing the fragment.
    fragment.add_prefix('twittertpf', Namespace("%s#" % tpf_url[:-1]))
    fragment.add_prefix('void', VOID)
    fragment.add_prefix('foaf', FOAF)
    fragment.add_prefix('hydra', HYDRA)
    fragment.add_prefix('purl', Namespace('http://purl.org/dc/terms/'))
def _frament_fill_meta(self, tpq, fragment, last_result, total_nb_triples, nb_triple_per_page, request, tpf_url):
    """Fill the fragment's metadata graph with Hydra/VoID descriptors for
    the Github Triple Pattern Fragment endpoint.

    Args:
        tpq: triple-pattern query object (uses .subject, .predicate, .obj, .page).
        fragment: collects quads, graphs and prefixes for the response.
        last_result: True when this page is the last one (suppresses "next").
        total_nb_triples: total number of triples matching the pattern.
        nb_triple_per_page: page size advertised via hydra:itemsPerPage.
        request: HTTP request; its absolute URI becomes the view's source URI.
        tpf_url: base URL of this TPF service.
    """
    # NOTE(review): "_frament" is presumably a typo for "_fragment" — renaming
    # would break callers, so the name is kept as-is.
    meta_graph = self._tpf_uri(tpf_url, 'metadata')
    fragment.add_graph(meta_graph)
    dataset_base = self._tpf_uri(tpf_url)
    source = URIRef(request.build_absolute_uri())
    # URI template advertised for triple-pattern lookups.
    dataset_template = Literal('%s%s' % (dataset_base, '{?subject,predicate,object}'))
    data_graph = self._tpf_uri(tpf_url, 'dataset')
    tp_node = BNode('triplePattern')
    subject_node = BNode('subject')
    predicate_node = BNode('predicate')
    object_node = BNode('object')
    HYDRA = Namespace("http://www.w3.org/ns/hydra/core#")
    VOID = Namespace("http://rdfs.org/ns/void#")
    FOAF = Namespace("http://xmlns.com/foaf/0.1/")
    DCTERMS = Namespace("http://purl.org/dc/terms/")
    # Dataset-level description (VoID dataset + Hydra collection).
    fragment.add_meta_quad(meta_graph, FOAF['primaryTopic'], dataset_base, meta_graph)
    fragment.add_meta_quad(data_graph, HYDRA['member'], data_graph, meta_graph)
    fragment.add_meta_quad(data_graph, RDF.type, VOID['Dataset'], meta_graph)
    fragment.add_meta_quad(data_graph, RDF.type, HYDRA['Collection'], meta_graph)
    fragment.add_meta_quad(data_graph, VOID['subset'], source, meta_graph)
    fragment.add_meta_quad(data_graph, VOID['uriLookupEndpoint'], dataset_template, meta_graph)
    # Hydra search template mapping {?subject,predicate,object} variables.
    fragment.add_meta_quad(data_graph, HYDRA['search'], tp_node, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['template'], dataset_template, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['variableRepresentation'], HYDRA['ExplicitRepresentation'], meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['mapping'], subject_node, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['mapping'], predicate_node, meta_graph)
    fragment.add_meta_quad(tp_node, HYDRA['mapping'], object_node, meta_graph)
    fragment.add_meta_quad(subject_node, HYDRA['variable'], Literal("subject"), meta_graph)
    fragment.add_meta_quad(subject_node, HYDRA['property'], RDF.subject, meta_graph)
    fragment.add_meta_quad(predicate_node, HYDRA['variable'], Literal("predicate"), meta_graph)
    fragment.add_meta_quad(predicate_node, HYDRA['property'], RDF.predicate, meta_graph)
    fragment.add_meta_quad(object_node, HYDRA['variable'], Literal("object"), meta_graph)
    fragment.add_meta_quad(object_node, HYDRA['property'], RDF.object, meta_graph)
    # Description of the current page (partial collection view).
    fragment.add_meta_quad(dataset_base, VOID['subset'], source, meta_graph)
    fragment.add_meta_quad(source, RDF.type, HYDRA['PartialCollectionView'], meta_graph)
    fragment.add_meta_quad(source, DCTERMS['title'], Literal("TPF Github search API v3"), meta_graph)
    fragment.add_meta_quad(source, DCTERMS['description'], Literal("Triple Pattern from the github repo api v3 matching the pattern {?s=%s, ?p=%s, ?o=%s}" % (tpq.subject, tpq.predicate, tpq.obj)), meta_graph)
    fragment.add_meta_quad(source, DCTERMS['source'], data_graph, meta_graph)
    fragment.add_meta_quad(source, HYDRA['totalItems'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
    fragment.add_meta_quad(source, VOID['triples'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
    fragment.add_meta_quad(source, HYDRA['itemsPerPage'], Literal(nb_triple_per_page, datatype=XSD.int), meta_graph)
    # Hydra paging controls: first always, previous/next conditionally.
    fragment.add_meta_quad(source, HYDRA['first'], self._tpf_url(dataset_base, 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
    if tpq.page > 1:
        fragment.add_meta_quad(source, HYDRA['previous'], self._tpf_url(dataset_base, tpq.page - 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
    if not last_result:
        fragment.add_meta_quad(source, HYDRA['next'], self._tpf_url(dataset_base, tpq.page + 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
    # Prefixes used when serializing the fragment.
    # NOTE(review): the 'twittertpf' prefix name looks copy-pasted from the
    # Twitter variant of this method — verify the intended prefix for Github.
    fragment.add_prefix('twittertpf', Namespace("%s#" % tpf_url[:-1]))
    fragment.add_prefix('void', VOID)
    fragment.add_prefix('foaf', FOAF)
    fragment.add_prefix('hydra', HYDRA)
    fragment.add_prefix('purl', Namespace('http://purl.org/dc/terms/'))
def get_lod_setup_items():
    """Build the JSON-LD context and a prefix-bound rdflib Graph shared by
    the /api/ LOD endpoints.

    Returns a 5-tuple: (path, instance_abbrv, context, store, vf_ns).
    """
    path = get_url_starter() + "/api/"
    # First label of the current site's domain doubles as the JSON-LD prefix
    # for instance-local terms.
    instance_abbrv = Site.objects.get_current().domain.split(".")[0]
    context = {
        "vf": "https://w3id.org/valueflows/",
        "owl": "http://www.w3.org/2002/07/owl#",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "skos": "http://www.w3.org/2004/02/skos/core#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        #"rdfs:label": { "@container": "@language" },
        "Agent": "vf:Agent",
        "Person": "vf:Person",
        "Group": "vf:Group",
        #"Organization": "vf:Organization",
        "url": { "@id": "vf:url", "@type": "@id" },
        "image": { "@id": "vf:image", "@type": "@id" },
        #"displayName": "vf:displayName",
        #"displayNameMap": { "@id": "displayName", "@container": "@language" },
        "Relationship": "vf:Relationship",
        "subject": { "@id": "vf:subject", "@type": "@id" },
        "object": { "@id": "vf:object", "@type": "@id" },
        "relationship": { "@id": "vf:relationship", "@type": "@id" },
        #"member": { "@id": "vf:member", "@type": "@id" }
        "label": "skos:prefLabel",
        "labelMap": { "@id": "skos:prefLabel", "@container": "@language" },
        "note": "skos:note",
        "noteMap": { "@id": "skos:note", "@container": "@language" },
        "inverseOf": "owl:inverseOf",
        instance_abbrv: path,
    }
    store = Graph()
    vf_ns = Namespace("https://w3id.org/valueflows/")
    instance_ns = Namespace(path)
    # Register the prefixes used when serializing the graph.
    for prefix, namespace in (
        ("rdf", RDF),
        ("rdfs", RDFS),
        ("owl", OWL),
        ("skos", SKOS),
        ("vf", vf_ns),
        ("instance", instance_ns),
    ):
        store.bind(prefix, namespace)
    return path, instance_abbrv, context, store, vf_ns
def get_lod_setup_items():
    """Build the JSON-LD context and a prefix-bound rdflib Graph shared by
    the /accounting/ LOD endpoints.

    Returns a 5-tuple: (path, instance_abbrv, context, graph, vf_ns).
    """
    from rdflib import Graph
    from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
    from rdflib import Namespace
    path = get_url_starter() + "/accounting/"
    # First label of the current site's domain doubles as the JSON-LD prefix
    # for instance-local terms.
    instance_abbrv = Site.objects.get_current().domain.split(".")[0]
    context = {
        "vf": "https://w3id.org/valueflows/",
        "owl": "http://www.w3.org/2002/07/owl#",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "skos": "http://www.w3.org/2004/02/skos/core#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        #"rdfs:label": { "@container": "@language" },
        "Agent": "vf:Agent",
        "Person": "vf:Person",
        "Group": "vf:Group",
        #"Organization": "vf:Organization",
        "url": { "@id": "vf:url", "@type": "@id" },
        "image": { "@id": "vf:image", "@type": "@id" },
        #"displayName": "vf:displayName",
        #"displayNameMap": { "@id": "displayName", "@container": "@language" },
        "Relationship": "vf:Relationship",
        "subject": { "@id": "vf:subject", "@type": "@id" },
        "object": { "@id": "vf:object", "@type": "@id" },
        "relationship": { "@id": "vf:relationship", "@type": "@id" },
        #"member": { "@id": "vf:member", "@type": "@id" }
        "label": "skos:prefLabel",
        "labelMap": { "@id": "skos:prefLabel", "@container": "@language" },
        "note": "skos:note",
        "noteMap": { "@id": "skos:note", "@container": "@language" },
        "inverseOf": "owl:inverseOf",
        instance_abbrv: path,
    }
    graph = Graph()
    graph.bind("rdf", RDF)
    graph.bind("rdfs", RDFS)
    graph.bind("owl", OWL)
    graph.bind("skos", SKOS)
    vf_ns = Namespace("https://w3id.org/valueflows/")
    graph.bind("vf", vf_ns)
    instance_ns = Namespace(path)
    graph.bind("instance", instance_ns)
    return path, instance_abbrv, context, graph, vf_ns
def agent_lod(request, agent_id):
    """Serve a single EconomicAgent, with its relationships, as JSON-LD."""
    agents = EconomicAgent.objects.filter(id=agent_id)
    if not agents:
        return HttpResponse({}, content_type='application/json')
    agent = agents[0]
    subject_assocs = agent.all_is_associates()
    object_assocs = agent.all_has_associates()
    from rdflib import Graph, Literal, BNode
    from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
    from rdflib.serializer import Serializer
    from rdflib import Namespace, URIRef
    path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()
    ref = URIRef(instance_abbrv + ":agent-lod/" + str(agent.id) + "/")
    # Type the agent: individuals/persons get vf:Person, everything else a
    # camel-cased instance-local agent-type class.
    if agent.agent_type.name in ("Individual", "Person"):
        store.add((ref, RDF.type, vf_ns.Person))
    else:
        type_class = camelcase(agent.agent_type.name)
        store.add((ref, RDF.type,
                   URIRef(instance_abbrv + ":agent-type-lod/" + type_class)))
    store.add((ref, vf_ns["label"], Literal(agent.name, lang="en")))
    # Outgoing relationships (agent is the subject).
    for assoc in subject_assocs:
        rel_prop = camelcase_lower(assoc.association_type.label)
        store.add((
            ref,
            URIRef(instance_abbrv + ":agent-relationship-type-lod/" + rel_prop),
            URIRef(instance_abbrv + ":agent-relationship-lod/" + str(assoc.id) + "/"),
        ))
    # Incoming relationships (agent is the object), expressed via the
    # association type's inverse label.
    for assoc in object_assocs:
        inv_prop = camelcase_lower(assoc.association_type.inverse_label)
        store.add((
            ref,
            URIRef(instance_abbrv + ":agent-relationship-type-lod/" + inv_prop),
            URIRef(instance_abbrv + ":agent-relationship-inv-lod/" + str(assoc.id) + "/"),
        ))
    ser = store.serialize(format='json-ld', context=context, indent=4)
    return HttpResponse(ser, content_type='application/json')
#following method supplied by Niklas at rdflib-jsonld support to get the desired output for nested rdf inputs for rdflib
def cli(ctx):
    """Smartcontainers for software and data preservation.
    Smartcontainers provides a mechanism to add metadata to Docker
    containers as a JSON-LD label. The metadata is contextualized using
    W3C recommended PROV-O and ORCID IDs to capture provenance information.
    The sc command wraps the docker commandline interface and passes any
    docker command line parameters through to docker. Any command that changes
    the state of the container is recorded in a prov graph and attached to the resultant
    image.
    """
    # Ignore config loading if we intend to create an orcid config
    if ctx.args[0] == "config" and ctx.args[1] == "orcid":
        return
    # Loop until a usable user configuration exists on disk.
    Success = False
    while not Success:
        result = config_file.read_config()
        if 'Configuration does not exist.' in result:
            print("User configuration needs to be initialized")
            selected = None
            # Prompt until the user answers; any answer exits this inner loop.
            while not selected:
                try:
                    selected = click.prompt('Do you have an ORCID profile (Y/N)')
                    if selected.lower() == 'y' or selected.lower() == 'yes':
                        # Existing ORCID: look the profile up online.
                        config_by_search()
                        continue
                    if selected.lower() == 'n' or selected.lower() == 'no':
                        # No ORCID: collect a minimal FOAF record instead.
                        print("Please provide some basic information:")
                        query = {
                            'first_name': click.prompt(
                                'Please enter a first name', default='',
                                show_default=False
                            ),
                            'last_name': click.prompt(
                                'Please enter a last name', default='',
                                show_default=False
                            )
                        }
                        # Mint a fresh urn:uuid identity for this user and
                        # store the names as FOAF properties.
                        dockerUseruuid = str(uuid.uuid4())
                        UUIDNS = Namespace("urn:uuid:")
                        config_file.graph.bind("foaf", FOAF)
                        config_file.graph.add( ( UUIDNS[dockerUseruuid], FOAF.givenName, Literal(query['first_name']) ) )
                        config_file.graph.add( ( UUIDNS[dockerUseruuid], FOAF.familyName, Literal(query['last_name']) ) )
                        # Persist the graph as turtle in the config file.
                        config_file.config_obj = config_file.graph.serialize(format='turtle')
                        config_file.write_config()
                except KeyError:
                    print('That is not a valid selection. Please try again.\n')
        else:
            # Config exists: expose its graph and stop looping.
            Success = True
            graph = config_file.graph
def build(self):
    """Prepare the provenance graph: set the JSON-LD context, bind all
    namespaces, then emit agent, entity and activity metadata."""
    ds = self.graph
    self.context = {
        "prov": "http://www.w3.org/ns/prov#",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        "xsd": "http://www.w3.org/2001/XMLSchema#",
        "dc": "http://purl.org/dc/terms",
    }
    # Namespaces used in the provenance record.
    PROV = Namespace("http://www.w3.org/ns/prov#")
    ORE = Namespace("http://www.openarchives.org/ore/terms/")
    OWL = Namespace("http://www.w3.org/2002/07/owl#")
    DC = Namespace("http://purl.org/dc/terms/")
    UUIDNS = Namespace("urn:uuid:")
    DOCKER = Namespace("http://w3id.org/daspos/docker#")
    # W3C namespace:
    POSIX = Namespace("http://www.w3.org/ns/posix/stat#")
    ACL = Namespace("http://www.w3.org/ns/auth/acl#")
    # DASPOS namespaces
    SC = Namespace("https://w3id.org/daspos/smartcontainers#")
    CA = Namespace("https://w3id.org/daspos/computationalactivity#")
    CE = Namespace("https://w3id.org/daspos/computationalenvironment#")
    # Need to handle DOI
    # http://bitwacker.com/2010/02/04/dois-uris-and-cool-resolution/
    for prefix, ns in (
        ("prov", PROV), ("ore", ORE), ("owl", OWL), ("dc", DC),
        ("uuidns", UUIDNS), ("docker", DOCKER), ("posix", POSIX),
        ("acl", ACL), ("sc", SC), ("ca", CA), ("ce", CE), ("foaf", FOAF),
    ):
        ds.bind(prefix, ns)
    # Build agent metadata, then entity and activity records.
    self.build_agent(ds)
    self.build_entity(ds)
    self.build_activity(ds)
# message_board_LOD_server.py — file source code
# Project: programming-the-semantic-web
# Author: utecht
# Reads: 15 · Favorites: 0 · Likes: 0 · Comments: 0
def servedata(environ):
    """WSGI-style handler: answer a message-board LOD request with RDF/XML.

    Routes on PATH_INFO: paths containing "users" are answered with a user
    CONSTRUCT query, everything else with a message CONSTRUCT query, both
    run against the module-level graph `sg`.
    """
    #Additional ns' for the queries
    ourserver = "http://" + server_addr + ":" + str(server_port) + "/"
    MBMSG = Namespace(ourserver + "messages/")
    MBUSR = Namespace(ourserver + "users/")
    path = environ["PATH_INFO"]
    resp = {"status":"200 OK"}
    resp["headers"] = [("Content-type", "application/rdf+xml")]
    if environ["PATH_INFO"].find("users") != -1:
        #user request query
        # The trailing path segment is the user id, turned into a mbusr:
        # prefixed name and spliced directly into the query text.
        userid = "mbusr:" + path[path.rindex("/") + 1:]
        query = """CONSTRUCT {
        """ + userid + """ sioc:creator_of ?msg .
        ?msg dc:title ?title .
        """ + userid + """ foaf:name ?name .
        } WHERE {
        ?msg sioc:has_creator """ + userid + """ .
        ?msg dc:title ?title .
        """ + userid + """ foaf:name ?name .
        } """
    else:
        #message request query
        # Same splicing, but for a mbmsg: message id.
        msgid = "mbmsg:" + path[path.rindex("/") + 1:]
        query = """CONSTRUCT {
        """ + msgid + """ dc:title ?title .
        """ + msgid + """ sioc:has_creator ?user .
        """ + msgid + """ sioc:content ?content .
        } WHERE {
        """ + msgid + """ dc:title ?title .
        """ + msgid + """ sioc:has_creator ?user .
        """ + msgid + """ sioc:content ?content .
        } """
    # Prefix bindings handed to the query engine so the spliced prefixed
    # names (mbusr:, mbmsg:, sioc:, ...) resolve.
    bindingdict = {'sioc':SIOC,
                   'dc':DC,
                   'dcterms':DCTERMS,
                   'foaf':FOAF,
                   'rdfs':RDFS,
                   'mb':MB,
                   'mbmsg':MBMSG,
                   'mbusr':MBUSR}
    resp["body"] = [sg.query(query, initNs=bindingdict).serialize(format='xml')]
    return resp
def __predicate_object_map__(self, map_iri):
    """Iterates through rr:predicateObjectMaps for this TripleMap
    creating a SimpleNamespace for each triple map and assigning the
    constant, template, parentTripleMap, reference as properties.
    Args:
    -----
        map_iri: rdflib.URIRef, TripleMap IRI
    Returns:
    --------
        list: List of predicate_object Namespace objects
    """
    pred_obj_maps = []
    for po_bnode in self.rml.objects(
            subject=map_iri,
            predicate=NS_MGR.rr.predicateObjectMap):
        pred_obj_map = SimpleNamespace()
        pred_obj_map.predicate = self.rml.value(
            subject=po_bnode,
            predicate=NS_MGR.rr.predicate)
        obj_map_bnode = self.rml.value(
            subject=po_bnode,
            predicate=NS_MGR.rr.objectMap)
        # A predicateObjectMap without an objectMap carries nothing usable.
        if obj_map_bnode is None:
            continue
        # All remaining properties are single values hanging off the
        # objectMap blank node.
        value_of = lambda pred: self.rml.value(
            subject=obj_map_bnode, predicate=pred)
        pred_obj_map.constant = value_of(NS_MGR.rr.constant)
        pred_obj_map.template = value_of(NS_MGR.rr.template)
        pred_obj_map.parentTriplesMap = value_of(NS_MGR.rr.parentTriplesMap)
        if pred_obj_map.parentTriplesMap is not None:
            self.parents.add(str(pred_obj_map.parentTriplesMap))
        pred_obj_map.reference = value_of(NS_MGR.rr.reference)
        pred_obj_map.datatype = value_of(NS_MGR.rr.datatype)
        pred_obj_map.query = value_of(NS_MGR.rml.query)
        # BIBCAT extension: optional delimiter list on the object map.
        pred_obj_map.delimiters = list(self.rml.objects(
            subject=obj_map_bnode,
            predicate=NS_MGR.kds.delimiter))
        pred_obj_maps.append(pred_obj_map)
    return pred_obj_maps
def __init__( self, document, graph, base = None, vocab_expansion = False, vocab_cache = True ) :
    """
    @param graph: an RDF graph; an RDFLib Graph
    @type graph: RDFLib Graph
    @param document: top of the DOM tree, as returned by the HTML5 parser
    @keyword base: the base of the Dom tree, either set from the outside or via a @base element
    @keyword vocab_expansion: whether vocab expansion should be performed or not
    @type vocab_expansion: Boolean
    @keyword vocab_cache: if vocabulary expansion is done, then perform caching of the vocabulary data
    @type vocab_cache: Boolean
    """
    Microdata.__init__(self, document, base)
    self.vocab_expansion = vocab_expansion
    self.vocab_cache = vocab_cache
    self.graph = graph
    self.ns_md = Namespace( MD_VOCAB )
    self.graph.bind( "md",MD_VOCAB )
    # Flag tracking vocabulary usage; not updated in this span — presumably
    # flipped elsewhere in the class. TODO confirm.
    self.vocabularies_used = False

    # Get the vocabularies defined in the registry bound to proper names, if any...
    # NOTE(review): this helper is never called in the visible code — it may be
    # dead code or used by an out-of-view line; confirm before removing.
    def _use_rdfa_context () :
        try :
            from ..pyRdfa.initialcontext import initial_context
        except :
            from pyRdfa.initialcontext import initial_context
        retval = {}
        vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
        for prefix in list(vocabs.keys()) :
            uri = vocabs[prefix]
            if uri not in vocab_names and uri not in registry : retval[uri] = prefix
        return retval
    # Bind every registry vocabulary that has a well-known prefix name,
    # trying the URI as-is and with '#' appended.
    for vocab in registry :
        if vocab in vocab_names :
            self.graph.bind( vocab_names[vocab],vocab )
        else :
            hvocab = vocab + '#'
            if hvocab in vocab_names :
                self.graph.bind( vocab_names[hvocab],hvocab )
    # Add the prefixes defined in the RDFa initial context to improve the outlook of the output
    # I put this into a try: except: in case the pyRdfa package is not available...
    try :
        try :
            from ..pyRdfa.initialcontext import initial_context
        except :
            from pyRdfa.initialcontext import initial_context
        vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
        for prefix in list(vocabs.keys()) :
            uri = vocabs[prefix]
            if uri not in registry :
                # if it is in the registry, then it may have needed some special microdata massage...
                self.graph.bind( prefix,uri )
    except :
        pass
def __init__( self, document, graph, base = None, vocab_expansion = False, vocab_cache = True ) :
    """
    @param graph: an RDF graph; an RDFLib Graph
    @type graph: RDFLib Graph
    @param document: top of the DOM tree, as returned by the HTML5 parser
    @keyword base: the base of the Dom tree, either set from the outside or via a @base element
    @keyword vocab_expansion: whether vocab expansion should be performed or not
    @type vocab_expansion: Boolean
    @keyword vocab_cache: if vocabulary expansion is done, then perform caching of the vocabulary data
    @type vocab_cache: Boolean
    """
    Microdata.__init__(self, document, base)
    self.vocab_expansion = vocab_expansion
    self.vocab_cache = vocab_cache
    self.graph = graph
    self.ns_md = Namespace( MD_VOCAB )
    self.graph.bind( "md",MD_VOCAB )
    # Flag tracking vocabulary usage; not updated in this span — presumably
    # flipped elsewhere in the class. TODO confirm.
    self.vocabularies_used = False

    # Get the vocabularies defined in the registry bound to proper names, if any...
    # NOTE(review): this helper is never called in the visible code — it may be
    # dead code or used by an out-of-view line; confirm before removing.
    def _use_rdfa_context () :
        try :
            from ..pyRdfa.initialcontext import initial_context
        except :
            from pyRdfa.initialcontext import initial_context
        retval = {}
        vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
        for prefix in list(vocabs.keys()) :
            uri = vocabs[prefix]
            if uri not in vocab_names and uri not in registry : retval[uri] = prefix
        return retval
    # Bind every registry vocabulary that has a well-known prefix name,
    # trying the URI as-is and with '#' appended.
    for vocab in registry :
        if vocab in vocab_names :
            self.graph.bind( vocab_names[vocab],vocab )
        else :
            hvocab = vocab + '#'
            if hvocab in vocab_names :
                self.graph.bind( vocab_names[hvocab],hvocab )
    # Add the prefixes defined in the RDFa initial context to improve the outlook of the output
    # I put this into a try: except: in case the pyRdfa package is not available...
    try :
        try :
            from ..pyRdfa.initialcontext import initial_context
        except :
            from pyRdfa.initialcontext import initial_context
        vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
        for prefix in list(vocabs.keys()) :
            uri = vocabs[prefix]
            if uri not in registry :
                # if it is in the registry, then it may have needed some special microdata massage...
                self.graph.bind( prefix,uri )
    except :
        pass
def __init__(self, namespace={}, prefixes='', newqueries={}):
self.graph = rdflib.Graph()
self.namespace.update(namespace)
self.prefixes += prefixes
# run all given "SELECT" queries through prepareQuery function.
for (id, query) in newqueries.iteritems():
leader = query.strip()[0:6]
if leader == 'SELECT': # prepareQuery only works on SELECT ...
self.queries[id] = rdflib.plugins.sparql.prepareQuery(query, initNs = self.namespace)
print "Adding SELECT query"
elif leader == 'DELETE' or leader == 'INSERT':
self.queries[id] = query
print "Adding DEL/INS query"
#self.queries.update(queries)
#print "Done query prep."
def do_query_update(self, query_name, initBinds = {}):
    """
    Given a sparql 1.1 update query, perform it.
    """
    update_body = self.prefixes + self.queries[query_name]
    try:
        # Doesn't work?! fails silently.
        return self.graph.update(update_body, initBindings=initBinds, initNs=self.namespace)
        #result = processUpdate(self.graph, self.prefixes + query, initBindings=initBinds, initNs=self.namespace)
    except Exception as e:
        print ("\nSparql query [%s] parsing problem: %s \n" % (query_name, str(e) ))
        return None
def doQueryUpdate(self, query_name, initBinds = {}):
    """
    Given a sparql 1.1 update query, perform it.

    On a parsing/execution error the exception message is printed and None
    is returned instead of raising.
    """
    # NOTE(review): mutable default initBinds={} is shared across calls;
    # harmless as long as it is never mutated here — confirm callers.
    query = self.queries[query_name]
    try:
        # Doesn't work?! fails silently.
        #result = self.graph.update(self.prefixes + query, initBindings=initBinds, initNs=self.namespace)
        result = processUpdate(self.graph, self.prefixes + query, initBindings=initBinds, initNs=self.namespace)
    except Exception as e:
        print ("\nSparql query [%s] parsing problem: %s \n" % (query_name, str(e) ))
        return None
    return result
def doQueryUpdate(self, query_name, initBinds = {}):
    """
    Given a sparql 1.1 update query, perform it.

    On a parsing/execution error the exception message is printed and None
    is returned instead of raising.
    """
    # NOTE(review): mutable default initBinds={} is shared across calls;
    # harmless as long as it is never mutated here — confirm callers.
    query = self.queries[query_name]
    try:
        # Doesn't work?! fails silently.
        #result = self.graph.update(self.prefixes + query, initBindings=initBinds, initNs=self.namespace)
        result = processUpdate(self.graph, self.prefixes + query, initBindings=initBinds, initNs=self.namespace)
    except Exception as e:
        print ("\nSparql query [%s] parsing problem: %s \n" % (query_name, str(e) ))
        return None
    return result
def __neg__(self):
    """
    >>> (- Literal(1))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (- Literal(10.5))
    rdflib.term.Literal(%(u)s'-10.5', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#double'))
    >>> from rdflib.namespace import XSD
    >>> (- Literal("1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (- Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    >>>
    """
    # Guard clause: unary minus is only defined for numeric literal values.
    if not isinstance(self.value, (int, long, float)):
        raise TypeError("Not a number; %s" % repr(self))
    return Literal(self.value.__neg__())
def __pos__(self):
    """
    Unary plus for numeric Literals; raises TypeError otherwise.

    >>> (+ Literal(1))
    rdflib.term.Literal(%(u)s'1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (+ Literal(-1))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> from rdflib.namespace import XSD
    >>> (+ Literal("-1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (+ Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    """
    # Guard clause: only numeric values support unary plus here.
    if not isinstance(self.value, (int, long, float)):
        raise TypeError("Not a number; %s" % repr(self))
    # "+x" dispatches to value.__pos__(), identical to the explicit call.
    return Literal(+self.value)
def __abs__(self):
    """
    Absolute value for numeric Literals; raises TypeError otherwise.

    >>> abs(Literal(-1))
    rdflib.term.Literal(%(u)s'1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> from rdflib.namespace import XSD
    >>> abs( Literal("-1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> abs(Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    """
    # Guard clause: reject non-numeric parsed values up front.
    if not isinstance(self.value, (int, long, float)):
        raise TypeError("Not a number; %s" % repr(self))
    # abs() dispatches to value.__abs__(), identical to the explicit call.
    return Literal(abs(self.value))
def __invert__(self):
    """
    Bitwise inversion (~) for integer-valued Literals.

    >>> ~(Literal(-1))
    rdflib.term.Literal(%(u)s'0', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> from rdflib.namespace import XSD
    >>> ~( Literal("-1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'0', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> ~(Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    """
    # BUG FIX: the old test included float, but float has no __invert__,
    # so ~Literal(1.5) leaked an AttributeError instead of the documented
    # TypeError.  Only integral values support bitwise inversion.
    if isinstance(self.value, (int, long)):
        return Literal(self.value.__invert__())
    else:
        raise TypeError("Not a number; %s" % repr(self))
def __neg__(self):
    """
    Return a new Literal holding the negated numeric value.

    >>> (- Literal(1))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (- Literal(10.5))
    rdflib.term.Literal(%(u)s'-10.5', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#double'))
    >>> from rdflib.namespace import XSD
    >>> (- Literal("1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (- Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    >>>
    """
    value = self.value
    if isinstance(value, (int, long, float)):
        # Unary "-" invokes value.__neg__(), same as the explicit call.
        return Literal(-value)
    raise TypeError("Not a number; %s" % repr(self))
def __pos__(self):
    """
    Return a new Literal holding the value under unary plus.

    >>> (+ Literal(1))
    rdflib.term.Literal(%(u)s'1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (+ Literal(-1))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> from rdflib.namespace import XSD
    >>> (+ Literal("-1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'-1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> (+ Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    """
    value = self.value
    if isinstance(value, (int, long, float)):
        # Unary "+" invokes value.__pos__(), same as the explicit call.
        return Literal(+value)
    raise TypeError("Not a number; %s" % repr(self))
def __abs__(self):
    """
    Return a new Literal holding the absolute numeric value.

    >>> abs(Literal(-1))
    rdflib.term.Literal(%(u)s'1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> from rdflib.namespace import XSD
    >>> abs( Literal("-1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'1', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> abs(Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    """
    value = self.value
    if isinstance(value, (int, long, float)):
        # abs() invokes value.__abs__(), same as the explicit call.
        return Literal(abs(value))
    raise TypeError("Not a number; %s" % repr(self))
def __invert__(self):
    """
    Bitwise inversion (~) for integer-valued Literals.

    >>> ~(Literal(-1))
    rdflib.term.Literal(%(u)s'0', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> from rdflib.namespace import XSD
    >>> ~( Literal("-1", datatype=XSD.integer))
    rdflib.term.Literal(%(u)s'0', datatype=rdflib.term.URIRef(%(u)s'http://www.w3.org/2001/XMLSchema#integer'))
    >>> ~(Literal("1"))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Not a number; rdflib.term.Literal(%(u)s'1')
    """
    # BUG FIX: float was accepted by the old isinstance check yet has no
    # __invert__, so a float value raised AttributeError rather than the
    # documented TypeError.  Restrict inversion to integral values.
    if isinstance(self.value, (int, long)):
        return Literal(self.value.__invert__())
    else:
        raise TypeError("Not a number; %s" % repr(self))
def get_ontology_imports(self, ontology_file_path='./imports/'):
    """
    Detect every owl:imports statement in the loaded ontology graph and
    parse the corresponding local file into self.graph.

    Currently assumes the imported files sit in a folder (default
    "./imports/") and are named after the last path segment of the
    import IRI.

    :param ontology_file_path: folder holding local copies of the
        imported ontology files.
    """
    query = rdflib.plugins.sparql.prepareQuery("""
        SELECT distinct ?import_file
        WHERE {?s owl:imports ?import_file . }
        ORDER BY (?import_file)
        """, initNs=self.namespace)
    imports = self.graph.query(query, initNs=self.namespace)
    print("It has %s import files ..." % len(imports))
    for result_row in imports:  # a rdflib.query.ResultRow
        # Local file name = last segment of the import IRI.
        # (renamed from "file", which shadowed the builtin)
        import_name = result_row.import_file.rsplit('/', 1)[1]
        # os.path.join fixes the doubled slash the old
        # "path + '/' + file" concatenation produced when the default
        # folder already ended with '/'.
        file_path = os.path.join(ontology_file_path, import_name)
        try:
            if os.path.isfile(file_path):
                self.graph.parse(file_path)
            else:
                print ('WARNING:' + file_path + " could not be loaded! Does its ontology include purl have a corresponding local file? \n")
        except rdflib.exceptions.ParserError:
            print (file_path + " needs to be in RDF OWL format!")
def graph_from_catalog(self, catalog_dict, catalog_ref):
    """
    Emit catalog-level RDF (a dcat:Catalog node plus basic metadata)
    for catalog_ref into self.g, using CKAN config values as fallbacks
    when the catalog dict does not supply a field.
    """
    graph = self.g
    # Register the well-known namespace prefixes on the output graph.
    for prefix, ns_uri in namespaces.iteritems():
        graph.bind(prefix, ns_uri)
    graph.add((catalog_ref, RDF.type, DCAT.Catalog))
    # Basic fields: (dict key, predicate, config fallback, node factory)
    items = [
        ('title', DCT.title, config.get('ckan.site_title'), Literal),
        ('description', DCT.description, config.get('ckan.site_description'), Literal),
        ('homepage', FOAF.homepage, config.get('ckan.site_url'), URIRef),
        ('language', DCT.language, config.get('ckan.locale_default', 'en'), Literal),
    ]
    for key, predicate, fallback, node_type in items:
        # Prefer the catalog dict's value; fall back to the config value.
        value = catalog_dict.get(key, fallback) if catalog_dict else fallback
        if value:
            graph.add((catalog_ref, predicate, node_type(value)))
    # Dates
    modified = self._last_catalog_modification()
    if modified:
        self._add_date_triple(catalog_ref, DCT.modified, modified)