def checkrequest(request):
    """Analyze RDF data contained in a POST request.

    Args:
        request: A Flask HTTP Request.
    Returns:
        data: A list with the RDFLib quads object and the
            rdflib.ConjunctiveGraph object.
    Raises:
        Exception: If contained data is not valid nquads.
    """
    graph = ConjunctiveGraph()
    # Parse errors simply propagate to the caller; the original
    # `except Exception as e: raise e` added nothing.
    graph.parse(data=request.data, format='nquads')
    quads = graph.quads((None, None, None, None))
    return splitinformation(quads, graph)
# Example usages of the rdflib ConjunctiveGraph class
def getgraphfromfile(self):
    """Return a Conjunctive Graph generated from the referenced file.

    Returns:
        A ConjunctiveGraph; empty if the file could not be parsed.
    """
    graph = ConjunctiveGraph()
    try:
        graph.parse(self.path, format='nquads', publicID='http://localhost:5000/')
        # Use lazy %-style logging arguments; the original passed
        # print-style extra positionals, which makes the logger raise
        # "not all arguments converted" when the record is formatted.
        logger.debug('Success: File %s parsed', self.path)
    except KeyError:
        # Given file contains non valid rdf data
        # NOTE(review): catching only KeyError looks narrow for parse
        # failures -- confirm which exceptions graph.parse raises here.
        pass
    return graph
def test_time():
    """Each xsd:time variant in the CSV maps to exactly one typed literal."""
    with CSVW(csv_path="tests/datatypes.time.csv",
              metadata_path="tests/datatypes.time.csv-metadata.json") as csvw:
        rdf_output = csvw.to_rdf()

    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    NS = Namespace('https://www.example.org/')
    # (property, lexical form) pairs expected once each on event/1.
    expected = [
        ('time1', "19:30:00"),
        ('time2', "09:30:10.5"),
        ('time3', "10:30:10Z"),
        ('time4', "11:30:10-06:00"),
        ('time5', "04:30:10+04:00"),
    ]
    for prop, lexical in expected:
        lit = Literal(lexical, datatype=XSD.time)
        assert len(list(g.triples((NS['event/1'], NS[prop], lit)))) == 1
def test_date():
    """Each xsd:date variant in the CSV maps to exactly one typed literal."""
    with CSVW(csv_path="tests/datatypes.date.csv",
              metadata_path="tests/datatypes.date.csv-metadata.json") as csvw:
        rdf_output = csvw.to_rdf()

    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    # Define NS locally (mirroring test_time) so this test does not depend
    # on a module-level NS being present.
    NS = Namespace('https://www.example.org/')
    expected = [
        ('date1', "2017-01-09"),
        ('date2', "2017-01-10Z"),
        ('date3', "2017-01-11"),
        ('date4', "2002-09-24-06:00"),
        ('date5', "2002-09-24+04:00"),
    ]
    for prop, lexical in expected:
        lit = Literal(lexical, datatype=XSD.date)
        assert len(list(g.triples((NS['event/1'], NS[prop], lit)))) == 1
def test_datetime():
    """xsd:dateTime variants and xsd:dateTimeStamp map to typed literals."""
    with CSVW(csv_path="tests/datatypes.datetime.csv",
              metadata_path="tests/datatypes.datetime.csv-metadata.json") as csvw:
        rdf_output = csvw.to_rdf()

    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    # Define NS locally (mirroring test_time) so this test does not depend
    # on a module-level NS being present.
    NS = Namespace('https://www.example.org/')
    expected = [
        ('datetime1', "2002-05-30T09:00:00", XSD.dateTime),
        ('datetime2', "2002-05-30T09:30:10.5", XSD.dateTime),
        ('datetime3', "2002-05-30T09:30:10Z", XSD.dateTime),
        ('datetime4', "2002-05-30T09:30:10-06:00", XSD.dateTime),
        ('datetime5', "2002-05-30T09:30:10+04:00", XSD.dateTime),
        ('datetimestamp', "2004-04-12T13:20:00-05:00", XSD.dateTimeStamp),
    ]
    for prop, lexical, dtype in expected:
        lit = Literal(lexical, datatype=dtype)
        assert len(list(g.triples((NS['event/1'], NS[prop], lit)))) == 1
def test_bool_with_format():
    """Booleans parsed via a custom format map to xsd:boolean true/false."""
    csvw = CSVW(csv_path="tests/datatypes.bool.csv",
                metadata_path="tests/datatypes.bool.csv-metadata.json")
    rdf_output = csvw.to_rdf()

    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    # Define NS locally (mirroring test_time) so this test does not depend
    # on a module-level NS being present.
    NS = Namespace('https://www.example.org/')
    true_lit = Literal(True, datatype=XSD.boolean)
    false_lit = Literal(False, datatype=XSD.boolean)

    # event/1 is all-true; event/2 and event/3 are all-false.
    expectations = [('1', true_lit), ('2', false_lit), ('3', false_lit)]
    for event, lit in expectations:
        for prop in ('bool1', 'bool2', 'bool3'):
            assert len(list(g.triples((NS['event/' + event], NS[prop], lit)))) == 1
def verify_rdf(rdf_output):
    """Assert that rdf_output contains exactly the expected people triples.

    Args:
        rdf_output: Turtle-serialized RDF to verify.
    """
    ids_ns = Namespace("http://foo.example.org/CSV/People-IDs/")
    ages_ns = Namespace("http://foo.example.org/CSV/People-Ages/")
    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    all_subjects = {x for x in g.subjects()}
    assert len(all_subjects) == 2
    bob_subj = ids_ns['1']
    joe_subj = ids_ns['2']
    assert bob_subj in all_subjects
    assert joe_subj in all_subjects

    # BUG FIX: len([g.triples(...)]) built a one-element list holding a
    # generator, so every original assertion was vacuously true. Materialize
    # the matches with list() so the counts are meaningful.
    def _count(s, p, o):
        """Number of triples in g matching the (s, p, o) pattern."""
        return len(list(g.triples((s, p, o))))

    # Bob's details
    assert _count(bob_subj, ids_ns.id, Literal(1)) == 1
    assert _count(bob_subj, ids_ns.name, Literal("Bob")) == 1
    assert _count(bob_subj, ages_ns.age, Literal(34)) == 1
    # Joe's details
    assert _count(joe_subj, ids_ns.id, Literal(2)) == 1
    assert _count(joe_subj, ids_ns.name, Literal("Joe")) == 1
    assert _count(joe_subj, ages_ns.age, Literal(54)) == 1
def test_encoding_rdf():
    """RDF conversion honors an explicitly specified CSV encoding."""
    # With encoding specified
    csvw = CSVW(csv_path="./tests/iso_encoding.csv",
                metadata_path="./tests/iso_encoding.csv-metadata.json",
                csv_encoding="ISO-8859-1")
    rdf_output = csvw.to_rdf()

    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    units = Namespace('http://example.org/units/')
    cars = Namespace('http://example.org/cars/')
    meta = Namespace("http://example.org/properties/")

    # The micro sign survives the round trip as a percent-encoded IRI.
    expected_unit = units[quote(u"\xb5100".encode('utf-8'))]
    assert (cars['1'], meta['UnitOfMeasurement'], expected_unit) in g
    assert expected_unit in list(g.objects())
def serialize(self, add, delete):
    """Serialize added/deleted triples as an eccenca revision commit graph.

    Args:
        add: dict mapping graph URI -> collection of (s, p, o) triples added.
        delete: dict mapping graph URI -> collection of triples deleted.
    Returns:
        The TriG serialization of the commit/revision graph as a str.
    """
    commit = Namespace("urn:commit:" + str(uuid.uuid1()) + ":")
    eccrev = Namespace("https://vocab.eccenca.com/revision/")
    g = ConjunctiveGraph()
    namespace_manager = NamespaceManager(g)
    namespace_manager.bind('eccrev', eccrev, override=False)
    g.add((commit.term(""), RDF.type, eccrev.Commit))
    graphUris = set(delete.keys()) | set(add.keys())
    for graphUri in graphUris:
        # Only emit a revision for graphs that actually changed.
        if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
            revision = Namespace("urn:revision:" + str(uuid.uuid1()) + ":")
            g.add((commit.term(""), eccrev.hasRevision, revision.term("")))
            g.add((revision.term(""), RDF.type, eccrev.Revision))
            if str(graphUri) != 'http://quitdiff.default/':
                g.add((revision.term(""), eccrev.hasRevisionGraph, graphUri))
            if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                # NOTE(review): the revision namespace already ends with ':',
                # so term(":delete") yields '...::delete' -- confirm intended.
                deleteGraphName = revision.term(":delete")
                g.add((revision.term(""), eccrev.deltaDelete, deleteGraphName))
                for triple in delete[graphUri]:
                    # Appending the named graph turns the triple into a quad.
                    g.add(triple + (deleteGraphName,))
            if graphUri in add.keys() and len(add[graphUri]) > 0:
                insertGraphName = revision.term(":insert")
                g.add((revision.term(""), eccrev.deltaInsert, insertGraphName))
                for triple in add[graphUri]:
                    g.add(triple + (insertGraphName,))
    # serialize() returns bytes in rdflib < 6, hence the decode.
    return g.serialize(format="trig").decode("utf-8")
def serialize(self, add, delete):
    """Serialize added/deleted triples using the TopBraid diff vocabulary.

    Args:
        add: dict mapping graph URI -> collection of (s, p, o) triples added.
        delete: dict mapping graph URI -> collection of triples deleted.
    Returns:
        The TriG serialization of the diff graph as a str.
    """
    diff = Namespace("http://topbraid.org/diff#")
    g = ConjunctiveGraph()
    namespace_manager = NamespaceManager(g)
    namespace_manager.bind('diff', diff, override=False)
    namespace_manager.bind('owl', OWL, override=False)
    graphUris = set(delete.keys()) | set(add.keys())
    for graphUri in graphUris:
        # Only emit a changeset for graphs that actually changed.
        if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
            changeset = Namespace("urn:diff:" + str(uuid.uuid1()))
            graphTerm = changeset.term("")
            if str(graphUri) != 'http://quitdiff.default/':
                g.add((graphTerm, OWL.imports, graphUri, graphTerm))
            g.add((graphTerm, RDF.type, OWL.Ontology, graphTerm))
            g.add((graphTerm, OWL.imports, diff.term(""), graphTerm))
            if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                # One reified DeletedTripleDiff statement per deleted triple.
                # (The original kept an `i` counter that was never read.)
                for triple in delete[graphUri]:
                    deleteStatementName = BNode()
                    g.add((deleteStatementName, RDF.type, diff.DeletedTripleDiff, graphTerm))
                    g.add((deleteStatementName, RDF.subject, triple[0], graphTerm))
                    g.add((deleteStatementName, RDF.predicate, triple[1], graphTerm))
                    g.add((deleteStatementName, RDF.object, triple[2], graphTerm))
            if graphUri in add.keys() and len(add[graphUri]) > 0:
                # One reified AddedTripleDiff statement per added triple.
                for triple in add[graphUri]:
                    addStatementName = BNode()
                    g.add((addStatementName, RDF.type, diff.AddedTripleDiff, graphTerm))
                    g.add((addStatementName, RDF.subject, triple[0], graphTerm))
                    g.add((addStatementName, RDF.predicate, triple[1], graphTerm))
                    g.add((addStatementName, RDF.object, triple[2], graphTerm))
    # serialize() returns bytes in rdflib < 6, hence the decode.
    return g.serialize(format="trig").decode("utf-8")
def serialize(self, add, delete):
    """Serialize added/deleted triples using the Changeset vocabulary.

    Args:
        add: dict mapping graph URI -> collection of (s, p, o) triples added.
        delete: dict mapping graph URI -> collection of triples deleted.
    Returns:
        The Turtle serialization of the changeset graph as a str.
    """
    changeset = Namespace("http://purl.org/vocab/changeset/schema#")
    g = ConjunctiveGraph()
    namespace_manager = NamespaceManager(g)
    namespace_manager.bind('changeset', changeset, override=False)
    graphUris = set(delete.keys()) | set(add.keys())
    for graphUri in graphUris:
        # Only emit a ChangeSet for graphs that actually changed.
        if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
            diff = Namespace("urn:changeset:" + str(uuid.uuid1()))
            graphTerm = diff.term("")
            g.add((graphTerm, RDF.type, changeset.ChangeSet))
            if str(graphUri) != 'http://quitdiff.default/':
                g.add((graphTerm, changeset.subjectOfChange, graphUri))
            if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                # One reified rdf:Statement per removed triple.
                # (The original kept an `i` counter that was never read.)
                for triple in delete[graphUri]:
                    deleteStatementName = BNode()
                    g.add((graphTerm, changeset.removal, deleteStatementName))
                    g.add((deleteStatementName, RDF.type, RDF.Statement))
                    g.add((deleteStatementName, RDF.subject, triple[0]))
                    g.add((deleteStatementName, RDF.predicate, triple[1]))
                    g.add((deleteStatementName, RDF.object, triple[2]))
            if graphUri in add.keys() and len(add[graphUri]) > 0:
                # One reified rdf:Statement per added triple.
                for triple in add[graphUri]:
                    addStatementName = BNode()
                    g.add((graphTerm, changeset.addition, addStatementName))
                    g.add((addStatementName, RDF.type, RDF.Statement))
                    g.add((addStatementName, RDF.subject, triple[0]))
                    g.add((addStatementName, RDF.predicate, triple[1]))
                    g.add((addStatementName, RDF.object, triple[2]))
    # serialize() returns bytes in rdflib < 6, hence the decode.
    return g.serialize(format="turtle").decode("utf-8")
def parse(self, source=None, publicID=None, format="xml",
          location=None, file=None, data=None, context=u'http://example.com', **args):
    """Parse input into a temporary in-memory graph, then bulk-insert here.

    Parsing into a scratch ConjunctiveGraph first lets us do a single
    addN() bulk insert into the backing store instead of many per-triple
    writes.

    Args:
        source, publicID, format, location, file, data, **args: forwarded
            unchanged to rdflib's Graph.parse.
        context: graph/context URI the parsed triples are placed under.
    """
    logging.debug('parsing to memory...')
    cj = rdflib.ConjunctiveGraph()
    memg = cj.get_context(context)
    memg.parse(source=source, publicID=publicID, format=format, location=location,
               file=file, data=data, **args)
    # (Removed a stale commented-out dateTime-literal debugging loop.)
    quads = cj.quads()
    logging.debug('addN ...')
    self.addN(quads)
def _load(cls, source):
    """Parse *source* as TriG and return the resulting ConjunctiveGraph."""
    parsed = ConjunctiveGraph()
    parsed.parse(source, format='trig')
    return parsed
# def get_os(self):
# return 'Ubuntu'
#
# def get_os_version(self):
# return '12.04'
#
# def get_create_date(self):
# return '20091004T111800Z'
#
# def get_environment_vars(self):
#
# results = self.graph.query(
# """SELECT DISTINCT ?variable ?value
# WHERE {
# ?x nipype:environmentVariable ?variable .
# ?x prov:value ?value .
# }""")
#
# return results
def parse(self, inputsource, sink, **kwargs):
    """Parse f as an N-Triples file.

    Args:
        inputsource: rdflib InputSource providing a byte stream.
        sink: Graph whose context-aware store receives the parsed quads.
    Returns:
        The ConjunctiveGraph wrapping sink's store.
    Raises:
        ParseError: if the source is not file-like or a line is invalid.
    """
    assert sink.store.context_aware, ("NQuadsParser must be given"
                                      " a context aware store.")
    self.sink = ConjunctiveGraph(store=sink.store, identifier=sink.identifier)

    source = inputsource.getByteStream()
    if not hasattr(source, 'read'):
        raise ParseError("Item to parse must be a file-like object.")

    # Decode the byte stream as UTF-8 line by line.
    source = getreader('utf-8')(source)

    self.file = source
    self.buffer = ''
    while True:
        self.line = __line = self.readline()
        if self.line is None:
            break
        try:
            self.parseline()
        # BUG FIX: `except ParseError, msg:` is Python-2-only syntax and is
        # a SyntaxError on Python 3; use the `as` form (valid on 2.6+).
        except ParseError as msg:
            raise ParseError("Invalid line (%s):\n%r" % (msg, __line))
    return self.sink
def __init__(self, graph=None, bindings=None):
    """Initialize a SPARQL query context.

    Args:
        graph: Optional Graph or ConjunctiveGraph to evaluate against.
        bindings: Optional initial variable Bindings.
    """
    self.bindings = bindings or Bindings()

    if isinstance(graph, ConjunctiveGraph):
        self._dataset = graph
        if rdflib.plugins.sparql.SPARQL_DEFAULT_GRAPH_UNION:
            # Default graph is the union over the whole dataset.
            self.graph = self.dataset
        else:
            self.graph = self.dataset.default_context
    else:
        # Plain Graph (or None): no dataset semantics available.
        self._dataset = None
        self.graph = graph

    self.prologue = None
    self.now = datetime.datetime.now()
    # defaultdict(BNode): each distinct label maps to one stable fresh BNode.
    self.bnodes = collections.defaultdict(BNode)
def check_serialize_parse(fpath, infmt, testfmt, verbose=False):
    """Round-trip fpath through serialization in testfmt and compare graphs.

    Args:
        fpath: Path of the input RDF file.
        infmt: Format used to parse fpath.
        testfmt: Format to serialize to and reparse from.
        verbose: If True, print intermediate triples and serializations.
    """
    g = ConjunctiveGraph()
    _parse_or_report(verbose, g, fpath, format=infmt)
    if verbose:
        # print() calls (single argument) work on both Python 2 and 3,
        # unlike the original print statements.
        for t in g:
            print(t)
        print("========================================")
        print("Parsed OK!")
    s = g.serialize(format=testfmt)
    if verbose:
        print(s)
    g2 = ConjunctiveGraph()
    _parse_or_report(verbose, g2, data=s, format=testfmt)
    if verbose:
        print(g2.serialize())
    crapCompare(g, g2)
def setUp(self):
    """Create a fresh store-backed graph plus the shared test URIRefs."""
    import os
    try:
        self.graph = ConjunctiveGraph(store=self.store)
    except ImportError:
        raise SkipTest(
            "Dependencies for store '%s' not available!" % self.store)
    if self.store == "SQLite":
        # BUG FIX: mkstemp returns an OPEN file descriptor; the original
        # discarded it, leaking one fd per test. Close it -- the store
        # reopens the path itself via graph.open() below.
        fd, self.tmppath = mkstemp(
            prefix='test', dir='/tmp', suffix='.sqlite')
        os.close(fd)
    else:
        self.tmppath = mkdtemp()
    self.graph.open(self.tmppath, create=True)
    self.michel = URIRef(u'michel')
    self.tarek = URIRef(u'tarek')
    self.bob = URIRef(u'bob')
    self.likes = URIRef(u'likes')
    self.hates = URIRef(u'hates')
    self.pizza = URIRef(u'pizza')
    self.cheese = URIRef(u'cheese')
    self.c1 = URIRef(u'context-1')
    self.c2 = URIRef(u'context-2')
    # delete the graph for each test!
    self.graph.remove((None, None, None))
def testSameSubject(self):
    """TriG output keeps same-subject triples separated by their contexts."""
    g = rdflib.ConjunctiveGraph()
    # Same subject urn:1 in two different named graphs.
    g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
                                rdflib.URIRef('urn:p1'),
                                rdflib.URIRef('urn:o1')))
    g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
                                rdflib.URIRef('urn:p2'),
                                rdflib.URIRef('urn:o2')))
    self.assertEqual(len(g.get_context('urn:a')), 1)
    self.assertEqual(len(g.get_context('urn:b')), 1)
    s = g.serialize(format='trig')
    # Each predicate must appear exactly once in the serialization.
    self.assertEqual(len(re.findall(b("p1"), s)), 1)
    self.assertEqual(len(re.findall(b("p2"), s)), 1)
    self.assert_(b('{}') not in s)  # no empty graphs!
def testFinalNewline():
    """
    http://code.google.com/p/rdflib/issues/detail?id=5
    """
    import sys
    import platform
    if getattr(sys, 'pypy_version_info', None) or platform.system() == 'Java':
        from nose import SkipTest
        raise SkipTest(
            'Testing under pypy and Jython2.5 fails to detect that ' + \
            'IOMemory is a context_aware store')

    graph = ConjunctiveGraph()
    graph.add((URIRef("http://ex.org/a"),
               URIRef("http://ex.org/b"),
               URIRef("http://ex.org/c")))

    # Every registered serializer must end its output with one newline.
    failed = set()
    for plugin in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
        output = graph.serialize(format=plugin.name)
        pieces = output.split(b("\n"))
        ends_properly = b("\n") in output and pieces[-1] == b('')
        if not ends_properly:
            failed.add(plugin.name)
    assert not failed, "No final newline for formats: '%s'" % failed
def test_serialize(self):
    """Round-trip two single-triple contexts through nquads serialization."""
    g = ConjunctiveGraph()
    ctx_a = URIRef("http://example.org/mygraph1")
    ctx_b = URIRef("http://example.org/mygraph2")
    bob = URIRef(u'urn:bob')
    likes = URIRef(u'urn:likes')
    pizza = URIRef(u'urn:pizza')
    for ctx in (ctx_a, ctx_b):
        g.get_context(ctx).add((bob, likes, pizza))

    s = g.serialize(format='nquads')
    # One nquads line per context, ignoring blank lines.
    nonblank = [line for line in s.split(b("\n")) if line.strip()]
    self.assertEqual(len(nonblank), 2)

    g2 = ConjunctiveGraph()
    g2.parse(data=s, format='nquads')
    self.assertEqual(len(g), len(g2))
    self.assertEqual(sorted(c.identifier for c in g.contexts()),
                     sorted(c.identifier for c in g2.contexts()))
def test_bnode_publicid():
    """Triples parsed with a BNode publicID land in the BNode's context only."""
    g = ConjunctiveGraph()
    b = BNode()
    data = '<d:d> <e:e> <f:f> .'
    print ("Parsing %r into %r"%(data, b))
    g.parse(data=data, format='turtle', publicID=b)

    # The triples must appear in the context named by the BNode itself...
    found = list(g.get_context(b).triples((None, None, None)))
    if not found:
        raise Exception("No triples found in graph %r"%b)

    # ...and NOT in a context named by the URIRef coercion of that BNode.
    u = URIRef(b)
    stray = list(g.get_context(u).triples((None, None, None)))
    if stray:
        raise Exception("Bad: Found in graph %r: %r"%(u, stray))
def parse(self, inputsource, sink, **kwargs):
    """Parse f as an N-Triples file.

    Args:
        inputsource: rdflib InputSource providing a byte stream.
        sink: Graph whose context-aware store receives the parsed quads.
    Returns:
        The ConjunctiveGraph wrapping sink's store.
    Raises:
        ParseError: if the source is not file-like or a line is invalid.
    """
    assert sink.store.context_aware, ("NQuadsParser must be given"
                                      " a context aware store.")
    self.sink = ConjunctiveGraph(store=sink.store)

    source = inputsource.getByteStream()
    if not hasattr(source, 'read'):
        raise ParseError("Item to parse must be a file-like object.")

    # Decode the byte stream as UTF-8 line by line.
    source = getreader('utf-8')(source)

    self.file = source
    self.buffer = ''
    while True:
        self.line = __line = self.readline()
        if self.line is None:
            break
        try:
            self.parseline()
        # BUG FIX: `except ParseError, msg:` is Python-2-only syntax and is
        # a SyntaxError on Python 3; use the `as` form (valid on 2.6+).
        except ParseError as msg:
            raise ParseError("Invalid line (%s):\n%r" % (msg, __line))
    return self.sink
def __init__(self, graph=None, bindings=None):
    """Initialize a SPARQL query context.

    Args:
        graph: Optional Graph or ConjunctiveGraph to evaluate against.
        bindings: Optional initial variable Bindings.
    """
    self.bindings = bindings or Bindings()

    if isinstance(graph, ConjunctiveGraph):
        self._dataset = graph
        if rdflib.plugins.sparql.SPARQL_DEFAULT_GRAPH_UNION:
            # Default graph is the union over the whole dataset.
            self.graph = self.dataset
        else:
            self.graph = self.dataset.default_context
    else:
        # Plain Graph (or None): no dataset semantics available.
        self._dataset = None
        self.graph = graph

    self.prologue = None
    self.now = datetime.datetime.now()
    # defaultdict(BNode): each distinct label maps to one stable fresh BNode.
    self.bnodes = collections.defaultdict(BNode)
def _convertRDF(self):
    """
    Convert a RDF/XML result into an RDFLib triple store. This method can be overwritten
    in a subclass for a different conversion method.
    @return: converted result
    @rtype: RDFLib Graph
    """
    try:
        from rdflib.graph import ConjunctiveGraph
    except ImportError:
        # Older rdflib versions expose ConjunctiveGraph at the package top level.
        from rdflib import ConjunctiveGraph
    retval = ConjunctiveGraph()
    # this is a strange hack. If the publicID is not set, rdflib (or the underlying xml parser) makes a funny
    # (and, as far as I could see, meaningless) error message...
    # NOTE(review): Graph.load is deprecated/removed in rdflib >= 6; this
    # relies on an older rdflib where load() parses RDF/XML -- confirm the
    # targeted rdflib version before upgrading.
    retval.load(self.response, publicID=' ')
    return retval
def participants_private_as_graph(self, discussion_id):
    """Build a graph of private data for all participants of a discussion.

    Args:
        discussion_id: Primary key of the Discussion.
    Returns:
        A ConjunctiveGraph (identified by the private-user-storage graph
        IRI) holding the participants' profile data and linked accounts.
    """
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.private_user_storage.name
    d_graph_iri = self.private_user_storage.sections[0].graph_iri
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    v = get_virtuoso(self.session, d_storage_name)
    v_main = get_virtuoso(self.session, self.discussion_storage_name())
    # NOTE(review): the positional True flag's meaning is defined by
    # Discussion.get_participants -- confirm against that signature.
    participant_ids = discussion.get_participants(True)
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    self.add_subject_data(v, cg, profiles)
    # Accounts whose sioc:account_of object is one of the profiles above.
    accounts = [account for ((account, p, profile), g)
                in v_main.triples((None, SIOC.account_of, None))
                if profile in profiles]
    self.add_subject_data(v, cg, accounts)
    return cg
def instance_view_jsonld(request):
    """View returning a single instance's triples rendered as JSON-LD.

    Raises:
        HTTPUnauthorized: if the user lacks READ permission on the instance.
        HTTPNotFound: if the context has no associated Discussion.
    """
    from assembl.semantic.virtuoso_mapping import AssemblQuadStorageManager
    from rdflib import URIRef, ConjunctiveGraph
    ctx = request.context
    user_id = authenticated_userid(request) or Everyone
    permissions = ctx.get_permissions()
    instance = ctx._instance
    if not instance.user_can(user_id, CrudPermissions.READ, permissions):
        raise HTTPUnauthorized()
    discussion = ctx.get_instance_of_class(Discussion)
    if not discussion:
        raise HTTPNotFound()
    aqsm = AssemblQuadStorageManager()
    # Rebase the instance URI onto the local URI; [6:] strips a 6-char
    # prefix -- presumably 'local:'; verify against instance.uri().
    uri = URIRef(aqsm.local_uri() + instance.uri()[6:])
    d_storage_name = aqsm.discussion_storage_name(discussion.id)
    v = get_virtuoso(instance.db, d_storage_name)
    cg = ConjunctiveGraph(v, d_storage_name)
    result = cg.triples((uri, None, None))
    #result = v.query('select ?p ?o ?g where {graph ?g {<%s> ?p ?o}}' % uri)
    # Something is wrong here.
    # Build N-Triples text, skipping placeholder "_with_no_name_entry"
    # objects, then hand it to the quad->JSON-LD converter.
    triples = '\n'.join([
        '%s %s %s.' % (uri.n3(), p.n3(), o.n3())
        for (s, p, o) in result
        if '_with_no_name_entry' not in o])
    return aqsm.quads_to_jsonld(triples)
def __init__(self):
    """Initialize a new MemoryStore instance."""
    logger = logging.getLogger('quit.core.MemoryStore')
    logger.debug('Create an instance of MemoryStore')
    # All named graphs of the store live in one in-memory conjunctive graph.
    self.store = ConjunctiveGraph(identifier='default')
    # (Removed the redundant bare `return` -- __init__ returns None anyway.)
def test_all_triples_with_row_numbers():
    """Virtual-column metadata generates the expected per-row triples."""
    csvw = CSVW(csv_path='tests/virtual1.csv',
                metadata_path='tests/virtual1.csv-metadata.json')
    rdf_output = csvw.to_rdf()
    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")
    all_subjects = {x for x in g.subjects()}
    assert len(all_subjects) == 4
    ns = Namespace("http://example.org/")
    assert ns['sub-1'] in all_subjects
    assert ns['sub-2'] in all_subjects
    # BUG FIX: len([g.triples(...)]) was always 1 (a one-element list
    # holding a generator); materialize with list() for a real count.
    assert len(list(g.triples((ns['sub-1'], ns['obj-1'], ns['pred-1'])))) == 1
    assert len(list(g.triples((ns['sub-2'], ns['obj-2'], ns['pred-2'])))) == 1
def test_default():
    """Default-valued virtual columns generate the expected triples."""
    csvw = CSVW(csv_path='tests/virtual1.csv',
                metadata_path='tests/virtual1.default.csv-metadata.json')
    rdf_output = csvw.to_rdf()
    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")
    all_subjects = {x for x in g.subjects()}
    assert len(all_subjects) == 4
    ns = Namespace("http://example.org/")
    assert ns['sub-1'] in all_subjects
    assert ns['sub-2'] in all_subjects
    # BUG FIX: len([g.triples(...)]) was always 1 (a one-element list
    # holding a generator); materialize with list() for a real count.
    assert len(list(g.triples((ns['sub-1'], ns['obj-1'], ns['myvalue'])))) == 1
    assert len(list(g.triples((ns['sub-2'], ns['obj-2'], ns['myvalue'])))) == 1
def test_table_level_about_url():
    """A table-level aboutUrl collapses rows onto the expected subjects."""
    csvw = CSVW(csv_path='tests/virtual1.csv',
                metadata_path='tests/virtual1.table.about_url.csv-metadata.json')
    rdf_output = csvw.to_rdf()
    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")
    all_subjects = {x for x in g.subjects()}
    assert len(all_subjects) == 2
    ns = Namespace("http://example.org/")
    assert ns['sub-1'] in all_subjects
    assert ns['sub-2'] in all_subjects
    # BUG FIX: len([g.triples(...)]) was always 1 (a one-element list
    # holding a generator); materialize with list() for a real count.
    assert len(list(g.triples((ns['sub-1'], ns['obj-1'], ns['myvalue'])))) == 1
    assert len(list(g.triples((ns['sub-2'], ns['obj-2'], ns['myvalue'])))) == 1