wikipunk committed on
Commit
ce3ecb3
1 Parent(s): 4eca7c8

add id_ when generating

Files changed (2)
  1. .gitignore +1 -0
  2. yago45en.py +12 -21
.gitignore ADDED
@@ -0,0 +1 @@
+__pycache__/
yago45en.py CHANGED
@@ -12,8 +12,6 @@ YAGO = Namespace('http://yago-knowledge.org/resource/')
 class YAGO45DatasetBuilder(GeneratorBasedBuilder):
     VERSION = "1.0.0"
 
-    taxonomy = Graph(bind_namespaces="core")
-
     def _info(self):
         return DatasetInfo(
             description="A subset of the YAGO 4.5 dataset maintaining only English labels",
@@ -36,31 +34,24 @@ class YAGO45DatasetBuilder(GeneratorBasedBuilder):
 
         facts = os.path.join(facts, "tmp/yago/")
 
-        # Load yago-taxonomy.ttl file in every process
-        self.taxonomy.parse(taxonomy)
-
-        # Extract prefix mappings
-        prefix_mappings = {prefix: namespace for prefix, namespace in self.taxonomy.namespaces()}
-
-        # Define splits for each chunk
+        # Define splits for each chunk of your dataset.
         chunk_paths = [os.path.join(facts, chunk) for chunk in os.listdir(facts) if chunk.endswith('.nt')]
         return [SplitGenerator(name=datasets.Split.TRAIN,
-                               gen_kwargs={'chunk_paths': chunk_paths,
-                                           'prefix_mappings': prefix_mappings})]
+                               gen_kwargs={'chunk_paths': chunk_paths})]
 
-    def _generate_examples(self, chunk_paths, prefix_mappings):
+    def _generate_examples(self, chunk_paths):
         # Load the chunks into an rdflib graph
         # Yield individual triples from the graph
+        id_ = 0
         for chunk_path in chunk_paths:
             graph = Graph(bind_namespaces="core")
-            for prefix, namespace in prefix_mappings.items():
-                graph.bind(prefix, namespace)
-            graph.parse(chunk_path, format='nt')
+            graph.parse(chunk_path)
 
-            # Yield individual triples from the graph
-            for i, (s, p, o) in enumerate(graph):
-                yield i, {
-                    'subject': str(s),
-                    'predicate': str(p),
-                    'object': str(o)
+            # Yield individual triples from the graph as N3
+            for (s, p, o) in graph.triples((None, None, None)):
+                yield id_, {
+                    'subject': s.n3(),
+                    'predicate': p.n3(),
+                    'object': o.n3()
                 }
+                id_ += 1
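
For readers of the diff: the generator now serializes each RDF term with rdflib's .n3() instead of str(), and example keys come from a single id_ counter that spans all chunks instead of enumerate() restarting at 0 per chunk, so keys stay unique across the whole split as the datasets builder expects. The following is a minimal sketch (not part of the commit) of what that changes; the Berlin triple is invented for illustration.

# Sketch: .n3() keeps RDF term syntax (angle brackets, language tags),
# which str() discards; a single id_ counter never repeats keys.
from rdflib import Graph

nt_data = (
    '<http://yago-knowledge.org/resource/Berlin> '
    '<http://www.w3.org/2000/01/rdf-schema#label> "Berlin"@en .\n'
)

graph = Graph(bind_namespaces="core")
graph.parse(data=nt_data, format="nt")

id_ = 0  # one counter across all chunks keeps example keys unique
for s, p, o in graph.triples((None, None, None)):
    print(id_, str(o))  # Berlin                                       (language tag lost)
    print(id_, o.n3())  # "Berlin"@en                                  (full N3 literal)
    print(id_, s.n3())  # <http://yago-knowledge.org/resource/Berlin>  (IRI in angle brackets)
    id_ += 1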