File size: 2,941 Bytes
52d5f6c
e0dd3d8
52d5f6c
25a3f03
e0dd3d8
52d5f6c
 
 
 
 
 
 
 
25a3f03
52d5f6c
 
e0dd3d8
 
 
 
 
 
 
 
 
 
 
 
52d5f6c
 
 
 
 
 
 
e0dd3d8
52d5f6c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25a3f03
52d5f6c
 
 
 
 
e0dd3d8
52d5f6c
e0dd3d8
 
 
52d5f6c
25a3f03
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import os
from datasets import DatasetBuilder, SplitGenerator, DownloadConfig, load_dataset, DownloadManager, DatasetInfo
from rdflib import Graph, URIRef, Literal, BNode
from rdflib.namespace import RDF, RDFS, OWL, XSD, Namespace, NamespaceManager
from datasets.features import Features, Value

SCHEMA = Namespace('http://schema.org/')

YAGO = Namespace('http://yago-knowledge.org/resource/')

class YAGO45DatasetBuilder(DatasetBuilder):
    """Dataset builder for an English-label subset of YAGO 4.5.

    Downloads the fact chunks and the YAGO taxonomy, parses them with
    rdflib, and yields one example per RDF triple as plain strings:
    ``{'subject': ..., 'predicate': ..., 'object': ...}``.
    """

    VERSION = "1.0.0"

    # Class-level default kept for backward compatibility; a fresh
    # per-instance graph is assigned in _split_generators so parsed
    # triples are not shared (and accumulated) across builder instances.
    taxonomy = Graph(bind_namespaces="core")

    def _info(self):
        """Return the static DatasetInfo (description, citation, features)."""
        return DatasetInfo(
            description="A subset of the YAGO 4.5 dataset maintaining only English labels",
            citation="@article{suchanek2023integrating,title={Integrating the Wikidata Taxonomy into YAGO},author={Suchanek, Fabian M and Alam, Mehwish and Bonald, Thomas and Paris, Pierre-Henri and Soria, Jules},journal={arXiv preprint arXiv:2308.11884},year={2023}}",
            homepage="https://yago-knowledge.org/",
            license="https://creativecommons.org/licenses/by-sa/3.0/",
            features=Features({
                'subject': Value('string'),
                'predicate': Value('string'),
                'object': Value('string')
            })
        )

    def _split_generators(self, dl_manager):
        """Download/extract the source files and define the train split.

        Parses the taxonomy once here; the fact chunks themselves are
        parsed lazily in _generate_examples.
        """
        # Download and extract the dataset files into a local cache.
        dl_manager.download_config = DownloadConfig(cache_dir=os.path.abspath("raw"))
        dl_manager.download_and_extract(["facts.tar.gz", "yago-taxonomy.ttl"])

        # Parse into a fresh per-instance graph rather than the shared
        # class attribute, so repeated builds don't accumulate triples.
        self.taxonomy = Graph(bind_namespaces="core")
        self.taxonomy.parse(os.path.join(dl_manager.manual_dir, 'yago-taxonomy.ttl'), format='turtle')

        # Prefix -> namespace mappings, re-bound on each chunk graph later.
        prefix_mappings = dict(self.taxonomy.namespaces())

        # One "train" split covering every extracted N-Triples chunk.
        chunk_paths = [
            os.path.join(dl_manager.manual_dir, chunk)
            for chunk in os.listdir(dl_manager.manual_dir)
            if chunk.endswith('.nt')
        ]
        return [SplitGenerator(name="train", gen_kwargs={'chunk_paths': chunk_paths, 'prefix_mappings': prefix_mappings})]

    def _generate_examples(self, chunk_paths, prefix_mappings):
        """Yield (key, example) pairs for every triple in every chunk.

        Keys must be unique across the whole split: the original code
        restarted enumerate() at 0 for each chunk, producing duplicate
        keys whenever more than one chunk exists. A single running
        counter spanning all chunks fixes that.
        """
        key = 0
        for chunk_path in chunk_paths:
            # Parse one chunk at a time to bound memory usage.
            graph = Graph(bind_namespaces="core")
            for prefix, namespace in prefix_mappings.items():
                graph.bind(prefix, namespace)
            graph.parse(chunk_path, format='nt')

            # Emit each triple as stringified subject/predicate/object.
            for s, p, o in graph:
                yield key, {
                    'subject': str(s),
                    'predicate': str(p),
                    'object': str(o)
                }
                key += 1