Datasets: cjvt /
Tasks: Other
Modalities: Text
Languages: Slovenian
Libraries: Datasets
License: CC BY-SA 4.0 (see _LICENSE in the script below)
File size: 6,477 Bytes
""" An automatically created Slovene thesaurus. """
import logging
import os
import xml.etree.ElementTree as ET

import datasets

_CITATION = """\
@article{krek2017translation,
  title={From translation equivalents to synonyms: creation of a Slovene thesaurus using word co-occurrence network analysis},
  author={Krek, Simon and Laskowski, Cyprian and Robnik-{\v{S}}ikonja, Marko},
  journal={Proceedings of eLex},
  pages={93--109},
  year={2017}
}
"""

_DESCRIPTION = """\
This is an automatically created Slovene thesaurus, built from Slovene data available in a comprehensive
English–Slovenian dictionary, a monolingual dictionary, and a corpus. Network analysis of the bilingual dictionary's
word co-occurrence graph was used, together with additional information from the distributional thesaurus data
available as part of the Sketch Engine tool and extracted from the 1.2-billion-word Gigafida corpus and the
monolingual dictionary.
"""

_HOMEPAGE = "http://hdl.handle.net/11356/1166"

_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

_URLS = {
    "slo_thesaurus": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1166/CJVT_Thesaurus-v1.0.zip",
}


class SloThesaurus(datasets.GeneratorBasedBuilder):
    """An automatically created Slovene thesaurus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id_headword": datasets.Value("string"),
                "headword": datasets.Value("string"),
                "groups_core": [
                    {
                        "id_words": datasets.Sequence(datasets.Value("string")),
                        "words": datasets.Sequence(datasets.Value("string")),
                        "scores": datasets.Sequence(datasets.Value("float32")),
                        "domains": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
                    }
                ],
                "groups_near": [
                    {
                        "id_words": datasets.Sequence(datasets.Value("string")),
                        "words": datasets.Sequence(datasets.Value("string")),
                        "scores": datasets.Sequence(datasets.Value("float32")),
                        "domains": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
                    }
                ]
            }
        )
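        # Illustrative shape of a single example produced by _generate_examples() below, matching the
        # features above. The concrete values here are made-up placeholders, not actual dataset content:
        # {
        #     "id_headword": "...", "headword": "...",
        #     "groups_core": [{"id_words": ["..."], "words": ["..."], "scores": [0.42], "domains": [["..."]]}],
        #     "groups_near": [...]
        # }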

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["slo_thesaurus"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "CJVT_Thesaurus-v1.0.xml")}
            )
        ]

    def _generate_examples(self, file_path):
        curr_doc = ET.parse(file_path)
        root = curr_doc.getroot()

        # Each <entry> element describes one headword together with its core and near synonym candidate groups.
        for idx_entry, curr_entry in enumerate(root.iterfind(".//entry")):
            head_word = curr_entry.find("headword")
            if head_word is None:
                logging.warning("An entry is missing its <headword> element, which should not happen. "
                                "Please open an issue on the dataset repository if you are seeing this.")
                head_word = {"text": "UNK_headword", "id": "NA_id"}
            else:
                head_word = {"text": head_word.text.strip(), "id": head_word.attrib["id"]}

            # Core synonym groups: each <group> holds <candidate> synonyms with a score and optional domain labels.
            all_core_groups = []
            core_groups = curr_entry.find("groups_core")
            if core_groups is not None:
                for core_group in core_groups.iterfind("group"):
                    parsed_group = {"id_words": [], "words": [], "scores": [], "domains": []}
                    for candidate in core_group.iterfind("candidate"):
                        candidate_s = candidate.find("s")
                        candidate_domains = candidate.find("labels")
                        if candidate_domains is not None:
                            candidate_domains = [la.text.strip() for la in candidate_domains.findall("la")]
                        else:
                            candidate_domains = []

                        parsed_group["id_words"].append(candidate_s.attrib["id"])
                        parsed_group["words"].append(candidate_s.text.strip())
                        parsed_group["scores"].append(float(candidate.attrib["score"]))
                        parsed_group["domains"].append(candidate_domains)

                    all_core_groups.append(parsed_group)

            # Near synonym groups: parsed the same way as the core groups above.
            all_near_groups = []
            near_groups = curr_entry.find("groups_near")
            if near_groups is not None:
                for near_group in near_groups.iterfind("group"):
                    parsed_group = {"id_words": [], "words": [], "scores": [], "domains": []}
                    for candidate in near_group.iterfind("candidate"):
                        candidate_s = candidate.find("s")
                        candidate_domains = candidate.find("labels")
                        if candidate_domains is not None:
                            candidate_domains = [la.text.strip() for la in candidate_domains.findall("la")]
                        else:
                            candidate_domains = []

                        parsed_group["id_words"].append(candidate_s.attrib["id"])
                        parsed_group["words"].append(candidate_s.text.strip())
                        parsed_group["scores"].append(float(candidate.attrib["score"]))
                        parsed_group["domains"].append(candidate_domains)

                    all_near_groups.append(parsed_group)

            yield idx_entry, {
                "id_headword": head_word["id"],
                "headword": head_word["text"],
                "groups_core": all_core_groups,
                "groups_near": all_near_groups
            }
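
Usage sketch: a minimal, hedged example of loading the dataset through this script with the datasets
library. The local file name "slo_thesaurus.py" is an assumption, and recent versions of datasets may
require trust_remote_code=True (or may no longer support script-based datasets at all).

import datasets

# Load the whole thesaurus; _split_generators above exposes everything as a single "train" split.
dset = datasets.load_dataset("slo_thesaurus.py", split="train")

entry = dset[0]
print(entry["headword"])       # the headword string
print(entry["groups_core"])    # core synonym groups: word ids, words, scores, domain labels
print(entry["groups_near"])    # near synonym groups, same structure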