Matej Klemen
committed on
Commit
•
585afdc
1
Parent(s):
b630229
Add domain information where it exists
Browse files - slo_thesaurus.py: +23 -6
slo_thesaurus.py
CHANGED
@@ -1,11 +1,10 @@
|
|
1 |
""" An automatically created Slovene thesaurus. """
|
2 |
import logging
|
3 |
-
import xml.etree.ElementTree as ET
|
4 |
import os
|
|
|
5 |
|
6 |
import datasets
|
7 |
|
8 |
-
|
9 |
_CITATION = """\
|
10 |
@article{krek2017translation,
|
11 |
title={From translation equivalents to synonyms: creation of a Slovene thesaurus using word co-occurrence network analysis},
|
@@ -47,14 +46,16 @@ class SloThesaurus(datasets.GeneratorBasedBuilder):
|
|
47 |
{
|
48 |
"id_words": datasets.Sequence(datasets.Value("string")),
|
49 |
"words": datasets.Sequence(datasets.Value("string")),
|
50 |
-
"scores": datasets.Sequence(datasets.Value("float32"))
|
|
|
51 |
}
|
52 |
],
|
53 |
"groups_near": [
|
54 |
{
|
55 |
"id_words": datasets.Sequence(datasets.Value("string")),
|
56 |
"words": datasets.Sequence(datasets.Value("string")),
|
57 |
-
"scores": datasets.Sequence(datasets.Value("float32"))
|
|
|
58 |
}
|
59 |
]
|
60 |
}
|
@@ -95,13 +96,21 @@ class SloThesaurus(datasets.GeneratorBasedBuilder):
|
|
95 |
core_groups = curr_entry.find("groups_core")
|
96 |
if core_groups is not None:
|
97 |
for idx_group, core_group in enumerate(core_groups.iterfind("group"), start=0):
|
98 |
-
parsed_group = {"id_words": [], "words": [], "scores": []}
|
99 |
all_candidates = core_group.iterfind("candidate")
|
100 |
for candidate in all_candidates:
|
101 |
candidate_s = candidate.find("s")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
102 |
parsed_group["id_words"].append(candidate_s.attrib["id"])
|
103 |
parsed_group["words"].append(candidate_s.text.strip())
|
104 |
parsed_group["scores"].append(float(candidate.attrib["score"]))
|
|
|
105 |
|
106 |
all_core_groups.append(parsed_group)
|
107 |
|
@@ -109,13 +118,21 @@ class SloThesaurus(datasets.GeneratorBasedBuilder):
|
|
109 |
near_groups = curr_entry.find("groups_near")
|
110 |
if near_groups is not None:
|
111 |
for idx_group, core_group in enumerate(near_groups.iterfind("group"), start=0):
|
112 |
-
parsed_group = {"id_words": [], "words": [], "scores": []}
|
113 |
all_candidates = core_group.iterfind("candidate")
|
114 |
for candidate in all_candidates:
|
115 |
candidate_s = candidate.find("s")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
116 |
parsed_group["id_words"].append(candidate_s.attrib["id"])
|
117 |
parsed_group["words"].append(candidate_s.text.strip())
|
118 |
parsed_group["scores"].append(float(candidate.attrib["score"]))
|
|
|
119 |
|
120 |
all_near_groups.append(parsed_group)
|
121 |
|
|
|
1 |
""" An automatically created Slovene thesaurus. """
|
2 |
import logging
|
|
|
3 |
import os
|
4 |
+
import xml.etree.ElementTree as ET
|
5 |
|
6 |
import datasets
|
7 |
|
|
|
8 |
_CITATION = """\
|
9 |
@article{krek2017translation,
|
10 |
title={From translation equivalents to synonyms: creation of a Slovene thesaurus using word co-occurrence network analysis},
|
|
|
46 |
{
|
47 |
"id_words": datasets.Sequence(datasets.Value("string")),
|
48 |
"words": datasets.Sequence(datasets.Value("string")),
|
49 |
+
"scores": datasets.Sequence(datasets.Value("float32")),
|
50 |
+
"domains": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
|
51 |
}
|
52 |
],
|
53 |
"groups_near": [
|
54 |
{
|
55 |
"id_words": datasets.Sequence(datasets.Value("string")),
|
56 |
"words": datasets.Sequence(datasets.Value("string")),
|
57 |
+
"scores": datasets.Sequence(datasets.Value("float32")),
|
58 |
+
"domains": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
|
59 |
}
|
60 |
]
|
61 |
}
|
|
|
96 |
core_groups = curr_entry.find("groups_core")
|
97 |
if core_groups is not None:
|
98 |
for idx_group, core_group in enumerate(core_groups.iterfind("group"), start=0):
|
99 |
+
parsed_group = {"id_words": [], "words": [], "scores": [], "domains": []}
|
100 |
all_candidates = core_group.iterfind("candidate")
|
101 |
for candidate in all_candidates:
|
102 |
candidate_s = candidate.find("s")
|
103 |
+
candidate_domains = candidate.find("labels")
|
104 |
+
if candidate_domains is not None:
|
105 |
+
candidate_domains = list(map(lambda candidate_el: candidate_el.text.strip(),
|
106 |
+
candidate_domains.findall("la")))
|
107 |
+
else:
|
108 |
+
candidate_domains = []
|
109 |
+
|
110 |
parsed_group["id_words"].append(candidate_s.attrib["id"])
|
111 |
parsed_group["words"].append(candidate_s.text.strip())
|
112 |
parsed_group["scores"].append(float(candidate.attrib["score"]))
|
113 |
+
parsed_group["domains"].append(candidate_domains)
|
114 |
|
115 |
all_core_groups.append(parsed_group)
|
116 |
|
|
|
118 |
near_groups = curr_entry.find("groups_near")
|
119 |
if near_groups is not None:
|
120 |
for idx_group, core_group in enumerate(near_groups.iterfind("group"), start=0):
|
121 |
+
parsed_group = {"id_words": [], "words": [], "scores": [], "domains": []}
|
122 |
all_candidates = core_group.iterfind("candidate")
|
123 |
for candidate in all_candidates:
|
124 |
candidate_s = candidate.find("s")
|
125 |
+
candidate_domains = candidate.find("labels")
|
126 |
+
if candidate_domains is not None:
|
127 |
+
candidate_domains = list(map(lambda candidate_el: candidate_el.text.strip(),
|
128 |
+
candidate_domains.findall("la")))
|
129 |
+
else:
|
130 |
+
candidate_domains = []
|
131 |
+
|
132 |
parsed_group["id_words"].append(candidate_s.attrib["id"])
|
133 |
parsed_group["words"].append(candidate_s.text.strip())
|
134 |
parsed_group["scores"].append(float(candidate.attrib["score"]))
|
135 |
+
parsed_group["domains"].append(candidate_domains)
|
136 |
|
137 |
all_near_groups.append(parsed_group)
|
138 |
|