Matej Klemen committed
Commit: fb63763
Parent(s): b90cce2

Add first version of SentiCoref loading script

Files changed:
- README.md (+45, -3)
- dataset_infos.json (+1, -0)
- senticoref.py (+271, -0)
README.md
CHANGED
@@ -1,3 +1,45 @@
- ---
-
-
+ ---
+ dataset_info:
+   features:
+   - name: id_doc
+     dtype: string
+   - name: words
+     sequence:
+       sequence:
+         sequence: string
+   - name: lemmas
+     sequence:
+       sequence:
+         sequence: string
+   - name: msds
+     sequence:
+       sequence:
+         sequence: string
+   - name: ne_tags
+     sequence:
+       sequence:
+         sequence: string
+   - name: mentions
+     list:
+     - name: id_mention
+       dtype: string
+     - name: mention_data
+       struct:
+       - name: idx_par
+         dtype: uint32
+       - name: idx_sent
+         dtype: uint32
+       - name: word_indices
+         sequence: uint32
+       - name: global_word_indices
+         sequence: uint32
+   - name: coref_clusters
+     sequence:
+       sequence: string
+   splits:
+   - name: train
+     num_bytes: 21547216
+     num_examples: 756
+   download_size: 21892324
+   dataset_size: 21547216
+ ---
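For reference, a minimal sketch of consuming this schema once the script is published on the Hub (the repo id below is a placeholder, and `trust_remote_code=True` is only required on newer `datasets` releases that gate script-based datasets):

import datasets

# Placeholder repo id - substitute the actual Hub path of this dataset
data = datasets.load_dataset("<namespace>/senticoref", split="train", trust_remote_code=True)

doc = data[0]
# `words` (like `lemmas`, `msds` and `ne_tags`) is nested as
# document -> paragraph -> sentence -> token
print(doc["id_doc"], doc["words"][0][0])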
dataset_infos.json
ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "Slovene corpus for coreference resolution. Contains automatically(?) annotated named entities, manually annotated \ncoreferences, and manually verified lemmas and morphosyntactic tags.\n", "citation": "@misc{suk,\n title = {Training corpus {SUK} 1.0},\n author = {Arhar Holdt, {\u000b S}pela and Krek, Simon and Dobrovoljc, Kaja and Erjavec, Toma{\u000b z} and Gantar, Polona and {\u000b C}ibej, Jaka and Pori, Eva and Ter{\u000b c}on, Luka and Munda, Tina and {\u000b Z}itnik, Slavko and Robida, Nejc and Blagus, Neli and Mo{\u000b z}e, Sara and Ledinek, Nina and Holz, Nanika and Zupan, Katja and Kuzman, Taja and Kav{\u000b c}i{\u000b c}, Teja and {\u000b S}krjanec, Iza and Marko, Dafne and Jezer{\u000b s}ek, Lucija and Zajc, Anja},\n url = {http://hdl.handle.net/11356/1747},\n note = {Slovenian language resource repository {CLARIN}.{SI}},\n year = {2022}\n}\n", "homepage": "http://hdl.handle.net/11356/1747", "license": "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "words": {"feature": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "msds": {"feature": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "ne_tags": {"feature": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "mentions": [{"id_mention": {"dtype": "string", "id": null, "_type": "Value"}, "mention_data": {"idx_par": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_sent": {"dtype": "uint32", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "global_word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}}], "coref_clusters": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "senticoref", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 21632604, "num_examples": 756, "dataset_name": "senticoref"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1747/SUK.TEI.zip": {"num_bytes": 20906601, "checksum": "ae81fd3712e277f9ec6b2b3b076eb80b50c01704d6e644ca932b2013108a8f99"}}, "download_size": 20906601, "post_processing_size": null, "dataset_size": 21632604, "size_in_bytes": 42539205}}
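Note that the info recorded above still cites the previous corpus release (handle 11356/1747, year 2022), while the loading script below cites SUK 1.1 (handle 11356/1959). This file is conventionally regenerated from the script with the `datasets` CLI, along the lines of:

datasets-cli test senticoref.py --save_infos --all_configs

(the flag is spelled `--save_info` in newer `datasets` releases).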
senticoref.py
ADDED
@@ -0,0 +1,271 @@
+ """ Slovene corpus for coreference resolution. """
+ import os
+ from collections import OrderedDict
+ from copy import deepcopy
+ from typing import Dict
+
+ import datasets
+ import xml.etree.ElementTree as ET
+ import re
+
+
+ _CITATION = """\
+ @misc{suk,
+     title = {Training corpus {SUK} 1.1},
+     author = {Arhar Holdt, {\\v S}pela and Krek, Simon and Dobrovoljc, Kaja and Erjavec, Toma{\\v z} and Gantar, Polona and {\\v C}ibej, Jaka and Pori, Eva and Ter{\\v c}on, Luka and Munda, Tina and {\\v Z}itnik, Slavko and Robida, Nejc and Blagus, Neli and Mo{\\v z}e, Sara and Ledinek, Nina and Holz, Nanika and Zupan, Katja and Kuzman, Taja and Kav{\\v c}i{\\v c}, Teja and {\\v S}krjanec, Iza and Marko, Dafne and Jezer{\\v s}ek, Lucija and Zajc, Anja},
+     url = {http://hdl.handle.net/11356/1959},
+     note = {Slovenian language resource repository {CLARIN}.{SI}},
+     year = {2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Slovene corpus for coreference resolution. Contains automatically(?) annotated named entities, manually annotated
+ coreferences, and manually verified lemmas and morphosyntactic tags.
+ """
+
+ _HOMEPAGE = "http://hdl.handle.net/11356/1959"
+
+ _LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
+
+ _URLS = {
+     "suk.tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1959/SUK.TEI.zip",
+ }
+
+
+ XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
+
+
+ def namespace(element):
+     # https://stackoverflow.com/a/12946675
+     m = re.match(r'\{.*\}', element.tag)
+     return m.group(0) if m else ''
+
+
+ def recursively_parse_el(el_tag, opened_ne: str = "O", opened_mentions: list = None) -> Dict:
+     """
+     :param el_tag: XML ETree tag
+     :param opened_ne: Named entity tag encountered at the previous level(s) of the recursive parse
+     :param opened_mentions: IDs of mentions encountered at the previous level(s) of the recursive parse.
+         The word in the current tag is part of these mentions.
+     """
+     eff_opened_mentions = opened_mentions if opened_mentions is not None else []
+     id_words, words, lemmas, msds, ne_tags = [], [], [], [], []
+     mention_to_id_word = {}
+
+     if el_tag.tag.endswith(("w", "pc")):
+         id_word = el_tag.attrib[f"{XML_NAMESPACE}id"]
+         word_str = el_tag.text.strip()
+         lemma_str = el_tag.attrib["lemma"]
+         msd_str = el_tag.attrib["ana"]
+
+         id_words.append(id_word)
+         words.append(word_str)
+         lemmas.append(lemma_str)
+         msds.append(msd_str)
+         ne_tags.append(opened_ne)
+
+         for _id in eff_opened_mentions:
+             _existing = mention_to_id_word.get(_id, [])
+             _existing.append(id_word)
+
+             mention_to_id_word[_id] = _existing
+
+     # Named entity or some other type of coreference mention
+     elif el_tag.tag.endswith("seg"):
+         new_ne = opened_ne
+         if el_tag.attrib["type"] == "name":
+             assert opened_ne == "O", f"Potentially encountered a nested NE ({opened_ne}, {el_tag.attrib['subtype'].upper()})"
+             new_ne = el_tag.attrib["subtype"].upper()
+
+             # Discard information about derived named entities
+             if new_ne.startswith("DERIV-"):
+                 new_ne = new_ne[len("DERIV-"):]
+
+         # The mentions can be nested multiple levels, keep track of all mentions at current or shallower level
+         id_mention = el_tag.attrib[f"{XML_NAMESPACE}id"]
+         _opened_copy = deepcopy(eff_opened_mentions)
+         _opened_copy.append(id_mention)
+
+         for _i, _child in enumerate(el_tag):
+             _res = recursively_parse_el(_child, opened_ne=new_ne, opened_mentions=_opened_copy)
+
+             id_words.extend(_res["id_words"])
+             words.extend(_res["words"])
+             lemmas.extend(_res["lemmas"])
+             msds.extend(_res["msds"])
+             ne_tags.extend(_res["ne_tags"])
+
+             for _id_mention, _id_words in _res["mentions"].items():
+                 _existing = mention_to_id_word.get(_id_mention, [])
+                 _existing.extend(_id_words)
+                 mention_to_id_word[_id_mention] = _existing
+
+         if new_ne != "O":  # IOB2
+             ne_tags = [f"B-{_tag}" if _i == 0 else f"I-{_tag}" for _i, _tag in enumerate(ne_tags)]
+
+     else:
+         print(f"WARNING: unrecognized tag in `recursively_parse_el`: {el_tag}. "
+               f"Please open an issue on the HuggingFace datasets repository.")
+
+     return {
+         "id_words": id_words, "words": words, "lemmas": lemmas, "msds": msds, "ne_tags": ne_tags,
+         "mentions": mention_to_id_word
+     }
+
+
+ def parse_sent(sent_tag):
+     sent_info = {
+         "id_words": [], "words": [], "lemmas": [], "msds": [], "ne_tags": [],
+         "mentions": {}
+     }
+
+     for el in sent_tag:
+         if el.tag.endswith("linkGrp"):
+             # Parse coreference clusters later, outside of this function
+             continue
+
+         res = recursively_parse_el(el)
+         sent_info["id_words"].extend(res["id_words"])
+         sent_info["words"].extend(res["words"])
+         sent_info["lemmas"].extend(res["lemmas"])
+         sent_info["msds"].extend(res["msds"])
+         sent_info["ne_tags"].extend(res["ne_tags"])
+         sent_info["mentions"].update(res["mentions"])
+
+     return sent_info
+
+
+ class SentiCoref(datasets.GeneratorBasedBuilder):
+     """Slovene corpus for coreference resolution."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id_doc": datasets.Value("string"),
+                 "words": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))),
+                 "lemmas": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))),
+                 "msds": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))),
+                 "ne_tags": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))),
+                 "mentions": [{
+                     "id_mention": datasets.Value("string"),
+                     "mention_data": {
+                         "idx_par": datasets.Value("uint32"),
+                         "idx_sent": datasets.Value("uint32"),
+                         "word_indices": datasets.Sequence(datasets.Value("uint32")),
+                         "global_word_indices": datasets.Sequence(datasets.Value("uint32"))
+                     }
+                 }],
+                 "coref_clusters": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS["suk.tei"]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"file_path": os.path.join(data_dir, "SUK.TEI", "senticoref.xml")}
+             )
+         ]
+
+     def _generate_examples(self, file_path):
+         curr_doc = ET.parse(file_path)
+         root = curr_doc.getroot()
+         NAMESPACE = namespace(root)
+
+         for idx_doc, doc in enumerate(root.iterfind(f"{NAMESPACE}div")):
+             id2tokinfo = {}
+
+             doc_words, doc_lemmas, doc_msds, doc_ne_tags = [], [], [], []
+             doc_mentions = {}
+             doc_position = 0
+
+             # Step 1: Extract everything but the coreference clusters
+             # Clusters are marked at sentence level so they are often duplicated - find unique clusters afterwards
+             for idx_par, par in enumerate(doc.findall(f"{NAMESPACE}p")):
+                 par_words, par_lemmas, par_msds, par_ne_tags = [], [], [], []
+
+                 for idx_sent, sent in enumerate(par.findall(f"{NAMESPACE}s")):
+                     sent_data = parse_sent(sent)
+
+                     par_words.append(sent_data["words"])
+                     par_lemmas.append(sent_data["lemmas"])
+                     par_msds.append(sent_data["msds"])
+                     par_ne_tags.append(sent_data["ne_tags"])
+
+                     for pos_in_sent, (id_token, word_str, lemma_str, msd_str) in enumerate(zip(sent_data["id_words"],
+                                                                                                sent_data["words"],
+                                                                                                sent_data["lemmas"],
+                                                                                                sent_data["msds"])):
+                         id2tokinfo[id_token] = {
+                             "idx_par": idx_par, "idx_sent": idx_sent, "pos_in_sent": pos_in_sent,
+                             "doc_position": doc_position
+                         }
+                         doc_position += 1
+
+                     for id_mention, word_ids in sent_data["mentions"].items():
+                         mention_fmt = {
+                             "idx_par": idx_par, "idx_sent": idx_sent, "word_indices": [],
+                             "global_word_indices": []
+                         }
+
+                         for _id in word_ids:
+                             _info = id2tokinfo[_id]
+                             mention_fmt["word_indices"].append(_info["pos_in_sent"])
+                             mention_fmt["global_word_indices"].append(_info["doc_position"])
+
+                         doc_mentions[id_mention] = mention_fmt
+
+                 doc_words.append(par_words)
+                 doc_lemmas.append(par_lemmas)
+                 doc_msds.append(par_msds)
+                 doc_ne_tags.append(par_ne_tags)
+
+             # Step 2: extract coreference clusters
+             unique_clusters = OrderedDict()  # Preserving order just in case
+             for link_group in doc.findall(f".//{NAMESPACE}linkGrp[@type = 'COREF']"):
+                 for link in link_group.findall(f"{NAMESPACE}link"):
+                     # Remove the reference marker ("#") in front of ID
+                     cluster = tuple(map(lambda _s: _s[1:], link.attrib["target"].split(" ")))
+                     unique_clusters[cluster] = None
+
+             doc_clusters = []
+             for cluster in unique_clusters:
+                 doc_clusters.append(list(cluster))
+                 for id_mention in cluster:
+                     if id_mention not in doc_mentions:
+                         # Mention may be a regular token, i.e. a word referring to an entity
+                         # (`id_mention` is then the ID of a token)
+                         _info = id2tokinfo[id_mention]
+                         doc_mentions[id_mention] = {
+                             "idx_par": _info["idx_par"], "idx_sent": _info["idx_sent"],
+                             "word_indices": [_info["pos_in_sent"]],
+                             "global_word_indices": [_info["doc_position"]]
+                         }
+
+             # Convert to list of dictionaries as datasets expects fixed key names
+             doc_mentions_list = []
+             for id_mention, mention_data in doc_mentions.items():
+                 doc_mentions_list.append({
+                     "id_mention": id_mention,
+                     "mention_data": mention_data
+                 })
+
+             yield idx_doc, {
+                 "id_doc": doc.attrib[f"{XML_NAMESPACE}id"],
+                 "words": doc_words, "lemmas": doc_lemmas, "msds": doc_msds, "ne_tags": doc_ne_tags,
+                 "mentions": doc_mentions_list,
+                 "coref_clusters": doc_clusters
+             }
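To illustrate the structure yielded above, a small consumption sketch (assuming the script is saved locally as senticoref.py; on newer `datasets` releases loading a script additionally requires `trust_remote_code=True`). It flattens the document -> paragraph -> sentence -> token nesting so that `global_word_indices` can index tokens directly, then prints the words of each mention in the first coreference cluster:

import datasets

data = datasets.load_dataset("senticoref.py", split="train")
doc = data[0]

# Flatten document -> paragraph -> sentence -> token into a single token list
flat_words = [w for par in doc["words"] for sent in par for w in sent]

# `mentions` is a list of {"id_mention", "mention_data"} records
mentions = {m["id_mention"]: m["mention_data"] for m in doc["mentions"]}

if doc["coref_clusters"]:
    for id_mention in doc["coref_clusters"][0]:
        mention = mentions[id_mention]
        print(id_mention, [flat_words[i] for i in mention["global_word_indices"]])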