holylovenia committed
Commit c13b50a
1 Parent(s): a129c9f

Upload stb_ext.py with huggingface_hub

Files changed (1)
  1. stb_ext.py +195 -0
stb_ext.py ADDED
@@ -0,0 +1,195 @@
import io

import conllu
import datasets

from seacrowd.utils.common_parser import load_ud_data_as_seacrowd_kb
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils import schemas
from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Licenses, Tasks
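# conllu parses the CoNLL-U treebank files; the seacrowd.utils modules provide
# the shared SEACrowd schemas, config class, and constants used by dataloaders.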

_DATASETNAME = "stb_ext"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

_LANGUAGES = ["eng"]
_LOCAL = False
_CITATION = """\
@article{wang2019genesis,
  title={From genesis to creole language: Transfer learning for singlish universal dependencies parsing and POS tagging},
  author={Wang, Hongmin and Yang, Jie and Zhang, Yue},
  journal={ACM Transactions on Asian and Low-Resource Language Information Processing (TALLIP)},
  volume={19},
  number={1},
  pages={1--29},
  year={2019},
  publisher={ACM New York, NY, USA}
}
"""

_DESCRIPTION = """\
We adopt the Universal Dependencies protocol for constructing the Singlish dependency treebank, both as a new resource
for the low-resource languages and to facilitate knowledge transfer from English. Briefly, the STB-EXT dataset offers
a 3-times larger training set, while keeping the same dev and test sets from STB-ACL. We provide treebanks with both
gold-standard as well as automatically generated POS tags.
"""

_HOMEPAGE = "https://github.com/wanghm92/Sing_Par/tree/master/TALLIP19_dataset/treebank"

_LICENSE = Licenses.MIT.value

_PREFIX = "https://raw.githubusercontent.com/wanghm92/Sing_Par/master/TALLIP19_dataset/treebank/"
_URLS = {
    "gold_pos": {
        "train": _PREFIX + "gold_pos/train.ext.conll",
    },
    "en_ud_autopos": {
        "train": _PREFIX + "en-ud-autopos/en-ud-train.conllu.autoupos",
        "validation": _PREFIX + "en-ud-autopos/en-ud-dev.conllu.ann.auto.epoch24.upos",
        "test": _PREFIX + "en-ud-autopos/en-ud-test.conllu.ann.auto.epoch24.upos",
    },
    "auto_pos_multiview": {
        "train": _PREFIX + "auto_pos/multiview/train.autopos.multiview.conll",
        "validation": _PREFIX + "auto_pos/multiview/dev.autopos.multiview.conll",
        "test": _PREFIX + "auto_pos/multiview/test.autopos.multiview.conll",
    },
    "auto_pos_stack": {
        "train": _PREFIX + "auto_pos/stack/train.autopos.stack.conll",
        "validation": _PREFIX + "auto_pos/stack/dev.autopos.stack.conll",
        "test": _PREFIX + "auto_pos/stack/test.autopos.stack.conll",
    },
}
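# Four subsets: gold_pos ships a train split only, while en_ud_autopos,
# auto_pos_multiview, and auto_pos_stack each provide train/validation/test files.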
_POSTAGS = ["ADJ", "ADP", "ADV", "AUX", "CONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", "root"]
_SUPPORTED_TASKS = [Tasks.POS_TAGGING, Tasks.DEPENDENCY_PARSING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


def config_constructor(subset_id, schema, version):
    return SEACrowdConfig(
        name=f"{_DATASETNAME}_{subset_id}_{schema}",
        version=datasets.Version(version),
        description=_DESCRIPTION,
        schema=schema,
        subset_id=subset_id,
    )
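# For example, config_constructor("gold_pos", "source", "1.0.0") builds a config
# named "stb_ext_gold_pos_source", which is also the DEFAULT_CONFIG_NAME below.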


class StbExtDataset(datasets.GeneratorBasedBuilder):
    """This is a seacrowd dataloader for the STB-EXT dataset, which offers a 3-times larger training set, while keeping
    the same dev and test sets from STB-ACL. It provides treebanks with both gold-standard and automatically generated POS tags."""

    BUILDER_CONFIGS = [
        # source
        config_constructor(subset_id="auto_pos_stack", schema="source", version=_SOURCE_VERSION),
        config_constructor(subset_id="auto_pos_multiview", schema="source", version=_SOURCE_VERSION),
        config_constructor(subset_id="en_ud_autopos", schema="source", version=_SOURCE_VERSION),
        config_constructor(subset_id="gold_pos", schema="source", version=_SOURCE_VERSION),
        # seq_label
        config_constructor(subset_id="auto_pos_stack", schema="seacrowd_seq_label", version=_SEACROWD_VERSION),
        config_constructor(subset_id="auto_pos_multiview", schema="seacrowd_seq_label", version=_SEACROWD_VERSION),
        config_constructor(subset_id="en_ud_autopos", schema="seacrowd_seq_label", version=_SEACROWD_VERSION),
        config_constructor(subset_id="gold_pos", schema="seacrowd_seq_label", version=_SEACROWD_VERSION),
        # dependency parsing
        config_constructor(subset_id="auto_pos_stack", schema="seacrowd_kb", version=_SEACROWD_VERSION),
        config_constructor(subset_id="auto_pos_multiview", schema="seacrowd_kb", version=_SEACROWD_VERSION),
        config_constructor(subset_id="en_ud_autopos", schema="seacrowd_kb", version=_SEACROWD_VERSION),
        config_constructor(subset_id="gold_pos", schema="seacrowd_kb", version=_SEACROWD_VERSION),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_gold_pos_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    # metadata
                    "sent_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "text_en": datasets.Value("string"),
                    # tokens
                    "id": [datasets.Value("string")],
                    "form": [datasets.Value("string")],
                    "lemma": [datasets.Value("string")],
                    "upos": [datasets.Value("string")],
                    "xpos": [datasets.Value("string")],
                    "feats": [datasets.Value("string")],
                    "head": [datasets.Value("string")],
                    "deprel": [datasets.Value("string")],
                    "deps": [datasets.Value("string")],
                    "misc": [datasets.Value("string")],
                }
            )
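            # The token-level fields above mirror the ten standard CoNLL-U
            # columns (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC).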
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label_features(label_names=_POSTAGS)
        elif self.config.schema == "seacrowd_kb":
            features = schemas.kb_features
        else:
            raise ValueError(f"Invalid config: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return SplitGenerators for the splits available in the chosen subset."""
        urls = _URLS[self.config.subset_id]
        downloaded_files = dl_manager.download_and_extract(urls)
        splits = []
        if "train" in downloaded_files:
            splits.append(datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}))
        if "validation" in downloaded_files:
            splits.append(datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}))
        if "test" in downloaded_files:
            splits.append(datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}))
        return splits
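    # Note: gold_pos defines only a train file in _URLS, so only the TRAIN split
    # generator is produced for that subset; the others yield all three splits.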

    def _generate_examples(self, filepath):
        def process_buffer(file_obj):
            # Strip any UTF-8 byte-order marks before handing the stream to the
            # conllu parser, which would otherwise treat them as token text.
            BOM = "\ufeff"
            buffer = io.StringIO()
            for line in file_obj:
                line = line.replace(BOM, "") if BOM in line else line
                buffer.write(line)
            buffer.seek(0)
            return buffer

        with open(filepath, "r", encoding="utf-8") as data_file:
            tokenlist = list(conllu.parse_incr(process_buffer(data_file)))
        data_instances = []
        for idx, sent in enumerate(tokenlist):
            idx = sent.metadata["sent_id"] if "sent_id" in sent.metadata else idx
            tokens = [token["form"] for token in sent]
            txt = sent.metadata["text"] if "text" in sent.metadata else " ".join(tokens)
            example = {
                # meta
                "sent_id": str(idx),
                "text": txt,
                "text_en": txt,
                # tokens
                "id": [token["id"] for token in sent],
                "form": [token["form"] for token in sent],
                "lemma": [token["lemma"] for token in sent],
                "upos": [token["upos"] for token in sent],
                "xpos": [token["xpos"] for token in sent],
                "feats": [str(token["feats"]) for token in sent],
                "head": [str(token["head"]) for token in sent],
                "deprel": [str(token["deprel"]) for token in sent],
                "deps": [str(token["deps"]) for token in sent],
                "misc": [str(token["misc"]) for token in sent],
            }
            data_instances.append(example)

        if self.config.schema == "source":
            # The source schema uses the examples exactly as built above.
            pass
        elif self.config.schema == "seacrowd_seq_label":
            # Keep only the tokens and their UPOS labels for sequence labeling.
            data_instances = list(
                map(
                    lambda d: {
                        "id": d["sent_id"],
                        "tokens": d["form"],
                        "labels": d["upos"],
                    },
                    data_instances,
                )
            )
        elif self.config.schema == "seacrowd_kb":
            # Re-read the file and convert it into the SEACrowd knowledge-base
            # schema used for dependency parsing.
            data_instances = load_ud_data_as_seacrowd_kb(filepath, data_instances)

        for key, exam in enumerate(data_instances):
            yield key, exam
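
For reference, a minimal loading sketch (a hypothetical usage example, not part of the commit; it assumes the script is saved locally as stb_ext.py and that the seacrowd package is importable, since the loader depends on its utilities):

import datasets

# Default config: the gold_pos subset in the source schema.
# trust_remote_code is required for script-based datasets in recent
# releases of the `datasets` library.
ds = datasets.load_dataset("stb_ext.py", name="stb_ext_gold_pos_source", trust_remote_code=True)
print(ds["train"][0]["form"])  # tokens of the first training sentence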