# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3

""" BasqueGLUE: A Natural Language Understanding Benchmark for Basque """

import json
import os
import textwrap

import datasets
from datasets import DownloadManager
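
# A minimal usage sketch (the Hub repo id below is an assumption, not taken
# from this file; adjust it to wherever this script is hosted):
#
#     import datasets
#     ds = datasets.load_dataset("orai-nlp/basqueGLUE", "bec")
#     print(ds["train"][0])  # {'text': ..., 'label': ..., 'idx': ...}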

_CITATION = """\
@InProceedings{urbizu2022basqueglue,
  author    = {Urbizu, Gorka  and  San Vicente, Iñaki  and  Saralegi, Xabier  and  Agerri, Rodrigo  and  Soroa, Aitor},
  title     = {BasqueGLUE: A Natural Language Understanding Benchmark for Basque},
  booktitle      = {Proceedings of the Language Resources and Evaluation Conference},
  month          = {June},
  year           = {2022},
  address        = {Marseille, France},
  publisher      = {European Language Resources Association},
  pages     = {1603--1612},
  abstract  = {Natural Language Understanding (NLU) technology has improved significantly over the last few years and multitask benchmarks such as GLUE are key to evaluate this improvement in a robust and general way. These benchmarks take into account a wide and diverse set of NLU tasks that require some form of language understanding, beyond the detection of superficial, textual clues. However, they are costly to develop and language-dependent, and therefore they are only available for a small number of languages. In this paper, we present BasqueGLUE, the first NLU benchmark for Basque, a less-resourced language, which has been elaborated from previously existing datasets and following similar criteria to those used for the construction of GLUE and SuperGLUE. We also report the evaluation of two state-of-the-art language models for Basque on BasqueGLUE, thus providing a strong baseline to compare upon. BasqueGLUE is freely available under an open license.},
  url       = {https://aclanthology.org/2022.lrec-1.172}
}
"""

_DESCRIPTION = """\
We present BasqueGLUE, the first NLU benchmark for Basque, which has been elaborated from 
previously existing datasets and following similar criteria to those used for the construction of 
GLUE and SuperGLUE. BasqueGLUE is freely available under an open license.
"""

_HOMEPAGE = "https://github.com/orai-nlp/BasqueGLUE"

URL = "https://raw.githubusercontent.com/orai-nlp/BasqueGLUE/main/"

CONFIGS = [
    "bec",
    "bhtc",
    "coref",
    "intent",
    "nerc_id",
    "nerc_od",
    "qnli",
    "slot",
    "vaxx",
    "wic"
]

SPLITS = {
    "train": datasets.Split.TRAIN,
    "test": datasets.Split.TEST,
    "val": datasets.Split.VALIDATION
}

_URLS = {
    config: {split: URL + f"{config}/{split}.jsonl" for split in SPLITS.keys()} for config in CONFIGS
}
_URLS["wic"]["train"] = URL + "wic/train.zip"


class BasqueGLUEConfig(datasets.BuilderConfig):
    """BuilderConfig for BasqueGLUE"""

    def __init__(self,
                 text_features,
                 label_column,
                 citation,
                 label_classes,
                 int_features=None,
                 is_tokens=False,
                 **kwargs
                 ):
        """
        BuilderConfig for BasqueGLUE

        :param text_features: `list[string]`, the list of text columns
        :param int_features: `list[string]`, the list of int columns (optional)
        :param label_column: `string`, label column
        :param citation: `string`, citation for the data set
        :param label_classes: `list[string]`, the list of classes
        :param is_tokens: `bool`, indicates config is a token classification task
        :param kwargs: keyword arguments forwarded to super
        """
        super().__init__(**kwargs)
        self.text_features = text_features
        self.int_features = int_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.citation = citation
        self.is_tokens = is_tokens
        self.label_map = {label: idx for idx, label in enumerate(label_classes)}
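        # e.g. for the "bec" config: {"N": 0, "NEU": 1, "P": 2}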


class BasqueGLUE(datasets.GeneratorBasedBuilder):
    """Dataset builder for the BasqueGLUE benchmark."""

    BUILDER_CONFIGS = [
        BasqueGLUEConfig(
            name='bec',
            description=textwrap.dedent(
                """\
                The Basque Election Campaign 2016 Opinion Dataset (BEC2016eu) is a new dataset for 
                the task of sentiment analysis, a sequence classification task, which contains 
                tweets about the campaign for the Basque elections from 2016. The crawling was 
                carried out during the election campaign period (2016/09/09-2016/09/23), by 
                monitoring the main parties and their respective candidates. The tweets were 
                manually annotated as positive, negative or neutral.
                """
            ),
            text_features=['text'],
            label_column="label",
            label_classes=["N", "NEU", "P"],
            citation=textwrap.dedent(_CITATION)
        ),
        BasqueGLUEConfig(
            name='bhtc',
            description=textwrap.dedent(
                """\
                The corpus contains 12,296 news headlines (brief article descriptions) from the 
                Basque weekly newspaper [Argia](https://www.argia.eus). Topics are classified 
                uniquely according to twelve thematic categories.
                """
            ),
            text_features=["text"],
            label_column="label",
            label_classes=["Ekonomia", "Euskal Herria", "Euskara", "Gizartea", "Historia",
                           "Ingurumena", "Iritzia", "Komunikazioa", "Kultura", "Nazioartea",
                           "Politika", "Zientzia"],
            citation=textwrap.dedent(
                """\
                @inproceedings{agerri-etal-2020-give,
                    title = "Give your Text Representation Models some Love: the Case for {B}asque",
                    author = "Agerri, Rodrigo  and
                      San Vicente, I{\~n}aki  and
                      Campos, Jon Ander  and
                      Barrena, Ander  and
                      Saralegi, Xabier  and
                      Soroa, Aitor  and
                      Agirre, Eneko",
                    booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
                    month = may,
                    year = "2020",
                    address = "Marseille, France",
                    publisher = "European Language Resources Association",
                    url = "https://aclanthology.org/2020.lrec-1.588",
                    pages = "4781--4788",
                    abstract = "Word embeddings and pre-trained language models allow to build rich representations of text and have enabled improvements across most NLP tasks. Unfortunately they are very expensive to train, and many small companies and research groups tend to use models that have been pre-trained and made available by third parties, rather than building their own. This is suboptimal as, for many languages, the models have been trained on smaller (or lower quality) corpora. In addition, monolingual pre-trained models for non-English languages are not always available. At best, models for those languages are included in multilingual versions, where each language shares the quota of substrings and parameters with the rest of the languages. This is particularly true for smaller languages such as Basque. In this paper we show that a number of monolingual models (FastText word embeddings, FLAIR and BERT language models) trained with larger Basque corpora produce much better results than publicly available versions in downstream NLP tasks, including topic classification, sentiment classification, PoS tagging and NER. This work sets a new state-of-the-art in those tasks for Basque. All benchmarks and models used in this work are publicly available.",
                    language = "English",
                    ISBN = "979-10-95546-34-4",
                }
                """
            )
        ),
        BasqueGLUEConfig(
            name='coref',
            description=textwrap.dedent(
                """\
                EPEC-KORREF-Bin is a dataset derived from EPEC-KORREF (Soraluze et al. 
                2012), a corpus of Basque news documents with manually annotated mentions and 
                coreference chains, which we have been converted into a binary classification 
                task. In this task, the model has to predict whether two mentions from a text, 
                which can be pronouns, nouns or noun phrases, are referring to the same entity. 
                """
            ),
            text_features=["text", 'span1_text', "span2_text"],
            label_column="label",
            label_classes=["false", 'true'],
            int_features=["span1_index", "span2_index"],
            citation=textwrap.dedent(_CITATION)
        ),
        BasqueGLUEConfig(
            name='intent',
            description=textwrap.dedent(
                """\
                This dataset contains utterance texts and intent annotations drawn from the 
                manually-annotated Facebook Multilingual Task Oriented Dataset (FMTOD) (Schuster 
                et al. 2019). Basque translated data was drawn from the datasets created for 
                Building a Task-oriented Dialog System for languages with no training data: the 
                Case for Basque (de Lacalle et al. 2020). The examples are annotated with one of 
                12 different intent classes corresponding to alarm, reminder or weather related 
                actions. 
                """
            ),
            text_features=["text"],
            label_column="label",
            label_classes=["alarm/cancel_alarm", "alarm/modify_alarm", "alarm/set_alarm",
                           "alarm/show_alarms", "alarm/snooze_alarm", "alarm/time_left_on_alarm",
                           "reminder/cancel_reminder", "reminder/set_reminder",
                           "reminder/show_reminders", "weather/checkSunrise",
                           "weather/checkSunset", "weather/find"],
            citation=textwrap.dedent(
                """\
                @inproceedings{lopez-de-lacalle-etal-2020-building,
                    title = "Building a Task-oriented Dialog System for Languages with no Training Data: the Case for {B}asque",
                    author = "L{\'o}pez de Lacalle, Maddalen  and
                      Saralegi, Xabier  and
                      San Vicente, I{\~n}aki",
                    booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
                    month = may,
                    year = "2020",
                    address = "Marseille, France",
                    publisher = "European Language Resources Association",
                    url = "https://aclanthology.org/2020.lrec-1.340",
                    pages = "2796--2802",
                    abstract = "This paper presents an approach for developing a task-oriented dialog system for less-resourced languages in scenarios where training data is not available. Both intent classification and slot filling are tackled. We project the existing annotations in rich-resource languages by means of Neural Machine Translation (NMT) and posterior word alignments. We then compare training on the projected monolingual data with direct model transfer alternatives. Intent Classifiers and slot filling sequence taggers are implemented using a BiLSTM architecture or by fine-tuning BERT transformer models. Models learnt exclusively from Basque projected data provide better accuracies for slot filling. Combining Basque projected train data with rich-resource languages data outperforms consistently models trained solely on projected data for intent classification. At any rate, we achieve competitive performance in both tasks, with accuracies of 81{\%} for intent classification and 77{\%} for slot filling.",
                    language = "English",
                    ISBN = "979-10-95546-34-4",
                }
                """
            )
        ),
        BasqueGLUEConfig(
            name='nerc_id',
            description=textwrap.dedent(
                """\
                This dataset contains sentences from the news domain with manually 
                annotated named entities. The data is the merge of EIEC (a dataset of a 
                collection of news wire articles from Euskaldunon Egunkaria newspaper, (Alegria 
                et al. 2004)), and newly annotated data from naiz.eus. The data is annotated 
                following the BIO annotation scheme over four categories: person, organization, 
                location, and miscellaneous. 
                """
            ),
            is_tokens=True,
            text_features=["tokens"],
            label_column="tags",
            label_classes=["O",
                           "B-PER",
                           "I-PER",
                           "B-LOC",
                           "I-LOC",
                           "B-ORG",
                           "I-ORG",
                           "B-MISC",
                           "I-MISC"],
            citation=textwrap.dedent(_CITATION)
        ),
        BasqueGLUEConfig(
            name='nerc_od',
            description=textwrap.dedent(
                """\
                This dataset contains sentences with manually annotated named entities. The 
                training data is the merge of EIEC (a dataset of a collection of news wire 
                articles from Euskaldunon Egunkaria newspaper, (Alegria et al. 2004)), and newly 
                annotated data from naiz.eus. The data is annotated following the BIO annotation 
                scheme over four categories: person, organization, location, and miscellaneous. 
                For validation and test sets, sentences from Wikipedia were annotated following 
                the same annotation guidelines.
                """
            ),
            is_tokens=True,
            text_features=["tokens"],
            label_column="tags",
            label_classes=["O",
                           "B-PER",
                           "I-PER",
                           "B-LOC",
                           "I-LOC",
                           "B-ORG",
                           "I-ORG",
                           "B-MISC",
                           "I-MISC"],
            citation=textwrap.dedent(_CITATION)
        ),
        BasqueGLUEConfig(
            name='qnli',
            description=textwrap.dedent(
                """\
                This task includes the QA dataset ElkarHizketak (Otegi et al. 2020), 
                a low resource conversational Question Answering (QA) dataset for Basque created 
                by native speaker volunteers. The dataset is built on top of Wikipedia sections 
                about popular people and organizations, and it contains around 400 dialogues and 
                1600 question and answer pairs. The task was adapted into a sentence-pair binary 
                classification task, following the design of QNLI for English (Wang et al. 
                2019). Each question-answer pair is given a label indicating whether the 
                answer is entailed by the question. 
                """
            ),
            text_features=["question", "sentence"],
            label_column="label",
            label_classes=["entailment", "not_entailment"],
            citation=textwrap.dedent(_CITATION)
        ),
        BasqueGLUEConfig(
            name='slot',
            description=textwrap.dedent(
                """\
                This dataset contains utterance texts and sequence intent argument 
                annotations designed for slot filling tasks, drawn from the manually-annotated 
                Facebook Multilingual Task Oriented Dataset (FMTOD) (Schuster et al. 2019). 
                Basque translated data was drawn from the datasets created for Building a 
                Task-oriented Dialog System for languages with no training data: the Case for 
                Basque (de Lacalle et al. 2020). The task is a sequence labelling task similar 
                to NERC, following BIO annotation scheme over 11 categories. 
                """
            ),
            is_tokens=True,
            text_features=["tokens"],
            label_column="tags",
            label_classes=["O",
                           "B-datetime",
                           "B-location",
                           "B-negation",
                           "B-alarm/alarm_modifier",
                           "B-alarm/recurring_period",
                           "B-reminder/noun",
                           "B-reminder/todo",
                           "B-reminder/reference",
                           "B-reminder/recurring_period",
                           "B-weather/attribute",
                           "B-weather/noun",
                           "I-datetime",
                           "I-location",
                           "I-negation",
                           "I-alarm/alarm_modifier",
                           "I-alarm/recurring_period",
                           "I-reminder/noun",
                           "I-reminder/todo",
                           "I-reminder/reference",
                           "I-reminder/recurring_period",
                           "I-weather/attribute",
                           "I-weather/noun"],
            citation=textwrap.dedent(
                """\
                @inproceedings{lopez-de-lacalle-etal-2020-building,
                    title = "Building a Task-oriented Dialog System for Languages with no Training Data: the Case for {B}asque",
                    author = "L{\'o}pez de Lacalle, Maddalen  and
                      Saralegi, Xabier  and
                      San Vicente, I{\~n}aki",
                    booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
                    month = may,
                    year = "2020",
                    address = "Marseille, France",
                    publisher = "European Language Resources Association",
                    url = "https://aclanthology.org/2020.lrec-1.340",
                    pages = "2796--2802",
                    abstract = "This paper presents an approach for developing a task-oriented dialog system for less-resourced languages in scenarios where training data is not available. Both intent classification and slot filling are tackled. We project the existing annotations in rich-resource languages by means of Neural Machine Translation (NMT) and posterior word alignments. We then compare training on the projected monolingual data with direct model transfer alternatives. Intent Classifiers and slot filling sequence taggers are implemented using a BiLSTM architecture or by fine-tuning BERT transformer models. Models learnt exclusively from Basque projected data provide better accuracies for slot filling. Combining Basque projected train data with rich-resource languages data outperforms consistently models trained solely on projected data for intent classification. At any rate, we achieve competitive performance in both tasks, with accuracies of 81{\%} for intent classification and 77{\%} for slot filling.",
                    language = "English",
                    ISBN = "979-10-95546-34-4",
                }
                """
            )
        ),
        BasqueGLUEConfig(
            name='vaxx',
            description=textwrap.dedent(
                """\
                The VaxxStance (Agerri et al., 2021) dataset originally provides texts and 
                stance annotations for social media texts around the anti-vaccine movement. 
                Texts are given a label indicating whether they express an AGAINST, FAVOR or
                NONE (neutral) stance towards the topic.
                """
            ),
            text_features=['text'],
            label_column="label",
            label_classes=['AGAINST', 'NONE', 'FAVOR'],
            citation=textwrap.dedent(
                """\
                @article{agerriVaxxStanceIberLEF20212021,
                  title = {{VaxxStance@IberLEF 2021: Overview of the Task on Going Beyond Text in Cross-Lingual Stance Detection}},
                  shorttitle = {{VaxxStance@IberLEF 2021}},
                  author = {Agerri, Rodrigo and Centeno, Roberto and Espinosa, Mar{\'i}a and de Landa, Joseba Fern{\'a}ndez and Rodrigo, {\'A}lvaro},
                  year = {2021},
                  month = sep,
                  journal = {Procesamiento del Lenguaje Natural},
                  volume = {67},
                  number = {0},
                  pages = {173--181},
                  issn = {1989-7553},
                  abstract = {This paper describes the VaxxStance task at IberLEF 2021. The task proposes to detect stance in Tweets referring to vaccines, a relevant and controversial topic in the current pandemia. The task is proposed in a multilingual setting, providing data for Basque and Spanish languages. The objective is to explore crosslingual approaches which also complement textual information with contextual features obtained from the social network. The results demonstrate that contextual information is crucial to obtain competitive results, especially across languages.},
                  copyright = {Copyright (c) 2021 Procesamiento del Lenguaje Natural},
                  langid = {spanish},
                }
                """
            )
        ),
        BasqueGLUEConfig(
            name='wic',
            description=textwrap.dedent(
                """\
                Word in Context or WiC (Pilehvar and Camacho-Collados 2019) is a word sense 
                disambiguation (WSD) task, designed as a particular form of sentence pair binary 
                classification. Given two text snippets and a polysemous word that appears in 
                both of them (the span of the word is marked in both snippets), the task is to 
                determine whether the word has the same sense in both sentences. This dataset is 
                based on the EPEC-EuSemcor (Pociello et al. 2011) sense-tagged corpus. 
                """
            ),
            text_features=['sentence1', 'sentence2', 'word'],
            int_features=['start1', 'start2', 'end1', 'end2'],
            label_column="label",
            label_classes=['false', 'true'],
            citation=textwrap.dedent(_CITATION)
        ),
    ]

    def _info(self):
        if self.config.is_tokens:
            features = {
                text_feature: datasets.Sequence(datasets.Value("string")) for text_feature in
                self.config.text_features
            }
            features[self.config.label_column] = datasets.Sequence(
                datasets.features.ClassLabel(names=self.config.label_classes)
            )
        else:
            features = {
                text_feature: datasets.Value("string") for text_feature in
                self.config.text_features
            }
            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        if self.config.int_features:
            for int_feature in self.config.int_features:
                features[int_feature] = datasets.Value("int32")
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            citation=self.config.citation,
        )
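
    # For illustration, the "bec" config produces features along these lines
    # (derived from the code above):
    #     {"text": Value("string"),
    #      "label": ClassLabel(names=["N", "NEU", "P"]),
    #      "idx": Value("int32")}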

    def _split_generators(self, dl_manager: DownloadManager):
        """
        Return SplitGenerators.
        """
        data_urls = _URLS[self.config.name]
        splits = []
        for split, sp_type in SPLITS.items():
            data_url = data_urls[split]
            if 'jsonl' in data_url:
                data_file = dl_manager.download(data_url)
            else:
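                # Only the WiC train split ships as a zip archive; extract it
                # and locate the jsonl file inside.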
                data_dir = dl_manager.download_and_extract(data_url)
                json_file = [f for f in os.listdir(data_dir) if f.endswith('jsonl')][0]
                data_file = os.path.join(data_dir, json_file)

            splits.append(
                datasets.SplitGenerator(
                    name=sp_type,
                    gen_kwargs={
                        "data_file": data_file
                    }
                )
            )
        return splits

    def _generate_examples(self, data_file):
        """
        Yield examples.
        """
        with open(data_file, encoding="utf8", mode="r") as f:
            id_ = 0
            for line in f:
                data = json.loads(line)

                if self.config.name == 'coref':
                    example = {
                        'text': data['text'],
                        'span1_text': data['target']['span1_text'],
                        'span2_text': data['target']['span2_text'],
                        'span1_index': int(data['target']['span1_index']),
                        'span2_index': int(data['target']['span2_index'])
                    }
                else:
                    example = {
                        feat: data[feat] for feat in self.config.text_features
                    }

                    if self.config.int_features:
                        for feat in self.config.int_features:
                            example[feat] = int(data[feat])

                example['idx'] = data['idx']

                label_data = data[self.config.label_column]
                if isinstance(label_data, bool):
                    label_data = str(label_data).lower()
                if self.config.is_tokens:
                    label = [self.config.label_map[tag] for tag in label_data]
                else:
                    label = self.config.label_map[label_data]
                example[self.config.label_column] = label

                # Filter out corrupted rows: only yield examples whose fields
                # all have values.
                if all(value is not None for value in example.values()):
                    yield id_, example

                id_ += 1
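
# For reference, the parsing above implies jsonl records shaped roughly like
# the following (field names come from the code; the values are invented):
#   standard configs: {"text": "...", "label": "N", "idx": 0}
#   coref: {"text": "...",
#           "target": {"span1_text": "...", "span2_text": "...",
#                      "span1_index": 0, "span2_index": 3},
#           "label": false, "idx": 0}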