Datasets:

Modalities:
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
24e96ba
1 Parent(s): 668b539

Delete loading script

Browse files
Files changed (1) hide show
  1. americas_nli.py +0 -177
americas_nli.py DELETED
@@ -1,177 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. Licensed under the Apache License, Version 2.0 (the "License");
3
- # you may not use this file except in compliance with the License.
4
- # You may obtain a copy of the License at
5
- #
6
- # http://www.apache.org/licenses/LICENSE-2.0
7
- #
8
- # Unless required by applicable law or agreed to in writing, software
9
- # distributed under the License is distributed on an "AS IS" BASIS,
10
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
- # See the License for the specific language governing permissions and
12
- # limitations under the License.
13
-
14
- # Lint as: python3
15
- """AmericasNLI: A NLI Corpus of 10 Indigenous Low-Resource Languages."""
16
-
17
-
18
- import csv
19
-
20
- import datasets
21
-
22
-
23
- _CITATION = """
24
- @article{DBLP:journals/corr/abs-2104-08726,
25
- author = {Abteen Ebrahimi and
26
- Manuel Mager and
27
- Arturo Oncevay and
28
- Vishrav Chaudhary and
29
- Luis Chiruzzo and
30
- Angela Fan and
31
- John Ortega and
32
- Ricardo Ramos and
33
- Annette Rios and
34
- Ivan Vladimir and
35
- Gustavo A. Gim{\'{e}}nez{-}Lugo and
36
- Elisabeth Mager and
37
- Graham Neubig and
38
- Alexis Palmer and
39
- Rolando A. Coto Solano and
40
- Ngoc Thang Vu and
41
- Katharina Kann},
42
- title = {AmericasNLI: Evaluating Zero-shot Natural Language Understanding of
43
- Pretrained Multilingual Models in Truly Low-resource Languages},
44
- journal = {CoRR},
45
- volume = {abs/2104.08726},
46
- year = {2021},
47
- url = {https://arxiv.org/abs/2104.08726},
48
- eprinttype = {arXiv},
49
- eprint = {2104.08726},
50
- timestamp = {Mon, 26 Apr 2021 17:25:10 +0200},
51
- biburl = {https://dblp.org/rec/journals/corr/abs-2104-08726.bib},
52
- bibsource = {dblp computer science bibliography, https://dblp.org}
53
- }
54
- """
55
-
56
- _DESCRIPTION = """\
57
- AmericasNLI is an extension of XNLI (Conneau et al., 2018) – a natural language inference (NLI) dataset covering 15 high-resource languages – to 10 low-resource indigenous languages spoken in the Americas: Ashaninka, Aymara, Bribri, Guarani, Nahuatl, Otomi, Quechua, Raramuri, Shipibo-Konibo, and Wixarika. As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels).
58
- """
59
-
60
- VERSION = datasets.Version("1.0.0", "")
61
- _DEV_DATA_URL = "https://raw.githubusercontent.com/nala-cub/AmericasNLI/main/dev.tsv"
62
- _TEST_DATA_URL = "https://raw.githubusercontent.com/nala-cub/AmericasNLI/main/test.tsv"
63
-
64
- _LANGUAGES = ("aym", "bzd", "cni", "gn", "hch", "nah", "oto", "quy", "shp", "tar")
65
-
66
-
67
class AmericasNLIConfig(datasets.BuilderConfig):
    """BuilderConfig for the AmericasNLI dataset.

    Args:
        language: One of aym, bzd, cni, gn, hch, nah, oto, quy, shp, tar,
            or the special value "all_languages".
        languages: Optional explicit list of languages; only consulted when
            ``language == "all_languages"`` (defaults to every known language).
        **kwargs: Keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, language: str, languages=None, **kwargs):
        super().__init__(**kwargs)
        self.language = language
        # A single-language config exposes exactly that one language; the
        # combined config exposes either the caller-supplied subset or the
        # full module-level language list.
        if language == "all_languages":
            self.languages = _LANGUAGES if languages is None else languages
        else:
            self.languages = [language]
83
-
84
-
85
class AmericasNLI(datasets.GeneratorBasedBuilder):
    """AmericasNLI: an NLI corpus for 10 Indigenous low-resource languages
    of the Americas, built as an extension of XNLI.

    There is one builder config per language plus an "all_languages" config
    whose examples additionally carry a ``language`` column.  Only
    validation and test splits are provided.
    """

    VERSION = VERSION
    BUILDER_CONFIG_CLASS = AmericasNLIConfig
    # One config per individual language, plus the combined config.
    BUILDER_CONFIGS = [
        AmericasNLIConfig(
            name=lang,
            language=lang,
            version=VERSION,
            description=f"Plain text import of AmericasNLI for the {lang} language",
        )
        for lang in _LANGUAGES
    ] + [
        AmericasNLIConfig(
            name="all_languages",
            language="all_languages",
            version=VERSION,
            description="Plain text import of AmericasNLI for all languages",
        )
    ]

    def _info(self):
        """Return dataset metadata.

        The "all_languages" config adds a leading ``language`` feature to the
        premise/hypothesis/label schema shared by all configs.
        """
        features = {
            "premise": datasets.Value("string"),
            "hypothesis": datasets.Value("string"),
            "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
        }
        if self.config.language == "all_languages":
            # Keep "language" as the first feature, matching the original schema.
            features = {"language": datasets.Value("string"), **features}
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            # No default supervised_keys (both premise and hypothesis are inputs).
            supervised_keys=None,
            homepage="https://github.com/nala-cub/AmericasNLI",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the dev/test TSV files and map them to the two splits."""
        dl_paths = dl_manager.download(
            {
                "dev_data": _DEV_DATA_URL,
                "test_data": _TEST_DATA_URL,
            }
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dl_paths["dev_data"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_paths["test_data"]},
            ),
        ]

    def _generate_examples(self, filepath: str):
        """Yield ``(index, example)`` pairs from one TSV file.

        Rows are filtered to the configured language; under the
        "all_languages" config every row is kept and the example includes
        the ``language`` column.
        """
        idx = 0
        with open(filepath, encoding="utf-8") as f:
            # QUOTE_NONE: the TSVs contain free text that may include quote
            # characters which must not be treated as CSV quoting.
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in reader:
                if row["language"] == self.config.language:
                    yield idx, {
                        "premise": row["premise"],
                        "hypothesis": row["hypothesis"],
                        "label": row["label"],
                    }
                    idx += 1
                elif self.config.language == "all_languages":
                    yield idx, {
                        "language": row["language"],
                        "premise": row["premise"],
                        "hypothesis": row["hypothesis"],
                        "label": row["label"],
                    }
                    idx += 1