Datasets: tydiqa
Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Libraries: Datasets, Dask

albertvillanova (HF staff) committed
Commit da78f23 · 1 Parent(s): 824c1b7

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (636eb9785c3332205c0e8ae31af18af177e63701)
- Add 'secondary_task' config data files (3b1697c5efff0198278f076247d0615805c78e18)
- Delete loading script (843ef534eb78f0eb7331a8389e2f8a7b4359d5de)
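
With the shards stored as Parquet and declared in the card's `configs` metadata (see the README diff below), the dataset now loads directly from the data files instead of executing the deleted loading script. A minimal sketch of post-conversion usage, assuming the canonical `tydiqa` repo id on the Hub:

```python
from datasets import load_dataset

# Config names come from the `configs` section added to README.md below;
# the library resolves them to the Parquet shards via the data_files globs.
gold = load_dataset("tydiqa", "secondary_task", split="validation")
print(gold[0]["question"])
print(gold[0]["answers"])
```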

README.md CHANGED
@@ -1,5 +1,4 @@
 ---
-pretty_name: TyDi QA
 annotations_creators:
 - crowdsourced
 language_creators:
@@ -29,6 +28,7 @@ task_categories:
 task_ids:
 - extractive-qa
 paperswithcode_id: tydi-qa
+pretty_name: TyDi QA
 dataset_info:
 - config_name: primary_task
   features:
@@ -60,13 +60,13 @@ dataset_info:
       dtype: string
   splits:
   - name: train
-    num_bytes: 5550574617
+    num_bytes: 5550573801
     num_examples: 166916
   - name: validation
-    num_bytes: 484380443
+    num_bytes: 484380347
     num_examples: 18670
-  download_size: 1953887429
-  dataset_size: 6034955060
+  download_size: 2912112378
+  dataset_size: 6034954148
 - config_name: secondary_task
   features:
   - name: id
@@ -85,13 +85,26 @@ dataset_info:
       dtype: int32
   splits:
   - name: train
-    num_bytes: 52948607
+    num_bytes: 52948467
     num_examples: 49881
   - name: validation
-    num_bytes: 5006461
+    num_bytes: 5006433
     num_examples: 5077
-  download_size: 1953887429
-  dataset_size: 57955068
+  download_size: 29402238
+  dataset_size: 57954900
+configs:
+- config_name: primary_task
+  data_files:
+  - split: train
+    path: primary_task/train-*
+  - split: validation
+    path: primary_task/validation-*
+- config_name: secondary_task
+  data_files:
+  - split: train
+    path: secondary_task/train-*
+  - split: validation
+    path: secondary_task/validation-*
 ---
 
 # Dataset Card for "tydiqa"
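
The new `configs` block above is what the `datasets` library reads to enumerate configurations and map splits to Parquet globs. A quick way to check that the metadata round-trips, again assuming the `tydiqa` repo id:

```python
from datasets import get_dataset_config_names, load_dataset_builder

# Should list the two configs declared in the YAML above.
print(get_dataset_config_names("tydiqa"))  # ['primary_task', 'secondary_task']

# The builder exposes the sizes recorded in dataset_info without
# downloading data (populated from the card's YAML, per the diff above).
builder = load_dataset_builder("tydiqa", "secondary_task")
print(builder.info.splits["train"].num_examples)  # 49881
```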
primary_task/train-00000-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:700742a20446def778bc9a0cbc3b352767e6744a5095cade74916aa4bec8545f
+size 218765187

primary_task/train-00001-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:669ea731c5beac5e25ce13a05473ff8b5002e2c7cfbbc28384d7a0a8732666c6
+size 224918293

primary_task/train-00002-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e044ed9f7998869c0eb4166f3dcf319f9723021c040a035cdf073f8889c33e30
+size 224137195

primary_task/train-00003-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a0ab58b585872d023beb40b31c24a042674ccb138fb89553a7b6a58e71c99c1
+size 226849712

primary_task/train-00004-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bf5aee844705343ba65b277966c66fe134b78c265f813975d796033ba78ba6a
+size 226002108

primary_task/train-00005-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a56b52b3edcfa23b2c416f88b65722571b80ec74f5268ad3f32d7f3eb913c00
+size 224119388

primary_task/train-00006-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0267fb184abc39c0425b021ac448238af819ae1d07b1f24d632cea9edbd25efd
+size 224002879

primary_task/train-00007-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67b81b4c366631c672c17cb3309338b7ed5e10acad334a109ce2c9e9086e0275
+size 225694370

primary_task/train-00008-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8c9301416094e8f828a32cd3c50ddede5ce0530d2782aeaf38d265d047e85ac
+size 220991562

primary_task/train-00009-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b946277ab62d31073d73320d8dcf753462d9d9af9eb3d21f3fe69285ddf1ed94
+size 222688638

primary_task/train-00010-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dca1aa4b9891b8ca927e93358f3db928c18fd195c5406644506c9df9f01a0938
+size 225095519

primary_task/train-00011-of-00012.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:572162a06b6390dbfe94889cdb984dddaf2105ce140f12619a766da7c23bf6d4
+size 216812403

primary_task/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e9557a6ea4bfd65f381c64b91122c90554e4b00f86e15a8b4f1353be1c81510
+size 232035124

secondary_task/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8139944554bcd89f6c4b167b93c9a63ec44a6d97b8caebe049061f78fcc0e786
+size 26918058

secondary_task/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38760fbfc8fa4d0765026d56cfbb65a39231ad8cf5afef40f9c962a26bd3f94d
+size 2484180
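
Because the shards are ordinary Parquet files (the entries above are just git-LFS pointer stubs), they can also be read outside `datasets`, e.g. with Dask as listed under Libraries. A sketch, assuming `huggingface_hub` is installed so the `hf://` fsspec filesystem resolves:

```python
import dask.dataframe as dd

# Lazily read all 12 primary-task training shards straight from the Hub.
ddf = dd.read_parquet("hf://datasets/tydiqa/primary_task/train-*.parquet")
print(ddf.npartitions, list(ddf.columns))
```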
tydiqa.py DELETED
@@ -1,268 +0,0 @@
-"""TODO(tydiqa): Add a description here."""
-
-
-import json
-import textwrap
-
-import datasets
-from datasets.tasks import QuestionAnsweringExtractive
-
-
-# TODO(tydiqa): BibTeX citation
-_CITATION = """\
-@article{tydiqa,
-title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
-author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki}
-year = {2020},
-journal = {Transactions of the Association for Computational Linguistics}
-}
-"""
-
-# TODO(tydiqa):
-_DESCRIPTION = """\
-TyDi QA is a question answering dataset covering 11 typologically diverse languages with 204K question-answer pairs.
-The languages of TyDi QA are diverse with regard to their typology -- the set of linguistic features that each language
-expresses -- such that we expect models performing well on this set to generalize across a large number of the languages
-in the world. It contains language phenomena that would not be found in English-only corpora. To provide a realistic
-information-seeking task and avoid priming effects, questions are written by people who want to know the answer, but
-don’t know the answer yet, (unlike SQuAD and its descendents) and the data is collected directly in each language without
-the use of translation (unlike MLQA and XQuAD).
-"""
-
-_URL = "https://storage.googleapis.com/tydiqa/"
-_PRIMARY_URLS = {
-    "train": _URL + "v1.0/tydiqa-v1.0-train.jsonl.gz",
-    "dev": _URL + "v1.0/tydiqa-v1.0-dev.jsonl.gz",
-}
-_SECONDARY_URLS = {
-    "train": _URL + "v1.1/tydiqa-goldp-v1.1-train.json",
-    "dev": _URL + "v1.1/tydiqa-goldp-v1.1-dev.json",
-}
-
-
-class TydiqaConfig(datasets.BuilderConfig):
-
-    """BuilderConfig for Tydiqa"""
-
-    def __init__(self, **kwargs):
-        """
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(TydiqaConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-
-
-class Tydiqa(datasets.GeneratorBasedBuilder):
-    """TODO(tydiqa): Short description of my dataset."""
-
-    # TODO(tydiqa): Set up version.
-    VERSION = datasets.Version("0.1.0")
-    BUILDER_CONFIGS = [
-        TydiqaConfig(
-            name="primary_task",
-            description=textwrap.dedent(
-                """\
-                Passage selection task (SelectP): Given a list of the passages in the article, return either (a) the index of
-                the passage that answers the question or (b) NULL if no such passage exists.
-                Minimal answer span task (MinSpan): Given the full text of an article, return one of (a) the start and end
-                byte indices of the minimal span that completely answers the question; (b) YES or NO if the question requires
-                a yes/no answer and we can draw a conclusion from the passage; (c) NULL if it is not possible to produce a
-                minimal answer for this question."""
-            ),
-        ),
-        TydiqaConfig(
-            name="secondary_task",
-            description=textwrap.dedent(
-                """Gold passage task (GoldP): Given a passage that is guaranteed to contain the
-                answer, predict the single contiguous span of characters that answers the question. This is more similar to
-                existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
-                This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing
-                a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,
-                XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:
-                only the gold answer passage is provided rather than the entire Wikipedia article;
-                unanswerable questions have been discarded, similar to MLQA and XQuAD;
-                we evaluate with the SQuAD 1.1 metrics like XQuAD; and
-                Thai and Japanese are removed since the lack of whitespace breaks some tools.
-                """
-            ),
-        ),
-    ]
-
-    def _info(self):
-        # TODO(tydiqa): Specifies the datasets.DatasetInfo object
-        if self.config.name == "primary_task":
-            return datasets.DatasetInfo(
-                # This is the description that will appear on the datasets page.
-                description=_DESCRIPTION,
-                # datasets.features.FeatureConnectors
-                features=datasets.Features(
-                    {
-                        "passage_answer_candidates": datasets.features.Sequence(
-                            {
-                                "plaintext_start_byte": datasets.Value("int32"),
-                                "plaintext_end_byte": datasets.Value("int32"),
-                            }
-                        ),
-                        "question_text": datasets.Value("string"),
-                        "document_title": datasets.Value("string"),
-                        "language": datasets.Value("string"),
-                        "annotations": datasets.features.Sequence(
-                            {
-                                # 'annotation_id': datasets.Value('variant'),
-                                "passage_answer_candidate_index": datasets.Value("int32"),
-                                "minimal_answers_start_byte": datasets.Value("int32"),
-                                "minimal_answers_end_byte": datasets.Value("int32"),
-                                "yes_no_answer": datasets.Value("string"),
-                            }
-                        ),
-                        "document_plaintext": datasets.Value("string"),
-                        # 'example_id': datasets.Value('variant'),
-                        "document_url": datasets.Value("string")
-                        # These are the features of your dataset like images, labels ...
-                    }
-                ),
-                # If there's a common (input, target) tuple from the features,
-                # specify them here. They'll be used if as_supervised=True in
-                # builder.as_dataset.
-                supervised_keys=None,
-                # Homepage of the dataset for documentation
-                homepage="https://github.com/google-research-datasets/tydiqa",
-                citation=_CITATION,
-            )
-        elif self.config.name == "secondary_task":
-            return datasets.DatasetInfo(
-                description=_DESCRIPTION,
-                features=datasets.Features(
-                    {
-                        "id": datasets.Value("string"),
-                        "title": datasets.Value("string"),
-                        "context": datasets.Value("string"),
-                        "question": datasets.Value("string"),
-                        "answers": datasets.features.Sequence(
-                            {
-                                "text": datasets.Value("string"),
-                                "answer_start": datasets.Value("int32"),
-                            }
-                        ),
-                    }
-                ),
-                # No default supervised_keys (as we have to pass both question
-                # and context as input).
-                supervised_keys=None,
-                homepage="https://github.com/google-research-datasets/tydiqa",
-                citation=_CITATION,
-                task_templates=[
-                    QuestionAnsweringExtractive(
-                        question_column="question", context_column="context", answers_column="answers"
-                    )
-                ],
-            )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(tydiqa): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        primary_downloaded = dl_manager.download_and_extract(_PRIMARY_URLS)
-        secondary_downloaded = dl_manager.download_and_extract(_SECONDARY_URLS)
-        if self.config.name == "primary_task":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": primary_downloaded["train"]},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": primary_downloaded["dev"]},
-                ),
-            ]
-        elif self.config.name == "secondary_task":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": secondary_downloaded["train"]},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": secondary_downloaded["dev"]},
-                ),
-            ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(tydiqa): Yields (key, example) tuples from the dataset
-        if self.config.name == "primary_task":
-            with open(filepath, encoding="utf-8") as f:
-                for id_, row in enumerate(f):
-                    data = json.loads(row)
-                    passages = data["passage_answer_candidates"]
-                    end_byte = [passage["plaintext_end_byte"] for passage in passages]
-                    start_byte = [passage["plaintext_start_byte"] for passage in passages]
-                    title = data["document_title"]
-                    lang = data["language"]
-                    question = data["question_text"]
-                    annotations = data["annotations"]
-                    # annot_ids = [annotation["annotation_id"] for annotation in annotations]
-                    yes_no_answers = [annotation["yes_no_answer"] for annotation in annotations]
-                    min_answers_end_byte = [
-                        annotation["minimal_answer"]["plaintext_end_byte"] for annotation in annotations
-                    ]
-                    min_answers_start_byte = [
-                        annotation["minimal_answer"]["plaintext_start_byte"] for annotation in annotations
-                    ]
-                    passage_cand_answers = [
-                        annotation["passage_answer"]["candidate_index"] for annotation in annotations
-                    ]
-                    doc = data["document_plaintext"]
-                    # example_id = data["example_id"]
-                    url = data["document_url"]
-                    yield id_, {
-                        "passage_answer_candidates": {
-                            "plaintext_start_byte": start_byte,
-                            "plaintext_end_byte": end_byte,
-                        },
-                        "question_text": question,
-                        "document_title": title,
-                        "language": lang,
-                        "annotations": {
-                            # 'annotation_id': annot_ids,
-                            "passage_answer_candidate_index": passage_cand_answers,
-                            "minimal_answers_start_byte": min_answers_start_byte,
-                            "minimal_answers_end_byte": min_answers_end_byte,
-                            "yes_no_answer": yes_no_answers,
-                        },
-                        "document_plaintext": doc,
-                        # 'example_id': example_id,
-                        "document_url": url,
-                    }
-        elif self.config.name == "secondary_task":
-            with open(filepath, encoding="utf-8") as f:
-                data = json.load(f)
-                for article in data["data"]:
-                    title = article.get("title", "").strip()
-                    for paragraph in article["paragraphs"]:
-                        context = paragraph["context"].strip()
-                        for qa in paragraph["qas"]:
-                            question = qa["question"].strip()
-                            id_ = qa["id"]
-
-                            answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                            answers = [answer["text"].strip() for answer in qa["answers"]]
-
-                            # Features currently used are "context", "question", and "answers".
-                            # Others are extracted here for the ease of future expansions.
-                            yield id_, {
-                                "title": title,
-                                "context": context,
-                                "question": question,
-                                "id": id_,
-                                "answers": {
-                                    "answer_start": answer_starts,
-                                    "text": answers,
-                                },
-                            }
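
For reference, the primary task encodes minimal answers as byte offsets into `document_plaintext`, as the deleted script's config description explains, so recovering the answer text means slicing the UTF-8 bytes rather than the string. A sketch, assuming the usual TyDi QA convention that -1 marks a null span:

```python
from datasets import load_dataset

ex = load_dataset("tydiqa", "primary_task", split="validation")[0]
ann = ex["annotations"]
start = ann["minimal_answers_start_byte"][0]
end = ann["minimal_answers_end_byte"][0]
if start != -1:  # -1 = no minimal answer (assumed convention)
    # Offsets are byte positions, so slice the encoded document.
    answer = ex["document_plaintext"].encode("utf-8")[start:end].decode("utf-8")
    print(ex["question_text"], "->", answer)
```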