Dataset card metadata:
- Tasks: Question Answering (sub-task: extractive-qa)
- Languages: English
- Size: 100K<n<1M
- ArXiv: 1704.05179
- License: unspecified
Commit 06907e4 (parent: 6122889)
Host data files (#4)
- Host data files (b0eb0fa6dd06f6e669e6ad30c7f5d0d69df27303)
- Update and optimize loading script (87af48010ec095193b57eb5feee084a50a28f824)
- Delete legacy dataset_infos.json (02b3d7df23e60d9f349201b0a4888ca9ab11c267)
- data/raw_jeopardy/000000-029999.zip +3 -0
- data/raw_jeopardy/030000-49999.zip +3 -0
- data/raw_jeopardy/050000-059999.zip +3 -0
- data/raw_jeopardy/060000-089999.zip +3 -0
- data/raw_jeopardy/090000-119999.zip +3 -0
- data/raw_jeopardy/120000-149999.zip +3 -0
- data/raw_jeopardy/150000-179999.zip +3 -0
- data/raw_jeopardy/180000-216929.zip +3 -0
- data/train_test_val/test.zip +3 -0
- data/train_test_val/train.zip +3 -0
- data/train_test_val/val.zip +3 -0
- dataset_infos.json +0 -1
- search_qa.py +22 -47
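With the archives now hosted in the repository, both configs load without the old Google Drive mirror. A minimal usage sketch (assuming the canonical search_qa dataset id on the Hub and a datasets version that still runs this loading script):

from datasets import load_dataset

# Curated splits: returns a DatasetDict with train/test/validation.
data = load_dataset("search_qa", "train_test_val")

# Full raw J! Archive crawl: a single train split.
raw = load_dataset("search_qa", "raw_jeopardy", split="train")
print(raw[0]["question"], "->", raw[0]["answer"])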
data/raw_jeopardy/000000-029999.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f53a2b34a3fd6ea47e3f457ceed64be20526b999ae0e95492f36006249aecc1
+size 534274946
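This entry, like the ten that follow, is a Git LFS pointer file: the repository tracks only this three-line stub (spec version, sha256 object id, byte size) while the zip itself lives in LFS storage. A sketch for checking a locally downloaded archive against its pointer (the path is taken from this commit; lfs_oid is a hypothetical helper, not part of any library):

import hashlib

def lfs_oid(path, chunk_size=1 << 20):
    # Git LFS object ids are plain sha256 digests of the file contents.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "2f53a2b34a3fd6ea47e3f457ceed64be20526b999ae0e95492f36006249aecc1"
assert lfs_oid("data/raw_jeopardy/000000-029999.zip") == expected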
data/raw_jeopardy/030000-49999.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:761ae95f7b8c8e430c87967e58b7f73ad028038dab74faf4186089b8624df541
+size 201603716
data/raw_jeopardy/050000-059999.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab81d6faf08788bbeedd9d42a516f1b9f3b3b38494da0296e02187bbe1f6e24b
+size 185783076
data/raw_jeopardy/060000-089999.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8d48fe96f1e2b98178d029226f45bdc0f4747cd42cf0d174040557df6a35873
+size 560579675
data/raw_jeopardy/090000-119999.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:873a7c5c9c2be3f884fe37fa75da0564cfbe15ea9d98ec3d24110266e1dbf5e3
+size 554781032
data/raw_jeopardy/120000-149999.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:743e66a962c0696b3cd2cd789bd2d08bbabdf0c3c25d69d3bef458689bf6a947
+size 304790927
data/raw_jeopardy/150000-179999.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e31e02e52e1da262bd4e37564cb5ea5df08b1d7c14510ee4e2a998949f1e5e7d
+size 305338965
data/raw_jeopardy/180000-216929.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb1c5d9b1e1e9c2521c98759b39b6c49b51de8ace5b2d87e20435ff1cd7ff861
+size 662352076
data/train_test_val/test.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab7e0222eff6420c3c378d97c15dc9d1d657abac34c5d1660dd38a5d075dab1c
+size 621941314
data/train_test_val/train.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b63af0c0dfe66b26e98ff3c3407b087efce96ecabeb510e8dd86d60cddf4aac4
+size 2233758217
data/train_test_val/val.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d219bad6ba199e9a2d244b2c7ea5b79638d05f4c0ae786e6c53bd3ee282d238c
+size 314027537
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-
{"raw_jeopardy": {"description": "\n# pylint: disable=line-too-long\nWe publicly release a new large-scale dataset, called SearchQA, for machine comprehension, or question-answering. Unlike recently released datasets, such as DeepMind \nCNN/DailyMail and SQuAD, the proposed SearchQA was constructed to reflect a full pipeline of general question-answering. That is, we start not from an existing article \nand generate a question-answer pair, but start from an existing question-answer pair, crawled from J! Archive, and augment it with text snippets retrieved by Google. \nFollowing this approach, we built SearchQA, which consists of more than 140k question-answer pairs with each pair having 49.6 snippets on average. Each question-answer-context\n tuple of the SearchQA comes with additional meta-data such as the snippet's URL, which we believe will be valuable resources for future research. We conduct human evaluation \n as well as test two baseline methods, one simple word selection and the other deep learning based, on the SearchQA. We show that there is a meaningful gap between the human \n and machine performances. This suggests that the proposed dataset could well serve as a benchmark for question-answering.\n\n", "citation": "\n @article{DBLP:journals/corr/DunnSHGCC17,\n author = {Matthew Dunn and\n Levent Sagun and\n Mike Higgins and\n V. Ugur G{\"{u}}ney and\n Volkan Cirik and\n Kyunghyun Cho},\n title = {SearchQA: {A} New Q{\\&}A Dataset Augmented with Context from a\n Search Engine},\n journal = {CoRR},\n volume = {abs/1704.05179},\n year = {2017},\n url = {http://arxiv.org/abs/1704.05179},\n archivePrefix = {arXiv},\n eprint = {1704.05179},\n timestamp = {Mon, 13 Aug 2018 16:47:09 +0200},\n biburl = {https://dblp.org/rec/journals/corr/DunnSHGCC17.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n }\n\n", "homepage": "https://github.com/nyu-dl/dl4ir-searchQA", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "air_date": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "round": {"dtype": "string", "id": null, "_type": "Value"}, "show_number": {"dtype": "int32", "id": null, "_type": "Value"}, "search_results": {"feature": {"urls": {"dtype": "string", "id": null, "_type": "Value"}, "snippets": {"dtype": "string", "id": null, "_type": "Value"}, "titles": {"dtype": "string", "id": null, "_type": "Value"}, "related_links": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "search_qa", "config_name": "raw_jeopardy", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7770972348, "num_examples": 216757, "dataset_name": "search_qa"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1U7WdBpd9kJ85S7BbBhWUSiy9NnXrKdO6": {"num_bytes": 3314386157, "checksum": "daaf1ddbb0c34c49832f6c8c26c9d59222085d45c7740425ccad9e38a9232cb4"}}, "download_size": 3314386157, "dataset_size": 7770972348, "size_in_bytes": 11085358505}, "train_test_val": {"description": "\n# pylint: disable=line-too-long\nWe publicly release a new large-scale dataset, called SearchQA, for machine comprehension, or question-answering. 
Unlike recently released datasets, such as DeepMind \nCNN/DailyMail and SQuAD, the proposed SearchQA was constructed to reflect a full pipeline of general question-answering. That is, we start not from an existing article \nand generate a question-answer pair, but start from an existing question-answer pair, crawled from J! Archive, and augment it with text snippets retrieved by Google. \nFollowing this approach, we built SearchQA, which consists of more than 140k question-answer pairs with each pair having 49.6 snippets on average. Each question-answer-context\n tuple of the SearchQA comes with additional meta-data such as the snippet's URL, which we believe will be valuable resources for future research. We conduct human evaluation \n as well as test two baseline methods, one simple word selection and the other deep learning based, on the SearchQA. We show that there is a meaningful gap between the human \n and machine performances. This suggests that the proposed dataset could well serve as a benchmark for question-answering.\n\n", "citation": "\n @article{DBLP:journals/corr/DunnSHGCC17,\n author = {Matthew Dunn and\n Levent Sagun and\n Mike Higgins and\n V. Ugur G{\"{u}}ney and\n Volkan Cirik and\n Kyunghyun Cho},\n title = {SearchQA: {A} New Q{\\&}A Dataset Augmented with Context from a\n Search Engine},\n journal = {CoRR},\n volume = {abs/1704.05179},\n year = {2017},\n url = {http://arxiv.org/abs/1704.05179},\n archivePrefix = {arXiv},\n eprint = {1704.05179},\n timestamp = {Mon, 13 Aug 2018 16:47:09 +0200},\n biburl = {https://dblp.org/rec/journals/corr/DunnSHGCC17.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n }\n\n", "homepage": "https://github.com/nyu-dl/dl4ir-searchQA", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "air_date": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "round": {"dtype": "string", "id": null, "_type": "Value"}, "show_number": {"dtype": "int32", "id": null, "_type": "Value"}, "search_results": {"feature": {"urls": {"dtype": "string", "id": null, "_type": "Value"}, "snippets": {"dtype": "string", "id": null, "_type": "Value"}, "titles": {"dtype": "string", "id": null, "_type": "Value"}, "related_links": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "search_qa", "config_name": "train_test_val", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5303005740, "num_examples": 151295, "dataset_name": "search_qa"}, "test": {"name": "test", "num_bytes": 1466749978, "num_examples": 43228, "dataset_name": "search_qa"}, "validation": {"name": "validation", "num_bytes": 740962715, "num_examples": 21613, "dataset_name": "search_qa"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1aHPVfC5TrlnUjehtagVZoDfq4VccgaNT": {"num_bytes": 3148550732, "checksum": "1f547df8b00e919ba692ca8c133462d358a89ee6b15a8c65c40efe006ed6c4eb"}}, "download_size": 3148550732, "dataset_size": 7510718433, "size_in_bytes": 10659269165}}
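The deleted JSON above still records the example schema that the loading script defines in code. Reconstructed as a datasets.Features declaration (field names and dtypes copied from the deleted file; a sketch, not the script's literal source):

import datasets

features = datasets.Features(
    {
        "category": datasets.Value("string"),
        "air_date": datasets.Value("string"),
        "question": datasets.Value("string"),
        "value": datasets.Value("string"),
        "answer": datasets.Value("string"),
        "round": datasets.Value("string"),
        "show_number": datasets.Value("int32"),
        "search_results": datasets.Sequence(
            {
                "urls": datasets.Value("string"),
                "snippets": datasets.Value("string"),
                "titles": datasets.Value("string"),
                "related_links": datasets.Value("string"),
            }
        ),
    }
)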
search_qa.py
CHANGED
@@ -14,11 +14,10 @@
 # limitations under the License.
 
 # Lint as: python3
-"""
-
+"""SearchQA dataset."""
 
+import itertools
 import json
-import os
 
 import datasets
 
@@ -57,8 +56,21 @@ Following this approach, we built SearchQA, which consists of more than 140k que
 """
 
 _DL_URLS = {
-    "raw_jeopardy": "https://drive.google.com/uc?export=download&id=1U7WdBpd9kJ85S7BbBhWUSiy9NnXrKdO6",
-    "train_test_val": "https://drive.google.com/uc?export=download&id=1aHPVfC5TrlnUjehtagVZoDfq4VccgaNT",
+    "raw_jeopardy": [
+        "data/raw_jeopardy/000000-029999.zip",
+        "data/raw_jeopardy/030000-49999.zip",
+        "data/raw_jeopardy/050000-059999.zip",
+        "data/raw_jeopardy/060000-089999.zip",
+        "data/raw_jeopardy/090000-119999.zip",
+        "data/raw_jeopardy/120000-149999.zip",
+        "data/raw_jeopardy/150000-179999.zip",
+        "data/raw_jeopardy/180000-216929.zip",
+    ],
+    "train_test_val": {
+        "train": "data/train_test_val/train.zip",
+        "test": "data/train_test_val/test.zip",
+        "validation": "data/train_test_val/val.zip",
+    },
 }
 # pylint: enable=line-too-long
 
@@ -110,59 +122,22 @@ class SearchQa(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-
+        data_dirs = dl_manager.download_and_extract(_DL_URLS[self.config.name])
         if self.config.name == "raw_jeopardy":
-            filepath = dl_manager.download_and_extract(_DL_URLS["raw_jeopardy"])
-            sub_folders = sorted(os.listdir(os.path.join(filepath, "jeopardy")))
-            all_files = []
-            for zip_folder in sub_folders:
-                if "lock" in zip_folder:
-                    continue
-                zip_folder_path = os.path.join(filepath, "jeopardy", zip_folder)
-                file_path = dl_manager.extract(zip_folder_path)
-                zip_folder = zip_folder.split(".")[0]
-                if os.path.isdir(os.path.join(file_path, zip_folder)):
-                    file_path = os.path.join(file_path, zip_folder)
-
-                else:
-                    # in some cases the subfolder name contains sapces as 050000 - 059999 and 050000-059999
-                    parts = zip_folder.split("-")
-                    zip_folder = parts[0] + " - " + parts[1]
-                    if os.path.isdir(os.path.join(file_path, zip_folder)):
-                        file_path = os.path.join(file_path, zip_folder)
-
-                files = sorted(os.listdir(file_path))
-
-                files_paths = [os.path.join(file_path, file) for file in files if "__MACOSX" not in file]
-                all_files.extend(files_paths)
-
+            filepaths = itertools.chain.from_iterable(dl_manager.iter_files(data_dir) for data_dir in data_dirs)
             return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": all_files}),
+                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}),
             ]
         elif self.config.name == "train_test_val":
-            filepath = dl_manager.download_and_extract(_DL_URLS["train_test_val"])
-            train_path = dl_manager.extract(os.path.join(filepath, "data_json", "train.zip"))
-            test_path = dl_manager.extract(os.path.join(filepath, "data_json", "test.zip"))
-            val_path = dl_manager.extract(os.path.join(filepath, "data_json", "val.zip"))
-
-            train_files = [os.path.join(train_path, file) for file in sorted(os.listdir(train_path))]
-            test_files = [os.path.join(test_path, file) for file in sorted(os.listdir(test_path))]
-            val_files = [os.path.join(val_path, file) for file in sorted(os.listdir(val_path))]
             return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files}),
-                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files}),
-                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files}),
+                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": dl_manager.iter_files(data_dirs[split])})
+                for split in (datasets.Split.TRAIN, datasets.Split.TEST, datasets.Split.VALIDATION)
             ]
 
     def _generate_examples(self, filepaths):
         """Yields examples."""
-        # TODO(searchQa): Yields (key, example) tuples from the dataset
         for i, filepath in enumerate(filepaths):
             with open(filepath, encoding="utf-8") as f:
-
                 data = json.load(f)
                 category = data["category"]
                 air_date = data["air_date"]