Xueguang Ma committed on
Commit 0066e73
1 Parent(s): 7dcc6c2
Files changed (5)
  1. .gitattributes +3 -0
  2. nq-dev.jsonl.gz +3 -0
  3. nq-test.jsonl.gz +3 -0
  4. nq-train.jsonl.gz +3 -0
  5. wikipedia-nq.py +119 -0
.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ nq-train.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+ nq-dev.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+ nq-test.jsonl.gz filter=lfs diff=lfs merge=lfs -text
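The three new rules route the data files to Git LFS by exact filename, alongside the existing wildcard rules. A rough sketch of the matching idea, using Python's fnmatch as a stand-in for git's gitignore-style pattern matcher (the two are not identical, so treat this as illustrative only):

import fnmatch

# A subset of the .gitattributes patterns that carry filter=lfs after this commit.
lfs_patterns = ["*.zip", "*.zstandard", "*tfevents*",
                "nq-train.jsonl.gz", "nq-dev.jsonl.gz", "nq-test.jsonl.gz"]

def routed_to_lfs(filename):
    # fnmatch approximates (but does not exactly replicate) git's matching rules.
    return any(fnmatch.fnmatch(filename, p) for p in lfs_patterns)

assert routed_to_lfs("nq-train.jsonl.gz")
assert not routed_to_lfs("wikipedia-nq.py")  # the loading script stays plain text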
nq-dev.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f587050472b9bb51ad0c1cdc3519489a75a01d6f5409fc8627df089c1371ea38
+ size 150188008
nq-test.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82a4bb66f122c281b79975eedd0b338a027483ced4016c8ec2dd12ca768617fa
+ size 139320
nq-train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bb4cfff290839bdfd8247aded6fcad5aaba23edc7ea2e36a4f5e8f3916b0ed0
+ size 1355967938
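Each of the three additions above is stored as a Git LFS pointer rather than the data itself: the pointer records only the spec version, the object's sha256 (oid), and its size in bytes. A minimal sketch of checking a materialized file against its pointer; verify_lfs_object is a hypothetical helper, while the hash and size are copied from the nq-test.jsonl.gz pointer above:

import hashlib

def verify_lfs_object(path, expected_oid, expected_size):
    """Hypothetical check of a downloaded file against its LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# oid and size from the nq-test.jsonl.gz pointer in this commit.
ok = verify_lfs_object(
    "nq-test.jsonl.gz",
    "82a4bb66f122c281b79975eedd0b338a027483ced4016c8ec2dd12ca768617fa",
    139320,
)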
wikipedia-nq.py ADDED
@@ -0,0 +1,119 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Wikipedia NQ dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """
+ @inproceedings{karpukhin-etal-2020-dense,
+     title = "Dense Passage Retrieval for Open-Domain Question Answering",
+     author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov,
+       Sergey and Chen, Danqi and Yih, Wen-tau",
+     booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
+     month = nov,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/2020.emnlp-main.550",
+     doi = "10.18653/v1/2020.emnlp-main.550",
+     pages = "6769--6781",
+ }
+ """
+
+ _DESCRIPTION = "Dataset loading script for Wikipedia NQ"
+
+ _DATASET_URLS = {
+     'train': "https://huggingface.co/datasets/tevatron/wikipedia-nq/resolve/main/nq-train.jsonl.gz",
+     'dev': "https://huggingface.co/datasets/tevatron/wikipedia-nq/resolve/main/nq-dev.jsonl.gz",
+     'test': "https://huggingface.co/datasets/tevatron/wikipedia-nq/resolve/main/nq-test.jsonl.gz",
+ }
+
+
+ class WikipediaNq(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.0.1")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(version=VERSION,
+                                description="Wikipedia NQ train/dev/test datasets"),
+     ]
+
+     def _info(self):
+         features = datasets.Features({
+             'query_id': datasets.Value('string'),
+             'query': datasets.Value('string'),
+             'answers': [datasets.Value('string')],
+             'positive_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
+                  'title': datasets.Value('string')}
+             ],
+             'negative_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
+                  'title': datasets.Value('string')}
+             ],
+         })
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the columns of the dataset and their types.
+             features=features,
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license="",
+             # Citation for the dataset
+             citation=_CITATION,
+         )
84
+ def _split_generators(self, dl_manager):
85
+ downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
86
+ splits = [
87
+ datasets.SplitGenerator(
88
+ name="train",
89
+ gen_kwargs={
90
+ "filepath": downloaded_files["train"],
91
+ },
92
+ ),
93
+ datasets.SplitGenerator(
94
+ name='dev',
95
+ gen_kwargs={
96
+ "filepath": downloaded_files["dev"],
97
+ },
98
+ ),
99
+ datasets.SplitGenerator(
100
+ name='test',
101
+ gen_kwargs={
102
+ "filepath": downloaded_files["test"],
103
+ },
104
+ ),
105
+ ]
106
+ return splits
107
+
+     def _generate_examples(self, filepath):
+         """Yields (query_id, example) pairs from a decompressed JSONL file."""
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 data = json.loads(line)  # one JSON object per line
+                 if data.get('negative_passages') is None:  # normalize missing fields
+                     data['negative_passages'] = []
+                 if data.get('positive_passages') is None:
+                     data['positive_passages'] = []
+                 if data.get('answers') is None:
+                     data['answers'] = []
+                 yield data['query_id'], data
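With the loading script in place, the dataset should be loadable through the datasets library. A usage sketch, not tested against this exact revision; recent datasets releases may additionally require trust_remote_code=True for script-based datasets:

import datasets

# Split names ("train"/"dev"/"test") come from _split_generators above.
nq = datasets.load_dataset("tevatron/wikipedia-nq", split="dev")

example = nq[0]
print(example["query_id"], example["query"])
print(example["answers"])                        # list of answer strings
print(example["positive_passages"][0]["title"])  # each passage: docid/text/title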