parquet-converter committed
Commit 8ede5c5
1 parent: 37a7d5b

Update parquet files

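With this conversion, the splits ship as plain parquet under the `default` config, so the dataset loads without the deleted `wikipedia-nq.py` script. A minimal sketch, assuming the `Tevatron/wikipedia-nq` repo id from the removed script's `_DATASET_URLS` and that the Hub exposes the converted files under the usual split names:

```python
from datasets import load_dataset

# Loads the auto-converted parquet files; no custom builder script
# is needed now that wikipedia-nq.py is gone.
nq = load_dataset("Tevatron/wikipedia-nq")

print(nq)                     # expected splits: train, dev, test
print(nq["dev"][0]["query"])  # schema matches the old script's features
```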
nq-dev.jsonl.gz → default/wikipedia-nq-dev.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f587050472b9bb51ad0c1cdc3519489a75a01d6f5409fc8627df089c1371ea38
- size 150188008
+ oid sha256:cedbf05344303ccbfd343e0e458f0d021d4712d9f77ccc17cba2f61b0c2b7008
+ size 241692842
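Each block above is a Git LFS pointer, not the parquet data itself: the blob is addressed by its SHA-256 digest and byte size. A small self-contained sketch of checking a downloaded file against its pointer, using the oid and size from the new `wikipedia-nq-dev.parquet` entry (standard library only):

```python
import hashlib

# Pointer values copied from the diff above for default/wikipedia-nq-dev.parquet.
EXPECTED_OID = "cedbf05344303ccbfd343e0e458f0d021d4712d9f77ccc17cba2f61b0c2b7008"
EXPECTED_SIZE = 241692842

def matches_pointer(path: str, oid: str = EXPECTED_OID, size: int = EXPECTED_SIZE) -> bool:
    digest = hashlib.sha256()
    total = 0
    with open(path, "rb") as f:
        # Stream in 1 MiB chunks so the ~240 MB file is never fully in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == oid and total == size

# matches_pointer("default/wikipedia-nq-dev.parquet")  # True for the real LFS object
```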
nq-test.jsonl.gz → default/wikipedia-nq-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:82a4bb66f122c281b79975eedd0b338a027483ced4016c8ec2dd12ca768617fa
- size 139320
+ oid sha256:69f27f0e1d8f3cd507e2f7e7c833b411c23c0f0abdd3e30dcf25f883c622d86c
+ size 241129
nq-train.jsonl.gz → default/wikipedia-nq-train-00000-of-00008.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9bb4cfff290839bdfd8247aded6fcad5aaba23edc7ea2e36a4f5e8f3916b0ed0
- size 1355967938
+ oid sha256:34f4c4036ae7be20d2be9cb64b24c95e14a65f4b4a4195c0adfa092b40346715
+ size 297961232
default/wikipedia-nq-train-00001-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d7aff68b0f4d72ab7a947656f485ec4ac6f92bb52576bc702a5edcc3ed4a291
+ size 298004753
default/wikipedia-nq-train-00002-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6f18dd600b26ff16dec513a3f1ad3cdcbbefc22c220343d97ec2973f56b388d
+ size 297878889
default/wikipedia-nq-train-00003-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc8fa7127f8f32bb8a91fe0b0040de510ef347fd9bbd3480a1c707afe1ea5d1a
+ size 297707015
default/wikipedia-nq-train-00004-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b38551dfe8448f04280e127c90756a3d0d199a55ea25272ee13e775a885f7cff
+ size 297582494
default/wikipedia-nq-train-00005-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81172c5a92241007ccfcc27e7f77f3c66d8dc5595ed16b0859d97b201d21f96e
+ size 297618645
default/wikipedia-nq-train-00006-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f58c3e6f09b2965b9b4a030f135ad4c4e20c85bd6696dabf9a06dd03b5b92a36
+ size 298144944
default/wikipedia-nq-train-00007-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71df8dd0c7c771fb91b665218fe81260d7a1febfde798e7d56ac6d5c48f5f1ab
+ size 97553009
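The train split is now sharded into eight parquet files of roughly 300 MB each instead of one 1.36 GB gzipped JSONL. A sketch of reading all shards locally with pyarrow, assuming the `default/` directory has been downloaded as-is; shard order follows the `-0000N-of-00008` naming:

```python
import glob

import pyarrow as pa
import pyarrow.parquet as pq

# Hypothetical local path: adjust to wherever the repo was cloned or downloaded.
shards = sorted(glob.glob("default/wikipedia-nq-train-*-of-00008.parquet"))

# All shards share one schema, so the per-file tables concatenate cleanly.
train = pa.concat_tables(pq.read_table(path) for path in shards)
print(train.num_rows, train.schema.names)
```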
wikipedia-nq.py DELETED
@@ -1,111 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """Wikipedia NQ dataset."""
-
- import json
-
- import datasets
-
- _CITATION = """
- @inproceedings{karpukhin-etal-2020-dense,
-     title = "Dense Passage Retrieval for Open-Domain Question Answering",
-     author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov,
-     Sergey and Chen, Danqi and Yih, Wen-tau",
-     booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
-     month = nov,
-     year = "2020",
-     address = "Online",
-     publisher = "Association for Computational Linguistics",
-     url = "https://www.aclweb.org/anthology/2020.emnlp-main.550",
-     doi = "10.18653/v1/2020.emnlp-main.550",
-     pages = "6769--6781",
- }
- """
-
- _DESCRIPTION = "dataset load script for Wikipedia NQ"
-
- _DATASET_URLS = {
-     'train': "https://huggingface.co/datasets/Tevatron/wikipedia-nq/resolve/main/nq-train.jsonl.gz",
-     'dev': "https://huggingface.co/datasets/Tevatron/wikipedia-nq/resolve/main/nq-dev.jsonl.gz",
-     'test': "https://huggingface.co/datasets/Tevatron/wikipedia-nq/resolve/main/nq-test.jsonl.gz",
- }
-
-
- class WikipediaNq(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("0.0.1")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(version=VERSION,
-                                description="Wikipedia NQ train/dev/test datasets"),
-     ]
-
-     def _info(self):
-         features = datasets.Features({
-             'query_id': datasets.Value('string'),
-             'query': datasets.Value('string'),
-             'answers': [datasets.Value('string')],
-             'positive_passages': [
-                 {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
-                  'title': datasets.Value('string')}
-             ],
-             'negative_passages': [
-                 {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
-                  'title': datasets.Value('string')}
-             ],
-         })
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="",
-             # License for the dataset if available
-             license="",
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         if self.config.data_files:
-             downloaded_files = self.config.data_files
-         else:
-             downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
-         splits = [
-             datasets.SplitGenerator(
-                 name=split,
-                 gen_kwargs={
-                     "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
-                 },
-             ) for split in downloaded_files
-         ]
-         return splits
-
-     def _generate_examples(self, files):
-         """Yields examples."""
-         for filepath in files:
-             with open(filepath, encoding="utf-8") as f:
-                 for line in f:
-                     data = json.loads(line)
-                     if data.get('negative_passages') is None:
-                         data['negative_passages'] = []
-                     if data.get('positive_passages') is None:
-                         data['positive_passages'] = []
-                     if data.get('answers') is None:
-                         data['answers'] = []
-                     yield data['query_id'], data
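For reference, the deleted builder's only real logic was normalizing optional fields: missing `answers`, `positive_passages`, or `negative_passages` become empty lists before each example is yielded. A sketch reproducing that behavior over the original gzipped JSONL, assuming a local copy of one of the old files:

```python
import gzip
import json

def iter_examples(path):
    """Yield (query_id, example) pairs, mirroring _generate_examples above."""
    with gzip.open(path, "rt", encoding="utf-8") as f:
        for line in f:
            data = json.loads(line)
            # The old script replaced missing optional fields with empty lists.
            for key in ("answers", "positive_passages", "negative_passages"):
                if data.get(key) is None:
                    data[key] = []
            yield data["query_id"], data

# for qid, example in iter_examples("nq-dev.jsonl.gz"): ...
```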