parquet-converter committed on
Commit 63dd914
Parent: 9dde379

Update parquet files

README.md DELETED
@@ -1,34 +0,0 @@
- ---
- task_categories:
- - question-answering
- language:
- - he
- ---
-
- # ParaShoot
-
- [ParaShoot](https://github.com/omrikeren/ParaShoot): A Hebrew question answering dataset in the style of [SQuAD](https://arxiv.org/abs/1606.05250), based on articles scraped from Wikipedia. The dataset contains a few thousand crowdsource-annotated pairs of questions and answers, in a setting suitable for few-shot learning.
-
- For more details and a quality analysis, see the [paper](https://arxiv.org/abs/2109.11314).
-
- ## Dataset Statistics
-
- |           | **#Items** | **#Articles** | **#Paragraphs** |
- | --------- | ---------- | ------------- | --------------- |
- | Train     | 1792       | 295           | 565             |
- | Dev       | 221        | 33            | 63              |
- | Test      | 1025       | 165           | 319             |
- | **Total** | **3038**   | **493**       | **947**         |
-
-
- ## Citing
- If you use ParaShoot in your research, please cite the ParaShoot paper:
- ```bibtex
- @inproceedings{keren2021parashoot,
-     title={ParaShoot: A Hebrew Question Answering Dataset},
-     author={Keren, Omri and Levy, Omer},
-     booktitle={Proceedings of the 3rd Workshop on Machine Reading for Question Answering},
-     pages={106--112},
-     year={2021}
- }
- ```
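
The deleted README documents a SQuAD-style schema (id, title, context, question, and answers with parallel text/answer_start lists). A minimal sketch of loading the converted dataset with the datasets library; the repo id below is a placeholder, not given in this commit, so substitute the actual Hub id of this repository:

```python
from datasets import load_dataset

# Placeholder repo id; substitute this repository's actual Hub id.
ds = load_dataset("<namespace>/parashoot")

example = ds["train"][0]
print(example["question"])
print(example["context"][:200])
# SQuAD-style answers: parallel lists of answer spans and start offsets.
print(example["answers"]["text"], example["answers"]["answer_start"])
```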
data/test.tar.gz → default/parashoot-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a079ad9c255e61515a57def3c71304ee53fe95fa67d391ac879806908456c369
- size 234806
+ oid sha256:b4a053c566501583916bdea71c331af8fce7512a550e21d00231d15355211a16
+ size 367530
data/train.tar.gz → default/parashoot-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c540c758863c7dca109453e56d53929ce4b500d67f6a1cd06c198f079be1ad5a
- size 395844
+ oid sha256:0cbf57ccc4ab39d45b8651b01c4be25ded5c39ae8991763a9a5ce6209ca0bcdb
+ size 619897
data/dev.tar.gz → default/parashoot-validation.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c39bf42e84faea7ba95ec7b81d1bca810c78547b8567b8116280bbdccb009670
- size 50374
+ oid sha256:2a56248875d4294e5fed77753c332e60cf4d1fab42f4d9f9d32b3cf84d459589
+ size 84609
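
The three renames above replace the tar.gz archives with plain Parquet files under default/, one per split, so any Parquet reader can open them directly. A quick sanity-check sketch with pandas, assuming the files have been pulled locally (e.g. via git LFS):

```python
import pandas as pd

# Read one split straight from its Parquet file.
df = pd.read_parquet("default/parashoot-train.parquet")
print(df.columns.tolist())  # expected: id, title, context, question, answers
print(len(df))              # 1792 rows, per the deleted README's statistics
```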
parashoot.py DELETED
@@ -1,134 +0,0 @@
- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- import json
- import os
-
- import datasets
- from datasets.tasks import QuestionAnsweringExtractive
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @inproceedings{keren2021parashoot,
-     title={ParaShoot: A Hebrew Question Answering Dataset},
-     author={Keren, Omri and Levy, Omer},
-     booktitle={Proceedings of the 3rd Workshop on Machine Reading for Question Answering},
-     pages={106--112},
-     year={2021}
- }
- """
-
- _DESCRIPTION = """
- A Hebrew question answering dataset in the style of SQuAD, based on articles scraped from Wikipedia. The dataset contains a few thousand crowdsource-annotated pairs of questions and answers, in a setting suitable for few-shot learning.
- """
-
- _URLS = {
-     "train": "data/train.tar.gz",
-     "validation": "data/dev.tar.gz",
-     "test": "data/test.tar.gz",
- }
-
-
- class ParashootConfig(datasets.BuilderConfig):
-     """BuilderConfig for Parashoot."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for Parashoot.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(ParashootConfig, self).__init__(**kwargs)
-
-
- class Parashoot(datasets.GeneratorBasedBuilder):
-     """Parashoot: The Hebrew Question Answering Dataset. Version 1.1."""
-
-     BUILDER_CONFIGS = [
-         ParashootConfig(
-             version=datasets.Version("1.1.0", ""),
-             description=_DESCRIPTION,
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "title": datasets.Value("string"),
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": datasets.features.Sequence(
-                         {
-                             "text": datasets.Value("string"),
-                             "answer_start": datasets.Value("int32"),
-                         }
-                     ),
-                 }
-             ),
-             # No default supervised_keys (as we have to pass both question
-             # and context as input).
-             supervised_keys=None,
-             homepage="https://github.com/omrikeren/ParaShoot",
-             citation=_CITATION,
-             task_templates=[
-                 QuestionAnsweringExtractive(
-                     question_column="question",
-                     context_column="context",
-                     answers_column="answers",
-                 )
-             ],
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": downloaded_files["train"],
-                     "basename": "train.jsonl",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepath": downloaded_files["validation"],
-                     "basename": "dev.jsonl",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": downloaded_files["test"],
-                     "basename": "test.jsonl",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, basename):
-         """This function returns the examples in the raw (text) form."""
-         logger.info("generating examples from = %s", filepath)
-         key = 0
-         with open(os.path.join(filepath, basename), encoding="utf-8") as f:
-             for line in f:
-                 article = json.loads(line)
-                 title = article.get("title", "")
-                 context = article["context"]
-                 answer_starts = article["answers"]["answer_start"]
-                 answers = article["answers"]["text"]
-                 yield key, {
-                     "title": title,
-                     "context": context,
-                     "question": article["question"],
-                     "id": article["id"],
-                     "answers": {
-                         "answer_start": answer_starts,
-                         "text": answers,
-                     },
-                 }
-                 key += 1
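
With the loading script deleted, downstream code no longer needs a custom builder: the same three splits can be read through the generic "parquet" builder. A sketch assuming the default/ layout introduced by this commit:

```python
from datasets import load_dataset

# Map each split to the Parquet file created by this commit.
data_files = {
    "train": "default/parashoot-train.parquet",
    "validation": "default/parashoot-validation.parquet",
    "test": "default/parashoot-test.parquet",
}
ds = load_dataset("parquet", data_files=data_files)
print(ds)  # DatasetDict with train/validation/test splits
```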