albertvillanova (HF staff) committed
Commit
bb447c3
1 Parent(s): 4469a71

Delete loading script

Files changed (1)
  1. piaf.py +0 -139
piaf.py DELETED
@@ -1,139 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""PIAF Question Answering Dataset"""
-
-
-import json
-
-import datasets
-from datasets.tasks import QuestionAnsweringExtractive
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@InProceedings{keraron-EtAl:2020:LREC,
-  author = {Keraron, Rachel and Lancrenon, Guillaume and Bras, Mathilde and Allary, Frédéric and Moyse, Gilles and Scialom, Thomas and Soriano-Morales, Edmundo-Pavel and Staiano, Jacopo},
-  title = {Project PIAF: Building a Native French Question-Answering Dataset},
-  booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
-  month = {May},
-  year = {2020},
-  address = {Marseille, France},
-  publisher = {European Language Resources Association},
-  pages = {5483--5492},
-  abstract = {Motivated by the lack of data for non-English languages, in particular for the evaluation of downstream tasks such as Question Answering, we present a participatory effort to collect a native French Question Answering Dataset. Furthermore, we describe and publicly release the annotation tool developed for our collection effort, along with the data obtained and preliminary baselines.},
-  url = {https://www.aclweb.org/anthology/2020.lrec-1.673}
-}
-"""
-
-_DESCRIPTION = """\
-Piaf is a reading comprehension \
-dataset. This version, published in February 2020, contains 3835 questions on French Wikipedia.
-"""
-
-_URLS = {"train": "https://github.com/etalab-ia/piaf-code/raw/master/piaf-v1.0.json"}
-
-
-class PiafConfig(datasets.BuilderConfig):
-    """BuilderConfig for PIAF."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for PIAF.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(PiafConfig, self).__init__(**kwargs)
-
-
-class Piaf(datasets.GeneratorBasedBuilder):
-    """The Piaf Question Answering Dataset. Version 1.0."""
-
-    BUILDER_CONFIGS = [
-        PiafConfig(
-            name="plain_text",
-            version=datasets.Version("1.0.0", ""),
-            description="Plain text",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "title": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                        }
-                    ),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both question
-            # and context as input).
-            supervised_keys=None,
-            homepage="https://piaf.etalab.studio",
-            citation=_CITATION,
-            task_templates=[
-                QuestionAnsweringExtractive(
-                    question_column="question", context_column="context", answers_column="answers"
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager):
-        urls_to_download = _URLS
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            dataset = json.load(f)
-            for article in dataset["data"]:
-                title = article.get("title", "").strip()
-                for paragraph in article["paragraphs"]:
-                    context = paragraph["context"].strip()
-                    for qa in paragraph["qas"]:
-                        question = qa["question"].strip()
-                        id_ = qa["id"]
-
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"].strip() for answer in qa["answers"]]
-
-                        # Features currently used are "context", "question", and "answers".
-                        # Others are extracted here for the ease of future expansions.
-                        yield id_, {
-                            "title": title,
-                            "context": context,
-                            "question": question,
-                            "id": id_,
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
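
With the loading script deleted, the dataset is served from the data files hosted in the repository rather than by this builder class. A minimal usage sketch, assuming the PIAF data remains available on the Hugging Face Hub with the same "plain_text" configuration and "train" split; the dataset id "piaf" used below is an assumption, not confirmed by this commit.

# Minimal sketch, not part of this commit: assumes the dataset id "piaf"
# still resolves on the Hub after the loading script is removed.
from datasets import load_dataset

piaf = load_dataset("piaf", "plain_text", split="train")

# The schema the deleted script produced is expected to be unchanged:
# id, title, context, question, and answers (text, answer_start).
example = piaf[0]
print(example["question"])
print(example["answers"]["text"])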