Delete scirepeval.py
scirepeval.py +0 -199
DELETED
@@ -1,199 +0,0 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""


import csv
import json
import os
import glob

import datasets
from datasets.data_files import DataFilesDict
from .scirepeval_configs import SCIREPEVAL_CONFIGS
#from datasets.packaged_modules.json import json
from datasets.utils.logging import get_logger


logger = get_logger(__name__)
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2021}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
}



# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
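# Note: the template constants above (_CITATION, _DESCRIPTION, _HOMEPAGE,
# _LICENSE, _URLS) are never used below; the builder takes its description,
# license and citation from each config and builds its download URLs from an
# S3 base URL in _split_generators.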
class Scirepeval(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = SCIREPEVAL_CONFIGS

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=self.config.description,
            # This defines the different columns of the dataset and their types
            features=datasets.Features(self.config.features),  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage="",
            # License for the dataset if available
            license=self.config.license,
            # Citation for the dataset
            citation=self.config.citation,
        )

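    # The `description`, `features`, `license`, `citation`, `url`, `is_training`
    # and `task_type` attributes read from self.config throughout this script
    # presumably come from the custom BuilderConfig objects defined in
    # scirepeval_configs.py, which is not part of this diff.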
    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
        base_url = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/scirepeval"
        data_urls = dict()
        data_dir = self.config.url if self.config.url else self.config.name
        if self.config.is_training:
            data_urls = {"train": f"{base_url}/train/{data_dir}/train.jsonl"}

            if "refresh" not in self.config.name:
                data_urls.update({"val": f"{base_url}/train/{data_dir}/val.jsonl"})

        if "cite_prediction" not in self.config.name:
            data_urls.update({"test": f"{base_url}/test/{data_dir}/meta.jsonl"})
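        # For a hypothetical config named "fos" with is_training=True and no
        # explicit url (illustrative only), the URLs above would resolve to:
        #   train -> {base_url}/train/fos/train.jsonl
        #   val   -> {base_url}/train/fos/val.jsonl
        #   test  -> {base_url}/test/fos/meta.jsonl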
        # print(data_urls)
        downloaded_files = dl_manager.download_and_extract(data_urls)
        # print(downloaded_files)
        splits = []
        if "test" in downloaded_files:
            splits = [datasets.SplitGenerator(
                name=datasets.Split("evaluation"),
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "split": "evaluation"
                },
            ),
            ]

        if "train" in downloaded_files:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": downloaded_files["train"],
                        "split": "train",
                    },
                ))
        if "val" in downloaded_files:
            splits.append(datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": downloaded_files["val"],
                    "split": "validation",
                }))
        return splits


    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        def read_data(data_path):
            task_data = []
            try:
                task_data = json.load(open(data_path, "r", encoding="utf-8"))
            except:
                with open(data_path) as f:
                    task_data = [json.loads(line) for line in f]
            if type(task_data) == dict:
                task_data = list(task_data.values())
            return task_data
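        # read_data appears to be a leftover helper: its only call site below is
        # commented out, and examples are instead streamed line by line from the
        # JSONL file.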
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        # data = read_data(filepath)
        seen_keys = set()
        IGNORE=set(["n_key_citations", "session_id", "user_id", "user"])
        logger.warning(filepath)
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                d = json.loads(line)
                d = {k:v for k,v in d.items() if k not in IGNORE}
                key="doc_id" if "cite_prediction_" not in self.config.name else "corpus_id"
                if self.config.task_type == "proximity":
                    if "cite_prediction" in self.config.name:
                        if "arxiv_id" in d["query"]:
                            for item in ["query", "pos", "neg"]:
                                del d[item]["arxiv_id"]
                                del d[item]["doi"]
                        if "fos" in d["query"]:
                            del d["query"]["fos"]
                        if "score" in d["pos"]:
                            del d["pos"]["score"]
                        yield str(d["query"][key]) + str(d["pos"][key]) + str(d["neg"][key]), d
                    else:
                        if d["query"][key] not in seen_keys:
                            seen_keys.add(d["query"][key])
                            yield str(d["query"][key]), d
                else:
                    if d[key] not in seen_keys:
                        seen_keys.add(d[key])
                        if self.config.task_type != "search":
                            if "corpus_id" not in d:
                                d["corpus_id"] = None
                            if "scidocs" in self.config.name:
                                if "cited by" not in d:
                                    d["cited_by"] = []
                                if type(d["corpus_id"]) == str:
                                    d["corpus_id"] = None
                        yield d[key], d
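For context, a script like this is consumed by passing the repository and a config name to datasets.load_dataset. A minimal sketch, assuming the script lived in the allenai/scirepeval dataset repo and that "fos" is one of the config names in SCIREPEVAL_CONFIGS (neither is confirmed by this diff); note that the test metadata is exposed under the custom "evaluation" split rather than datasets.Split.TEST:

import datasets

# Hypothetical repo id and config name; recent `datasets` versions also require
# trust_remote_code=True before executing a Hub loading script like this one.
ds = datasets.load_dataset("allenai/scirepeval", "fos", trust_remote_code=True)
print(ds["train"][0])       # training examples
print(ds["evaluation"][0])  # test metadata, served as an "evaluation" split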