Datasets:

Tasks:
Other
Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
Dask
License:
VictorSanh committed on
Commit
a4fe76a
1 Parent(s): 2e4e0ca
Files changed (1) hide show
  1. P3.py +11 -8
P3.py CHANGED
@@ -38,7 +38,7 @@ _LICENSE = "Apache License 2.0"
38
 
39
  _HOMEPAGE = "https://github.com/bigscience-workshop/promptsource"
40
 
41
- _DATA_PATH = "./data/"
42
 
43
 
44
  def load_cached_task(cache_dir, split):
@@ -85,6 +85,8 @@ def find_task_splits_and_features():
85
  for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
86
  folder_path = os.path.dirname(stats)
87
  task_name = folder_path.split("/")[-1]
 
 
88
  split_name = os.path.basename(stats).split(".")[1]
89
 
90
  if not os.path.exists(f"{folder_path}/COMPLETED"):
@@ -114,8 +116,8 @@ def find_task_splits_and_features():
114
  return task_and_their_splits
115
 
116
 
117
- TASK_SPLITS_AND_FEATURES = find_task_splits_and_features()
118
-
119
 
120
 
121
  class P3Config(datasets.BuilderConfig):
@@ -148,7 +150,7 @@ class P3(datasets.GeneratorBasedBuilder):
148
  features=splits_and_features["features"],
149
  score_eval=task_name.endswith("score_eval")
150
  )
151
- for task_name, splits_and_features in TASK_SPLITS_AND_FEATURES.items()
152
  ]
153
 
154
  def _info(self):
@@ -180,12 +182,13 @@ class P3(datasets.GeneratorBasedBuilder):
180
 
181
  def _split_generators(self, dl_manager):
182
  split_generators = []
 
183
  if "train" in self.config.splits:
184
  split_generators.append(
185
  datasets.SplitGenerator(
186
  name=datasets.Split.TRAIN,
187
  gen_kwargs={
188
- "data_folder": os.path.join(_DATA_PATH, self.config.name),
189
  "split": "train",
190
  }
191
  )
@@ -195,7 +198,7 @@ class P3(datasets.GeneratorBasedBuilder):
195
  datasets.SplitGenerator(
196
  name=datasets.Split.VALIDATION,
197
  gen_kwargs={
198
- "data_folder": os.path.join(_DATA_PATH, self.config.name),
199
  "split": "validation",
200
  }
201
  )
@@ -205,7 +208,7 @@ class P3(datasets.GeneratorBasedBuilder):
205
  datasets.SplitGenerator(
206
  name=datasets.Split.TEST,
207
  gen_kwargs={
208
- "data_folder": os.path.join(_DATA_PATH, self.config.name),
209
  "split": "test",
210
  }
211
  )
@@ -217,7 +220,7 @@ class P3(datasets.GeneratorBasedBuilder):
217
  datasets.SplitGenerator(
218
  name=datasets.Split(special_split_name),
219
  gen_kwargs={
220
- "data_folder": os.path.join(_DATA_PATH, self.config.name),
221
  "split": special_split_name,
222
  }
223
  )
 
38
 
39
  _HOMEPAGE = "https://github.com/bigscience-workshop/promptsource"
40
 
41
+ _DATA_PATH = "data/"
42
 
43
 
44
  def load_cached_task(cache_dir, split):
 
85
  for stats in glob.glob(f"{_DATA_PATH}/*/stats.*.json"):
86
  folder_path = os.path.dirname(stats)
87
  task_name = folder_path.split("/")[-1]
88
+ if "rte" not in task_name:
89
+ continue
90
  split_name = os.path.basename(stats).split(".")[1]
91
 
92
  if not os.path.exists(f"{folder_path}/COMPLETED"):
 
116
  return task_and_their_splits
117
 
118
 
119
+ _TASK_SPLITS_AND_FEATURES = find_task_splits_and_features()
120
+ _URLs = {task_name: f"{_DATA_PATH}/{task_name}" for task_name in _TASK_SPLITS_AND_FEATURES.keys()}
121
 
122
 
123
  class P3Config(datasets.BuilderConfig):
 
150
  features=splits_and_features["features"],
151
  score_eval=task_name.endswith("score_eval")
152
  )
153
+ for task_name, splits_and_features in _TASK_SPLITS_AND_FEATURES.items()
154
  ]
155
 
156
  def _info(self):
 
182
 
183
  def _split_generators(self, dl_manager):
184
  split_generators = []
185
+ data_dir = dl_manager.download_and_extract(_URLs)
186
  if "train" in self.config.splits:
187
  split_generators.append(
188
  datasets.SplitGenerator(
189
  name=datasets.Split.TRAIN,
190
  gen_kwargs={
191
+ "data_folder": data_dir,
192
  "split": "train",
193
  }
194
  )
 
198
  datasets.SplitGenerator(
199
  name=datasets.Split.VALIDATION,
200
  gen_kwargs={
201
+ "data_folder": data_dir,
202
  "split": "validation",
203
  }
204
  )
 
208
  datasets.SplitGenerator(
209
  name=datasets.Split.TEST,
210
  gen_kwargs={
211
+ "data_folder": data_dir,
212
  "split": "test",
213
  }
214
  )
 
220
  datasets.SplitGenerator(
221
  name=datasets.Split(special_split_name),
222
  gen_kwargs={
223
+ "data_folder": data_dir,
224
  "split": special_split_name,
225
  }
226
  )