lovodkin93 committed
Commit 19f0e69
Parent: abb1ca3

updated the script

Files changed (1):
  Controlled-Text-Reduction-dataset.py (+35 -44)
Controlled-Text-Reduction-dataset.py CHANGED
@@ -313,21 +313,28 @@ SOFTWARE."""
 # },
 # }
 
-# COLUMNS = ["doc_text", "summary_text", "highlight_spans"]
-
-
 _URLs = {
-    "wikinews.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_train.tsv",
-    "wikinews.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_dev.tsv",
-    "wikinews.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_test.tsv",
-    "wikipedia.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_train.tsv",
-    "wikipedia.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_dev.tsv",
-    "wikipedia.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_test.tsv",
+    "dev_DUC-2001-2002": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
+    "test_DUC-2001-2002": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
+    "train_DUC-2001-2002": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_DUC-2001-2002.csv"
 }
 
-COLUMNS = ['qasrl_id', 'sentence', 'worker_id', 'full_question', 'full_answer',
-           'question_start', 'question_aux', 'question_body', 'answer',
-           'untokenized sentence', 'target indices for untok sent']
+
+COLUMNS = ["doc_text", "summary_text", "highlight_spans"]
+
+
+# _URLs = {
+#     "wikinews.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_train.tsv",
+#     "wikinews.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_dev.tsv",
+#     "wikinews.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_test.tsv",
+#     "wikipedia.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_train.tsv",
+#     "wikipedia.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_dev.tsv",
+#     "wikipedia.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_test.tsv",
+# }
+
+# COLUMNS = ['qasrl_id', 'sentence', 'worker_id', 'full_question', 'full_answer',
+#            'question_start', 'question_aux', 'question_body', 'answer',
+#            'untokenized sentence', 'target indices for untok sent']
 
 
 # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
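Note: the three new _URLs values use GitHub's /tree/main/ browser paths, which serve an HTML page rather than the file contents, so the download manager would likely fetch HTML instead of CSV. The commented-out QADiscourse URLs kept above use the /raw/ form for exactly this reason. A minimal sketch of the same dictionary with raw-content paths (the exact layout of that repository is an assumption here, not something this commit confirms):

    # Sketch: raw-content equivalents of the URLs above (path layout assumed)
    _URLs = {
        "dev_DUC-2001-2002": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/dev_DUC-2001-2002.csv",
        "test_DUC-2001-2002": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/test_DUC-2001-2002.csv",
        "train_DUC-2001-2002": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/train_DUC-2001-2002.csv",
    }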
@@ -345,17 +352,17 @@ class ControlledTectReduction(datasets.GeneratorBasedBuilder):
     ]
 
     DEFAULT_CONFIG_NAME = (
-        "plain_text"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+        "default"  # It's not mandatory to have a default configuration. Just use one if it make sense.
     )
 
     def _info(self):
         features = datasets.Features(
             {
-                "sentence": datasets.Value("string"),
-                "sent_id": datasets.Value("string"),
-                "question": datasets.Sequence(datasets.Value("string")),
-                "answers": datasets.Sequence(datasets.Value("string")),
+                "doc_text": datasets.Value("string"),
+                "summary_text": datasets.Value("string"),
+                "question": datasets.Value("string"),
             }
+
         )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
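Note: the new schema declares "doc_text", "summary_text" and "question", while COLUMNS and the rewritten _generate_examples below work with "doc_text", "summary_text" and "highlight_spans", so yielded examples would not match the declared features. A sketch of a schema aligned with COLUMNS; reading the "question" entry as a leftover from the QA-Discourse template is an assumption:

    # Sketch: feature schema aligned with COLUMNS (assumed intent)
    features = datasets.Features(
        {
            "doc_text": datasets.Value("string"),
            "summary_text": datasets.Value("string"),
            # "string" is a placeholder assumption; the right dtype depends
            # on what the eval() call in _generate_examples returns here
            "highlight_spans": datasets.Value("string"),
        }
    )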
@@ -386,49 +393,33 @@ class ControlledTectReduction(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": [corpora["wikinews.train"],
-                                  corpora["wikipedia.train"]],
+                    "filepaths": corpora["train_DUC-2001-2002"],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": [corpora["wikinews.dev"],
-                                  corpora["wikipedia.dev"]],
+                    "filepaths": corpora["dev_DUC-2001-2002"],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": [corpora["wikinews.test"],
-                                  corpora["wikipedia.test"]],
+                    "filepaths": corpora["test_DUC-2001-2002"],
                 },
             ),
         ]
 
-    def _generate_examples(self, filepaths: List[str]):
+    def _generate_examples(self, filepath: List[str]):
 
-        """
-        Yields QA-Discourse examples from a tsv file.
-        Sentences with no QAs will yield an ``empty QA'' record, where both 'question' and 'answers' are empty lists.
-        """
+        """ Yields Controlled Text Reduction examples from a csv file. Each instance contains the document, the summary and the pre-selected spans."""
 
         # merge annotations from sections
-        df = pd.concat([pd.read_csv(fn, sep='\t', error_bad_lines=False) for fn in filepaths]).reset_index(drop=True)
-        df = df.applymap(str)  # must turn all values to strings explicitly to avoid type errors
-        for counter, row in df.iterrows():
-            # Prepare question (3 "slots" and question mark)
-            question = [row.question_start, row.question_aux, row.question_body.rstrip('?'), '?']
-            answer = [row.answer]
-            if row.question_start == "_":  # sentence has no QAs
-                question = []
-                answer = []
-
-            yield counter, {
-                "sentence": row.sentence,
-                "sent_id": row.qasrl_id,
-                "question": question,
-                "answers": answer,
-            }
+        df = pd.read_csv(filepath, index_col=False)
+        for counter, dic in enumerate(df.to_dict('records')):
+            columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"]
+            for key in columns_to_load_into_object:
+                dic[key] = eval(dic[key])
+            yield counter, dic
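Two details in the rewritten generator are worth flagging. First, the SplitGenerators still pass gen_kwargs={"filepaths": ...} while the new signature names its parameter filepath, so generation would raise a TypeError for an unexpected keyword argument; the List[str] annotation also no longer matches the single path being passed. Second, eval() executes arbitrary code found in the CSV cells, and ast.literal_eval, which parses Python literals only, is the usual safer substitute. A sketch with both adjusted, assuming, as the eval() call implies, that the cells hold Python-literal representations:

    import ast

    def _generate_examples(self, filepaths: str):
        """Yields Controlled Text Reduction examples from a csv file."""
        df = pd.read_csv(filepaths, index_col=False)
        for counter, dic in enumerate(df.to_dict("records")):
            for key in ["doc_text", "summary_text", "highlight_spans"]:
                # literal_eval parses literals only, so a malformed or
                # malicious CSV cell cannot execute code the way eval() can
                dic[key] = ast.literal_eval(dic[key])
            yield counter, dic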
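For reference, once the script loads cleanly, the dataset would be consumed through the standard datasets entry point. The repository id below is assumed from this page rather than stated in the commit:

    from datasets import load_dataset

    # Repo id assumed from the commit page; splits follow the SplitGenerators above
    dataset = load_dataset("lovodkin93/Controlled-Text-Reduction-dataset")
    print(dataset["train"][0]["doc_text"])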