igorcs committed
Commit c1daf1d · verified · 1 Parent(s): 3c3e56c

Adding prompt and supporting text for Source A

Files changed (1)
  1. aes_enem_dataset.py +51 -6
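With this commit, the scraped "prompt" (essay statement) and "supporting_text" (motivating texts) columns travel from the CSVs written by the HTML parser all the way into the features exposed by the builder. Below is a minimal consumption sketch; the Hub repo id is a placeholder and the train/validation/test split names are assumed, while the config name and field names come from the diff itself.

# Minimal usage sketch (assumptions: the repo id below is a placeholder for
# wherever aes_enem_dataset.py is hosted; split names assumed train/validation/test).
from datasets import load_dataset

dataset = load_dataset(
    "your-namespace/aes_enem_dataset",  # hypothetical Hub repo id
    "sourceAWithGraders",               # config exercised by this commit
    trust_remote_code=True,             # required for script-based builders
)

example = dataset["train"][0]
print(example["prompt"])           # new field: the essay statement
print(example["supporting_text"])  # new field: the motivating texts
print(example["essay_title"], example["grades"])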
aes_enem_dataset.py CHANGED
@@ -71,6 +71,8 @@ ESSAY_TO_IGNORE = [
 CSV_HEADER = [
     "id",
     "id_prompt",
+    "prompt",
+    "supporting_text",
     "title",
     "essay",
     "grades",
@@ -138,6 +140,8 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
             {
                 "id": datasets.Value("string"),
                 "id_prompt": datasets.Value("string"),
+                "prompt": datasets.Value("string"),
+                "supporting_text": datasets.Value("string"),
                 "essay_title": datasets.Value("string"),
                 "essay_text": datasets.Value("string"),
                 "grades": datasets.Sequence(datasets.Value("int16")),
@@ -341,13 +345,13 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
         if self.config.name == "sourceAWithGraders":
             grader_a, grader_b = self._parse_graders_data(dirname)
             grader_a_data = pd.merge(
-                train_df[["id", "id_prompt","essay"]],
+                train_df[["id", "id_prompt","essay", "prompt", "supporting_text"]],
                 grader_a.drop(columns=['essay']),
                 on=["id", "id_prompt"],
                 how="inner",
             )
             grader_b_data = pd.merge(
-                train_df[["id", "id_prompt","essay"]],
+                train_df[["id", "id_prompt","essay", "prompt", "supporting_text"]],
                 grader_b.drop(columns=['essay']),
                 on=["id", "id_prompt"],
                 how="inner",
@@ -356,13 +360,13 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
             train_df = pd.concat([train_df, grader_b_data])
 
             grader_a_data = pd.merge(
-                val_df[["id", "id_prompt","essay"]],
+                val_df[["id", "id_prompt","essay", "prompt", "supporting_text"]],
                 grader_a.drop(columns=['essay']),
                 on=["id", "id_prompt"],
                 how="inner",
             )
             grader_b_data = pd.merge(
-                val_df[["id", "id_prompt","essay"]],
+                val_df[["id", "id_prompt","essay", "prompt", "supporting_text"]],
                 grader_b.drop(columns=['essay']),
                 on=["id", "id_prompt"],
                 how="inner",
@@ -371,13 +375,13 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
             val_df = pd.concat([val_df, grader_b_data])
 
             grader_a_data = pd.merge(
-                test_df[["id", "id_prompt","essay"]],
+                test_df[["id", "id_prompt","essay", "prompt", "supporting_text"]],
                 grader_a.drop(columns=['essay']),
                 on=["id", "id_prompt"],
                 how="inner",
             )
             grader_b_data = pd.merge(
-                test_df[["id", "id_prompt","essay"]],
+                test_df[["id", "id_prompt","essay", "prompt", "supporting_text"]],
                 grader_b.drop(columns=['essay']),
                 on=["id", "id_prompt"],
                 how="inner",
@@ -413,6 +417,8 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
             yield i, {
                 "id": row["id"],
                 "id_prompt": row["id_prompt"],
+                "prompt": row['prompt'],
+                "supporting_text": row["supporting_text"],
                 "essay_title": row["title"],
                 "essay_text": row["essay"],
                 "grades": grades,
@@ -633,6 +639,37 @@ class HTMLParser:
             new_list.append(phrase)
         return new_list
 
+    def _clean_string(self, sentence):
+        sentence = sentence.replace("\xa0","").replace("\u200b","")
+        sentence = sentence.replace(".",". ").replace("?","? ").replace("!", "! ").replace(")",") ").replace(":",": ").replace("”", "” ")
+        sentence = sentence.replace("  ", " ").replace(". . . ", "...")
+        sentence = sentence.replace("(editado)", "").replace("(Editado)","")
+        sentence = sentence.replace("(editado e adaptado)", "").replace("(Editado e adaptado)", "")
+        sentence = sentence.replace(". com. br", ".com.br")
+        sentence = sentence.replace("[Veja o texto completo aqui]", "")
+        return sentence
+
+    def _get_supporting_text(self, soup):
+        if self.sourceA:
+            textos = soup.find_all("ul", class_="article-wording-item")
+            resposta = []
+            for t in textos[:-1]:
+                resposta.append(t.find("h3", class_="item-titulo").get_text().replace("\xa0",""))
+                resposta.append(self._clean_string(t.find("div", class_="item-descricao").get_text()))
+            return resposta
+        else:
+            return ""
+
+    def _get_prompt(self, soup):
+        if self.sourceA:
+            prompt = soup.find("div", class_="text").find_all("p")
+            if len(prompt[0].get_text()) < 2:
+                return [prompt[1].get_text().replace("\xa0","")]
+            else:
+                return [prompt[0].get_text().replace("\xa0","")]
+        else:
+            return ""
+
     def parse(self, config_name):
         for key, filepath in self.paths_dict.items():
             if key != config_name:
@@ -670,6 +707,12 @@ class HTMLParser:
             essay_year = self._get_essay_year(
                 self.apply_soup(prompt, "Prompt.html")
             )
+            essay_supporting_text = "\n".join(self._get_supporting_text(
+                self.apply_soup(prompt, "Prompt.html")
+            ))
+            essay_prompt = "\n".join(self._get_prompt(
+                self.apply_soup(prompt, "Prompt.html")
+            ))
             for essay in prompt_essays:
                 soup_text = self.apply_soup(prompt, essay)
                 if essay == "Prompt.html":
@@ -685,6 +728,8 @@ class HTMLParser:
                [
                    essay,
                    prompt_folder,
+                   essay_prompt,
+                   essay_supporting_text,
                    essay_title,
                    essay_text,
                    essay_grades,
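For reference, the new HTMLParser helpers boil down to a small piece of BeautifulSoup scraping plus string cleanup. The sketch below replays that logic on a fabricated HTML fragment: the class names ("text", "article-wording-item", "item-titulo", "item-descricao") come from the diff above, while the markup and the Portuguese strings are invented for illustration. The real _get_prompt additionally falls back to the second paragraph when the first is empty, and _get_supporting_text runs each description through _clean_string to normalize punctuation spacing and strip editorial markers such as "(editado)".

# Illustrative only: fake Prompt.html fragment mimicking the classes the parser targets.
from bs4 import BeautifulSoup

sample_html = """
<div class="text"><p>Escreva uma dissertação sobre o tema proposto.</p></div>
<ul class="article-wording-item">
  <li><h3 class="item-titulo">Texto I</h3>
      <div class="item-descricao">Trecho de apoio ao candidato.</div></li>
</ul>
<ul class="article-wording-item">
  <li><h3 class="item-titulo">Instruções</h3>
      <div class="item-descricao">Último bloco, que o parser descarta.</div></li>
</ul>
"""

soup = BeautifulSoup(sample_html, "html.parser")

# Prompt: first <p> inside the "text" div.
prompt = soup.find("div", class_="text").find_all("p")[0].get_text()

# Supporting text: title + description of every block except the last,
# which holds instructions rather than source texts.
supporting = []
for block in soup.find_all("ul", class_="article-wording-item")[:-1]:
    supporting.append(block.find("h3", class_="item-titulo").get_text())
    supporting.append(block.find("div", class_="item-descricao").get_text())

print(prompt)
print("\n".join(supporting))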