|
"""Script used to filter malformed examples from the original SCAT corpus. |
|
|
|
To run, copy the original SCAT files from https://github.com/neulab/contextual-mt/tree/master/data/scat into the same

directory as this script. Filtered files will be created in the filtered_scat folder.
|
|
|
Uncomment lines to save dropped malformed sentences into separate files for inspection. |
|
""" |
|
|
|
import re |
|
from pathlib import Path |
|
|
|
def drop_malformed_tags(
    split: str,
    save_folder: str = "filtered_scat",
) -> None:
    """Filter out SCAT examples whose highlight/pronoun tags are malformed.

    Reads the four parallel files ``highlighted.{split}[.context].{en,fr}``
    from the current directory, drops examples with inconsistent tag
    structure, and writes the survivors to ``save_folder`` as
    ``filtered.{split}[.context].{en,fr}``. Line ``i`` across all four input
    files is assumed to be one example.

    An example is kept only if:
      * both target sides contain a ``<p>...</p>`` annotation and neither
        context side does, and
      * after collapsing uninformative ``<hon> <p>...</p> <hoff>`` nests down
        to the inner ``<p>...</p>``, the tags across all four sides occur in
        an even number of consecutive open/close pairs.

    Args:
        split: Corpus split to process (e.g. "train", "valid", "test").
        save_folder: Output directory; created if it does not exist.
    """
    # Any highlight or pronoun tag occurrence.
    find_tag_pattern = r"(<hon>|<\/?p>|<hoff>)"
    # A <hon>...<hoff> span that wraps a single <p>...</p> with only
    # non-word characters around it: the outer highlight adds no information.
    nested_uninformative_pattern = r"(<hon>\W*(<p>[^<]*</p>)\W*<hoff>)"

    # Explicit UTF-8: the corpus is French/English text and must not depend
    # on the platform's default locale encoding.
    with open(f"highlighted.{split}.context.en", encoding="utf-8") as f:
        orig_ctx_en = f.readlines()
    with open(f"highlighted.{split}.context.fr", encoding="utf-8") as f:
        orig_ctx_fr = f.readlines()
    with open(f"highlighted.{split}.en", encoding="utf-8") as f:
        orig_tgt_en = f.readlines()
    with open(f"highlighted.{split}.fr", encoding="utf-8") as f:
        orig_tgt_fr = f.readlines()

    print("# of context examples: EN -", len(orig_ctx_en), "FR -", len(orig_ctx_fr))
    print("# of target examples: EN -", len(orig_tgt_en), "FR -", len(orig_tgt_fr))

    ctx_en = []
    ctx_fr = []
    tgt_en = []
    tgt_fr = []

    for ex_idx in range(len(orig_ctx_en)):
        drop = False
        # Order: [ctx_en, tgt_en, ctx_fr, tgt_fr] — indices below rely on it.
        txt_list = [orig_ctx_en[ex_idx], orig_tgt_en[ex_idx], orig_ctx_fr[ex_idx], orig_tgt_fr[ex_idx]]

        # The pronoun annotation must appear in both targets and in neither context.
        if not (
            "<p>" in txt_list[1] and "</p>" in txt_list[1] and "<p>" in txt_list[3] and "</p>" in txt_list[3] and
            "<p>" not in txt_list[0] and "</p>" not in txt_list[0] and "<p>" not in txt_list[2] and "</p>" not in txt_list[2]
        ):
            drop = True

        # Collapse uninformative <hon> <p>...</p> <hoff> nests to the inner tag.
        for i in range(len(txt_list)):
            for uninformative_match, nested_tag in re.findall(nested_uninformative_pattern, txt_list[i]):
                txt_list[i] = txt_list[i].replace(uninformative_match, nested_tag)
        txt = " ".join(txt_list)

        # All tag occurrences as (tag_text, start, end), in order of appearance.
        matches = [(m.group(0),) + m.span() for m in re.finditer(find_tag_pattern, txt)]

        if not drop:
            if len(matches) > 0 and len(matches) % 2 == 0:
                # Tags must pair up consecutively: each even-indexed tag opens,
                # the following tag closes.
                for match_idx in range(0, len(matches), 2):
                    # NOTE(review): the third clause (end of the first tag strictly
                    # before the start of the next) accepts ANY non-adjacent tag
                    # pair regardless of tag names, which makes this check very
                    # permissive. Kept byte-identical to preserve the original
                    # filtering behavior — confirm whether this was intended.
                    if not (
                        (matches[match_idx][0] == "<hon>" and matches[match_idx+1][0] == "<hoff>") or
                        (matches[match_idx][0] == "<p>" and matches[match_idx+1][0] == "</p>") or
                        (matches[match_idx][2] < matches[match_idx+1][1])
                    ):
                        drop = True
                        break
            else:
                # No tags at all, or an odd count: structurally malformed.
                drop = True

        if not drop:
            ctx_en.append(txt_list[0])
            ctx_fr.append(txt_list[2])
            tgt_en.append(txt_list[1])
            tgt_fr.append(txt_list[3])

    print("# of dropped examples:", len(orig_ctx_en) - len(ctx_en))
    print("# of filtered examples:", len(ctx_en))

    save_path = Path(save_folder)
    save_path.mkdir(parents=True, exist_ok=True)
    with open(save_path / f"filtered.{split}.context.en", "w", encoding="utf-8") as f:
        f.writelines(ctx_en)
    with open(save_path / f"filtered.{split}.context.fr", "w", encoding="utf-8") as f:
        f.writelines(ctx_fr)
    with open(save_path / f"filtered.{split}.en", "w", encoding="utf-8") as f:
        f.writelines(tgt_en)
    with open(save_path / f"filtered.{split}.fr", "w", encoding="utf-8") as f:
        f.writelines(tgt_fr)

    # Report the actual output directory (was previously hardcoded to
    # "filtered_scat" even when save_folder differed).
    print(f"Files written to the {save_folder} folder")
|
|
|
if __name__ == "__main__":
    # Filter every corpus split in turn.
    for corpus_split in ("train", "valid", "test"):
        drop_malformed_tags(corpus_split)