from pathlib import Path
from typing import List

from flair.data import Sentence
from flair.embeddings import TransformerWordEmbeddings


def prepare_ajmc_corpus(
    file_in: Path, file_out: Path, eos_marker: str, document_separator: str, add_document_separator: bool
):
    """Prepare an AJMC corpus file.

    Drops lines with an empty token column, normalizes the historic long s ("ſ"),
    optionally inserts -DOCSTART- markers and adds a sentence break after each
    end-of-sentence marker.
    """
    with open(file_in, "rt") as f_p:
        lines = f_p.readlines()

    with open(file_out, "wt") as f_out:
        # Keep the original header line.
        f_out.write(lines[0] + "\n")

        for line in lines[1:]:
            # Skip lines with an empty token column.
            if line.startswith(" \t"):
                continue

            line = line.strip()

            # Normalize the historic long s ("ſ") to a regular "s".
            line = line.replace("ſ", "s")

            # Emit a -DOCSTART- marker before every new document.
            if add_document_separator and line.startswith(document_separator):
                f_out.write("-DOCSTART- O\n\n")

            f_out.write(line + "\n")

            # Start a new sentence after each end-of-sentence marker.
            if eos_marker in line:
                f_out.write("\n")

    print("Special preprocessing for AJMC has finished!")


def prepare_clef_2020_corpus(
    file_in: Path, file_out: Path, eos_marker: str, document_separator: str, add_document_separator: bool
):
    """Prepare a CLEF-HIPE 2020 corpus file.

    In addition to the common preprocessing (document separators, sentence breaks),
    tokens that were split across lines with a "¬" marker are merged back together
    ("dehyphenated").
    """
    with open(file_in, "rt") as f_p:
        original_lines = f_p.readlines()

    lines = []

    # Keep the original header line.
    lines.append(original_lines[0])

    for line in original_lines[1:]:
        # Skip lines with an empty token column.
        if line.startswith(" \t"):
            continue

        line = line.strip()

        # Emit a -DOCSTART- marker before every new document.
        if add_document_separator and line.startswith(document_separator):
            lines.append("-DOCSTART- O")
            lines.append("")

        lines.append(line)

        # Start a new sentence after each end-of-sentence marker.
        if eos_marker in line:
            lines.append("")

    # Dehyphenation: a token split across lines appears as three consecutive
    # lines - the first fragment, a line whose token starts with "¬", and the
    # second fragment. The fragments are merged into the first line, which is
    # marked as dehyphenated; the "¬" line and the second fragment are kept as
    # commented-out lines.
    word_separator = "¬"

    for index, line in enumerate(lines):
        if line.startswith("#"):
            continue

        if line.startswith(word_separator):
            continue

        if not line:
            continue

        prev_line = lines[index - 1]
        prev_prev_line = lines[index - 2]

        if not prev_line.startswith(word_separator):
            continue

        # Append the second fragment to the token two lines above.
        suffix = line.split("\t")[0]

        prev_prev_line_splitted = prev_prev_line.split("\t")
        prev_prev_line_splitted[0] += suffix

        # Comment out the "¬" line.
        prev_line_splitted = prev_line.split("\t")
        prev_line_splitted[0] = "#" + prev_line_splitted[0]
        prev_line_splitted[-1] += "|Commented"

        # Comment out the line holding the second fragment.
        current_line_splitted = line.split("\t")
        current_line_splitted[0] = "#" + current_line_splitted[0]
        current_line_splitted[-1] += "|Commented"

        # Record the dehyphenation in the MISC column of the merged token.
        prev_prev_line_splitted[9] += f"|Dehyphenated-{len(suffix)}"

        lines[index - 2] = "\t".join(prev_prev_line_splitted)
        lines[index - 1] = "\t".join(prev_line_splitted)
        lines[index] = "\t".join(current_line_splitted)

    # Comment out any remaining lines that still start with the word separator.
    for index, line in enumerate(lines):
        if not line:
            continue

        if not line.startswith(word_separator):
            continue

        current_line_splitted = line.split("\t")
        current_line_splitted[0] = "#" + current_line_splitted[0]
        current_line_splitted[-1] += "|Commented"

        lines[index] = "\t".join(current_line_splitted)

    # Normalize the MISC column of commented-out lines: "_|Commented" becomes "Commented".
    for index, line in enumerate(lines):
        if not line:
            continue

        if not line.startswith("#"):
            continue

        current_line_splitted = line.split("\t")

        if current_line_splitted[-1] == "_|Commented":
            current_line_splitted[-1] = "Commented"
            lines[index] = "\t".join(current_line_splitted)

    with open(file_out, "wt") as f_out:
        for line in lines:
            f_out.write(line + "\n")
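
# Schematic effect of the CLEF-2020 dehyphenation above (columns abbreviated;
# the token and tag values are made up for illustration only):
#
#   before:                        after:
#     Nach     ...  _                Nachrichten  ...  _|Dehyphenated-7
#     ¬        ...  _                #¬           ...  Commented
#     richten  ...  _                #richten     ...  Commented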


def prepare_newseye_fi_sv_corpus(
    file_in: Path, file_out: Path, eos_marker: str, document_separator: str, add_document_separator: bool
):
    """Prepare a NewsEye Finnish/Swedish corpus file.

    Works like prepare_clef_2020_corpus, but hyphenated tokens are marked by a
    separate "-" token line that carries a "NoSpaceAfter" flag.
    """
    with open(file_in, "rt") as f_p:
        original_lines = f_p.readlines()

    lines = []

    # Keep the original header line.
    lines.append(original_lines[0])

    for line in original_lines[1:]:
        # Skip lines with an empty token column.
        if line.startswith(" \t"):
            continue

        line = line.strip()

        # Emit a -DOCSTART- marker before every new document.
        if add_document_separator and line.startswith(document_separator):
            lines.append("-DOCSTART- O")
            lines.append("")

        lines.append(line)

        # Start a new sentence after each end-of-sentence marker.
        if eos_marker in line:
            lines.append("")

    # Dehyphenation: a token split across lines appears as the first fragment,
    # a "-" separator line, and the second fragment. The fragments are merged
    # into the first line, which is marked as dehyphenated; the "-" line and the
    # second fragment are kept as commented-out lines.
    word_separator = "-\t"

    for index, line in enumerate(lines):
        if line.startswith("#"):
            continue

        if line.startswith(word_separator):
            continue

        if not line:
            continue

        prev_line = lines[index - 1]
        prev_prev_line = lines[index - 2]

        if not prev_line.startswith(word_separator):
            continue

        # Only merge if the separator line carries a "NoSpaceAfter" flag.
        if "NoSpaceAfter" not in prev_line:
            continue

        if not prev_prev_line:
            continue

        # Append the second fragment to the token two lines above.
        suffix = line.split("\t")[0]

        prev_prev_line_splitted = prev_prev_line.split("\t")
        prev_prev_line_splitted[0] += suffix

        # Comment out the "-" line.
        prev_line_splitted = prev_line.split("\t")
        prev_line_splitted[0] = "# " + prev_line_splitted[0]
        prev_line_splitted[-1] += "|Commented"

        # Comment out the line holding the second fragment.
        current_line_splitted = line.split("\t")
        current_line_splitted[0] = "# " + current_line_splitted[0]
        current_line_splitted[-1] += "|Commented"

        # Record the dehyphenation in the MISC column of the merged token.
        prev_prev_line_splitted[9] += f"|Dehyphenated-{len(suffix)}"

        lines[index - 2] = "\t".join(prev_prev_line_splitted)
        lines[index - 1] = "\t".join(prev_line_splitted)
        lines[index] = "\t".join(current_line_splitted)

    # Comment out any remaining lines that still start with the word separator.
    for index, line in enumerate(lines):
        if not line:
            continue

        if not line.startswith(word_separator):
            continue

        current_line_splitted = line.split("\t")
        current_line_splitted[0] = "# " + current_line_splitted[0]
        current_line_splitted[-1] += "|Commented"

        lines[index] = "\t".join(current_line_splitted)

    # Normalize the MISC column of commented-out lines: "_|Commented" becomes "Commented".
    for index, line in enumerate(lines):
        if not line:
            continue

        if not line.startswith("#"):
            continue

        current_line_splitted = line.split("\t")

        if current_line_splitted[-1] == "_|Commented":
            current_line_splitted[-1] = "Commented"
            lines[index] = "\t".join(current_line_splitted)

    with open(file_out, "wt") as f_out:
        for line in lines:
            f_out.write(line + "\n")


def prepare_newseye_de_fr_corpus(
    file_in: Path, file_out: Path, eos_marker: str, document_separator: str, add_document_separator: bool
):
    """Prepare a NewsEye German/French corpus file.

    Works like prepare_clef_2020_corpus, but the "¬" hyphenation marker is
    attached to the end of the first token fragment instead of standing on its
    own line.
    """
    with open(file_in, "rt") as f_p:
        original_lines = f_p.readlines()

    lines = []

    # Keep the original header line.
    lines.append(original_lines[0])

    for line in original_lines[1:]:
        # Skip lines with an empty token column.
        if line.startswith(" \t"):
            continue

        line = line.strip()

        # Emit a -DOCSTART- marker before every new document.
        if add_document_separator and line.startswith(document_separator):
            lines.append("-DOCSTART- O")
            lines.append("")

        lines.append(line)

        # Start a new sentence after each end-of-sentence marker.
        if eos_marker in line:
            lines.append("")

    # Dehyphenation: the first fragment ends with "¬" (e.g. "Nach¬" followed by
    # "richten"). The previous line receives the merged token and is marked as
    # dehyphenated; the line holding the second fragment is kept as a
    # commented-out line.
    word_separator = "¬"

    for index, line in enumerate(lines):
        if line.startswith("#"):
            continue

        if not line:
            continue

        last_line = lines[index - 1]
        last_line_splitted = last_line.split("\t")

        if not last_line_splitted[0].endswith(word_separator):
            continue

        # Token of the previous line without the trailing "¬".
        suffix = last_line.split("\t")[0].replace(word_separator, "")

        prefix_length = len(line.split("\t")[0])

        # Merge both fragments into the previous line ...
        last_line_splitted[0] = suffix + line.split("\t")[0]

        # ... and record the dehyphenation in its MISC column.
        last_line_splitted[9] += f"|Dehyphenated-{prefix_length}"

        # Comment out the line holding the second fragment.
        current_line_splitted = line.split("\t")
        current_line_splitted[0] = "# " + current_line_splitted[0]
        current_line_splitted[-1] += "|Commented"

        lines[index - 1] = "\t".join(last_line_splitted)
        lines[index] = "\t".join(current_line_splitted)

    # Normalize the MISC column of commented-out lines: "_|Commented" becomes "Commented".
    for index, line in enumerate(lines):
        if not line:
            continue

        if not line.startswith("#"):
            continue

        current_line_splitted = line.split("\t")

        if current_line_splitted[-1] == "_|Commented":
            current_line_splitted[-1] = "Commented"
            lines[index] = "\t".join(current_line_splitted)

    with open(file_out, "wt") as f_out:
        for line in lines:
            f_out.write(line + "\n")

    print("Special preprocessing for German/French NewsEye dataset has finished!")