# CNC_KSK / convert_ksk.py
import os
import re
from typing import Dict

import jsonlines
from tqdm import tqdm

TARGET = ".data/ksk-dopisy.vert.shuffled"


def process_vert_format(vert_content: str) -> Dict[str, str]:
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)
    # Pattern to match document boundaries and extract metadata
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)"\s+signatura="([^"]*)"\s+archiv="([^"]*)"\s+pispohl="([^"]*)"\s+pisvek="([^"]*)"\s+pisvzdel="([^"]*)"\s+pispobyt="([^"]*)"\s+pister="([^"]*)"\s+vztah="([^"]*)"\s+adrpohl="([^"]*)"\s+adrvek="([^"]*)"\s+adrvzdel="([^"]*)"\s+rok="([^"]*)"\s+forma="([^"]*)">'
    )
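    # Illustrative shape of a document header the pattern above expects; the attribute
    # order follows the regex, the values are placeholders, not real corpus data:
    #   <doc id="..." signatura="..." archiv="..." pispohl="F" pisvek="..." pisvzdel="..."
    #        pispobyt="..." pister="..." vztah="..." adrpohl="M" adrvek="..." adrvzdel="..."
    #        rok="..." forma="...">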
    block_pattern = re.compile(r'<block[^>]*>.*?</block>', re.DOTALL)
    note_pattern = re.compile(r'<note text="([^"]*)"/>\s*@')
    # Pattern to remove whitespace before punctuation
    ws_before_punct = re.compile(r'\s+([.,!?:;])')

    # Find all documents
    documents = re.findall(doc_pattern, vert_content)
    processed_documents = {}
    for doc in tqdm(documents):
        # Extract metadata
        metadata_match = re.search(metadata_pattern, doc)
        if metadata_match:
            doc_id = metadata_match.group(1)
            signatura = metadata_match.group(2)
            archiv = metadata_match.group(3)
            pispohl = metadata_match.group(4)
            pisvek = metadata_match.group(5)
            pisvzdel = metadata_match.group(6)
            pispobyt = metadata_match.group(7)
            pister = metadata_match.group(8)
            vztah = metadata_match.group(9)
            adrpohl = metadata_match.group(10)
            adrvek = metadata_match.group(11)
            adrvzdel = metadata_match.group(12)
            rok = metadata_match.group(13)
            forma = metadata_match.group(14)

            # Map single-letter gender codes to Czech labels
            pispohl = "Žena" if pispohl == "F" else ("Muž" if pispohl == "M" else pispohl)
            adrpohl = "Žena" if adrpohl == "F" else ("Muž" if adrpohl == "M" else adrpohl)

            metadata_str = (f"Pohlaví pisatele: {pispohl}, "
                            f"Pobyt pisatele: {pispobyt}, "
                            f"Počet dětí pisatele: {pister}, "
                            f"Pohlaví adresáta: {adrpohl}, "
                            f"Rok: {rok}, ")
        else:
            raise ValueError("Metadata not found in document")

        # Process each <block> inside the document separately
        for bid, block in enumerate(re.findall(block_pattern, doc)):
            # Replace '<note text="..."/> @' with the note text wrapped in @...@
            block = note_pattern.sub(r'@\1@', block)

            # Keep only the token column of each vertical line and join the tokens
            tokens = [line.split("\t")[0].strip() for line in block.split("\n") if line.strip() != ""]
            doc_text = " ".join(tokens)

            # Remove any remaining <...> tags
            doc_text = re.sub(r'<[^>]*>', '', doc_text)
            # Collapse runs of whitespace into a single space
            doc_text = re.sub(r'\s+', ' ', doc_text).strip()
            # Remove whitespace before punctuation (., !, ?, :, ;)
            doc_text = re.sub(ws_before_punct, r'\1', doc_text)

            # Some blocks come out empty (e.g. 08A009N in the oral data); skip them
            if doc_text.strip() == "":
                continue

            processed_documents[f"{doc_id}_{bid}"] = metadata_str + "\n" + doc_text

    return processed_documents
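
# Each entry returned by process_vert_format pairs a synthetic key "<doc id>_<block index>"
# with the metadata preamble, a newline, and the cleaned block text, roughly (placeholder
# values, not real corpus data):
#   {"08A009N_0": "Pohlaví pisatele: Žena, Pobyt pisatele: ..., ...\n<cleaned block text>"}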

# Read the content from the file
with open(TARGET, "r") as f:
    vert_content = f.read()

# Process the content
processed_documents = process_vert_format(vert_content)

# Write all documents into a single JSONL file at .data/hf_dataset/cnc_ksk/test.jsonl
OF = ".data/hf_dataset/cnc_ksk/test.jsonl"
os.makedirs(os.path.dirname(OF), exist_ok=True)
with jsonlines.open(OF, "w") as writer:
    for doc_id, doc in processed_documents.items():
        writer.write({"text": doc, "id": doc_id})
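
# Optional sanity check (illustrative addition, not part of the original upload): read the
# output back with jsonlines, which is already imported above, and report the record count.
with jsonlines.open(OF, "r") as reader:
    n_records = sum(1 for _ in reader)
print(f"Wrote {n_records} records to {OF}")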