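"""Create the OpenSubtitles subset of the dataset.

Filters the OpenSubtitles documents out of
DDSC/partial-danish-gigaword-no-twitter, truncates a set of flagged samples
to their first 200 tokens, converts each sample to the new column format,
removes duplicate texts, and saves the result as opensubtitles.parquet.
"""
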
from pathlib import Path
from typing import cast
import pandas as pd
import spacy
from datasets import Dataset, load_dataset

# KCE: document IDs flagged in a mail from Leon; these samples are truncated below
sample_to_redact = {
# Der kommer en dag
"opensub_6726481",
"opensub_6732371",
# Kollektivet
"opensub_6645818",
# Flaskepost fra P
"opensub_6666922",
"opensub_6720216",
"opensub_6958711",
# Fasandræberne
"opensub_6036947",
"opensub_6008622",
# En du elsker
"opensub_5828376",
"opensub_5828378",
# En chance til
"opensub_6177523",
# Lev stærkt
"opensub_6467655",
# Nymphomaniac
"opensub_5604391",
"opensub_5748340",
"opensub_5748494",
"opensub_5629516",
# Kvinden i buret
"opensub_5636248",
"opensub_5514603",
"opensub_5504932",
# Den skaldede frisør
"opensub_5084880",
"opensub_5031826",
# Jagten
"opensub_6929419",
"opensub_4885548",
# Melancholia
"opensub_4421330",
"opensub_4406991",
"opensub_4418817",
# Ambassadøren
"opensub_4557721",
# Antichrist
"opensub_5511502",
"opensub_3938655",
"opensub_3636940",
"opensub_3564521",
"opensub_3562215",
# En kongelig affære
"opensub_4725493",
"opensub_4725160",
"opensub_4725159",
"opensub_4916871",
"opensub_5186746",
# Brødre
"opensub_233943",
"opensub_87475",
}
column_order = [
"text",
"source",
"id",
"added",
"created",
"license",
"domain",
"metadata",
]


def convert_sample(example: dict) -> dict:
text = example["text"]
if example["doc_id"] in sample_to_redact:
nlp = spacy.blank("da")
doc = nlp(text)
        text = doc[:200].text  # keep only the first 200 tokens
new_example = dict(
        text_new=text,  # temporary column name; renamed to "text" in main()
id=example["doc_id"],
source="opensubtitles",
domain="Conversation",
license="Creative Commons Legal Code\n\nCC0 1.0 Universal",
added="2025-01-02",
created="1920-01-01, 2018-01-01", # assuming v2018
metadata={"source-pretty": "OpenSubtitles"},
)
    return new_example


def main():
ds = load_dataset("DDSC/partial-danish-gigaword-no-twitter", split="train")
ds = cast(Dataset, ds)
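    # keep only the OpenSubtitles ("opensub") subset of Danish Gigaword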
ds = ds.filter(lambda x: x["source"] == "opensub", num_proc=4)
ds = ds.map(convert_sample, num_proc=4)
ds = ds.select_columns(column_order[1:] + ["text_new"])
ds = ds.rename_columns({"text_new": "text"})
# ensure order
ds = ds.select_columns(column_order)
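
    # drop exact duplicate texts via pandas, keeping the first occurrence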
df = ds.to_pandas()
df = cast(pd.DataFrame, df)
dedup_df = df.drop_duplicates(keep="first", subset=["text"])
print("N. duplicates: ", df.shape[0] - dedup_df.shape[0]) # 2422
ds = ds.select(dedup_df.index)
assert len(set(ds["text"])) == len(ds)
save_path = Path(__file__).parent / "opensubtitles.parquet"
    ds.to_parquet(save_path)


if __name__ == "__main__":
main()