"""TV3Parla.""" |
|
|
|
import re |
|
|
|
import datasets |
|
from datasets.tasks import AutomaticSpeechRecognition |
|
|
|
|
|
_CITATION = """\
@inproceedings{kulebi18_iberspeech,
  author={Baybars Külebi and Alp Öktem},
  title={{Building an Open Source Automatic Speech Recognition System for Catalan}},
  year=2018,
  booktitle={Proc. IberSPEECH 2018},
  pages={25--29},
  doi={10.21437/IberSPEECH.2018-6}
}
"""
|
_DESCRIPTION = """\
This corpus includes 240 hours of Catalan speech from broadcast material.
The details of segmentation, data processing and model training are explained in Külebi & Öktem (2018).
The content is owned by Corporació Catalana de Mitjans Audiovisuals, SA (CCMA);
we processed their material and hereby make it available under their terms of use.

This project was supported by the Softcatalà Association.
"""
|
_HOMEPAGE = "https://collectivat.cat/asr#tv3parla"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International"

_REPO = "https://huggingface.co/datasets/collectivat/tv3_parla/resolve/main/"
_URLS = {
    "transcripts": _REPO + "tv3_0.3_{split}.transcription",
    "audio": _REPO + "tv3_0.3.tar.gz",
}
_SPLITS = [datasets.Split.TRAIN, datasets.Split.TEST]

# Transcript files contain one utterance per line, in the form:
#   <s> transcription text </s> (utterance_id)
_PATTERN = re.compile(r"^<s> (?P<text>.+) </s> \((?P<id>\S+)\)$")
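# For example (the utterance id here is made up, purely for illustration):
#
#   >>> match = _PATTERN.match("<s> bon dia </s> (some_utterance_id)")
#   >>> match["text"], match["id"]
#   ('bon dia', 'some_utterance_id')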
|
|
class Tv3Parla(datasets.GeneratorBasedBuilder):
    """Builder for the TV3Parla corpus."""

    VERSION = datasets.Version("0.3.0")
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.features.Audio(),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(transcription_column="text")],
        )
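
    # Roughly, each example yielded by _generate_examples fills this schema
    # like so (values here are illustrative, not taken from the corpus):
    #
    #   {"path": "tv3_0.3/train/utt.wav",
    #    "audio": {"path": "tv3_0.3/train/utt.wav", "bytes": b"RIFF..."},
    #    "text": "bon dia"}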
|
    def _split_generators(self, dl_manager):
        # Build one {"transcripts": ..., "audio": ...} URL mapping per split.
        # Only the transcripts URL varies with the split; both splits share
        # the same audio archive, which _generate_examples filters by path.
        urls = {
            split: {key: url.format(split=split) for key, url in _URLS.items()}
            for split in _SPLITS
        }
        dl_dir = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "transcripts_path": dl_dir[split]["transcripts"],
                    # iter_archive yields (path, file object) pairs straight
                    # from the tarball, without extracting it to disk.
                    "audio_files": dl_manager.iter_archive(dl_dir[split]["audio"]),
                    "split": split,
                },
            )
            for split in _SPLITS
        ]
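
    # For reference, the `urls` mapping above expands to (split keys shown by
    # their string names):
    #
    #   {
    #       "train": {"transcripts": _REPO + "tv3_0.3_train.transcription",
    #                 "audio": _REPO + "tv3_0.3.tar.gz"},
    #       "test": {"transcripts": _REPO + "tv3_0.3_test.transcription",
    #                "audio": _REPO + "tv3_0.3.tar.gz"},
    #   }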
|
    def _generate_examples(self, transcripts_path, audio_files, split):
        # First pass: map utterance id -> transcription for this split.
        transcripts = {}
        with open(transcripts_path, encoding="utf-8") as transcripts_file:
            for line in transcripts_file:
                match = _PATTERN.match(line)
                if match is None:
                    # Skip lines that do not follow the expected format
                    # instead of crashing on a None match.
                    continue
                transcripts[match["id"]] = match["text"]

        # Second pass: walk the shared audio archive, keeping only the WAV
        # files that belong to this split and have a transcription.
        for key, (path, file) in enumerate(audio_files):
            if path.endswith(".wav") and f"/{split}/" in path:
                # The utterance id is the file name minus the ".wav" suffix.
                uid = path.split("/")[-1][:-4]
                if uid not in transcripts:
                    continue
                text = transcripts.pop(uid)
                audio = {"path": path, "bytes": file.read()}
                yield key, {"path": path, "audio": audio, "text": text}
|
|
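# A minimal usage sketch, assuming this script is hosted on the Hub under the
# "collectivat/tv3_parla" dataset id implied by _REPO above:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("collectivat/tv3_parla", split="train")
#   print(ds[0]["text"])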