"""The TEDLIUM dataset for automatic speech recognition.""" |
|
import csv |
|
|
|
import datasets |
|
from datasets.tasks import AutomaticSpeechRecognition |
|
|
|
from huggingface_hub import list_repo_files |
|
|
|
|
|
import pyarrow.parquet as pq |
|
import pyarrow as pa |
|
|
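
# A minimal usage sketch. The repo id passed to `load_dataset` is an assumption; substitute
# wherever this loading script is actually hosted (or pass a local path to the script):
#
#     from datasets import load_dataset
#
#     tedlium = load_dataset("distil-whisper/tedlium", "release3", split="validation", streaming=True)
#     sample = next(iter(tedlium))
#     print(sample["text"])
#     print(sample["whisper_transcript"])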

_DESCRIPTION = """\
The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz. It contains about 118 hours of speech.
"""

_HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC97S62"

_LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"

# Hub dataset repo holding the TED-LIUM audio and reference transcriptions as Parquet shards.
_DATA_REPO_ID = "sanchit-gandhi/tedlium-data"

# Whisper-generated pseudo-labels (greedy decoding), stored as one CSV file per split.
_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy/resolve/main/tedlium"

_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"
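# For example, the validation-split pseudo-labels are downloaded from
# _WHISPER_TRANSCRIPT_URL + "/validation-transcription.csv".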


class TedLium(datasets.ArrowBasedBuilder):
    """The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz. It contains about 118 hours of speech."""

    VERSION = datasets.Version("1.1.0")

    DEFAULT_CONFIG_NAME = "release3"
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="release3", version=VERSION, description=_DESCRIPTION),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "text": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "gender": datasets.features.ClassLabel(names=["unknown", "female", "male"]),
                "file": datasets.Value("string"),
                "id": datasets.Value("string"),
                "whisper_transcript": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "text"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        data_repo_download = f"https://huggingface.co/datasets/{_DATA_REPO_ID}/resolve/main/"
        all_files = list_repo_files(_DATA_REPO_ID, repo_type="dataset")

        # The data repo stores one set of Parquet shards per split under the `data/` prefix.
        train_files = [file for file in all_files if file.startswith("data/train")]
        validation_files = [file for file in all_files if file.startswith("data/validation")]
        test_files = [file for file in all_files if file.startswith("data/test")]

        split_to_ids = {
            "train": train_files,
            "validation": validation_files,
            "test": test_files,
        }

        dl_urls = {}
        for split, split_ids in split_to_ids.items():
            dl_urls[split] = [data_repo_download + source_id for source_id in split_ids]
        archive_paths = dl_manager.download(dl_urls)

        # In non-streaming mode the downloaded files are extracted locally; in streaming mode
        # they are read on the fly, so placeholder `None` paths are used instead.
        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths)
            if not dl_manager.is_streaming
            else {split: [None] * len(archive_paths[split]) for split in split_to_ids}
        )

        transcription_urls = {
            split: _WHISPER_TRANSCRIPT_URLs.format(split=split.replace(".", "-")) for split in split_to_ids
        }
        transcript_archive_path = dl_manager.download(transcription_urls)

        train_split = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths["train"],
                    "archives": [dl_manager.iter_files(path) for path in archive_paths["train"]],
                    "whisper_transcript": transcript_archive_path["train"],
                },
            ),
        ]
        dev_split = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths["validation"],
                    "archives": [dl_manager.iter_files(path) for path in archive_paths["validation"]],
                    "whisper_transcript": transcript_archive_path["validation"],
                },
            ),
        ]
        test_split = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths["test"],
                    "archives": [dl_manager.iter_files(path) for path in archive_paths["test"]],
                    "whisper_transcript": transcript_archive_path["test"],
                },
            ),
        ]
        return train_split + dev_split + test_split
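
    # The per-split transcription CSVs are expected to contain a `file_id` column whose values
    # match the Parquet `id` column, plus a `whisper_transcript` column with the pseudo-label
    # text (illustrative layout, no real rows shown):
    #
    #     file_id,whisper_transcript
    #     <segment id>,<pseudo-labelled text>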

    def _generate_tables(self, local_extracted_archive_paths, archives, whisper_transcript):
        # Load the Whisper pseudo-labels into a dict keyed by segment id.
        whisper_transcriptions = dict()
        with open(whisper_transcript, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=",")
            for line in reader:
                whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]

        idx = 0
        for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
            for audio_file in archive:
                with open(audio_file, "rb") as f:
                    pf = pq.ParquetFile(f)
                    for record_batch in pf.iter_batches():
                        pa_table = pa.Table.from_batches([record_batch])

                        # Look up the pseudo-label for each segment id: segments marked
                        # "ignore_time_segment_in_scoring" keep that marker, and ids missing
                        # from the CSV get a null transcript.
                        batch_whisper_transcript = []
                        for text, file_id in zip(pa_table["text"], pa_table["id"]):
                            transcription = whisper_transcriptions.get(str(file_id), None)
                            batch_whisper_transcript.append(
                                transcription
                                if str(text) != "ignore_time_segment_in_scoring"
                                else "ignore_time_segment_in_scoring"
                            )

                        batch_whisper_transcript = pa.array(batch_whisper_transcript, pa.string())
                        pa_table = pa_table.append_column("whisper_transcript", batch_whisper_transcript)
                        yield idx, pa_table
                        idx += 1