import csv
import json
import os
from collections import defaultdict

import datasets

logger = datasets.utils.logging.get_logger(__name__)
_DESCRIPTION = """
A small-scale, single-language (English) ASR dataset derived from VoxPopuli.
"""
_CITATION = """
@inproceedings{wang-etal-2021-voxpopuli,
    title = "trevor",
    author = "diego",
    booktitle = "copy of voxpopuli",
    month = aug,
    year = "2023",
    publisher = "None",
    url = "",
}
"""
_HOMEPAGE = ""
_LICENSE = "None"
_ASR_LANGUAGES = ["en"]
# Kept for parity with the original VoxPopuli script. Note that "en_accented"
# is not added to _LANGUAGES, so the "en_accented" branches below are never
# taken unless it is appended here.
_ASR_ACCENTED_LANGUAGES = ["en_accented"]
_LANGUAGES = _ASR_LANGUAGES
_BASE_DATA_DIR = "data/"
_N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{lang}/{split}/{split}_part_{n_shard}.tar.gz"
_METADATA_PATH = _BASE_DATA_DIR + "{lang}/asr_{split}.tsv"
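# For example, with lang="en", split="train", n_shard=0 the templates above
# resolve to "data/en/train/train_part_0.tar.gz" and "data/en/asr_train.tsv".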


class TrevorConfig(datasets.BuilderConfig):
    """BuilderConfig for Trevor."""

    def __init__(self, name, languages="all", **kwargs):
        """
        Args:
            name: `string`:
                name of a config: either one of the supported languages or
                "multilang" for several languages.
            languages: `"all"` or `List[string]`:
                only used when `name` is "multilang"; either "all" for all
                available languages, excluding accented ones (default), or a
                custom list of languages.
            **kwargs: keyword arguments forwarded to super.
        """
        if name == "multilang":
            self.languages = _ASR_LANGUAGES if languages == "all" else languages
            name = "multilang" if languages == "all" else "_".join(languages)
        else:
            self.languages = [name]
        super().__init__(name=name, **kwargs)
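
# A minimal loading sketch for this config scheme. The repo id
# "dcassine/trevor" is an assumption, not confirmed by the script; substitute
# the actual dataset path:
#
#     import datasets
#
#     # single-language config
#     ds = datasets.load_dataset("dcassine/trevor", "en")
#     # "multilang" with the default languages="all" resolves to
#     # _ASR_LANGUAGES, which is just ["en"] here.
#     ds_all = datasets.load_dataset("dcassine/trevor", "multilang")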


class Trevor(datasets.GeneratorBasedBuilder):
    """The Trevor dataset."""

    VERSION = datasets.Version("1.3.0")  # TODO: version

    BUILDER_CONFIGS = [
        TrevorConfig(
            name=name,
            version=datasets.Version("1.3.0"),
        )
        for name in _LANGUAGES + ["multilang"]
    ]

    # Audio rows are large, so a smaller writer batch size keeps memory
    # bounded while writing Arrow files.
    DEFAULT_WRITER_BATCH_SIZE = 256

    def _info(self):
        features = datasets.Features(
            {
                "audio_id": datasets.Value("string"),
                "language": datasets.ClassLabel(names=_LANGUAGES),
                "audio": datasets.Audio(sampling_rate=16_000),
                "raw_text": datasets.Value("string"),
                "normalized_text": datasets.Value("string"),
                "gender": datasets.Value("string"),  # TODO: ClassLabel?
                "speaker_id": datasets.Value("string"),
                "is_gold_transcript": datasets.Value("bool"),
                "accent": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
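
    # A record yielded by _generate_examples below looks roughly like this
    # (all values are illustrative, not taken from the data):
    # {"audio_id": "...", "language": "en",
    #  "audio": {"path": "<path>", "bytes": b"..."},
    #  "raw_text": "...", "normalized_text": "...", "gender": "...",
    #  "speaker_id": "...", "is_gold_transcript": "...", "accent": "..."}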

    def _split_generators(self, dl_manager):
        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
        with open(n_shards_path) as f:
            # Expected shape of n_files.json (inferred from the indexing
            # below): {"en": {"train": <int>, "dev": <int>, "test": <int>}}
            n_shards = json.load(f)

        if self.config.name == "en_accented":
            splits = ["test"]
        else:
            splits = ["train", "dev", "test"]

        audio_urls = defaultdict(dict)
        for split in splits:
            for lang in self.config.languages:
                audio_urls[split][lang] = [
                    _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i)
                    for i in range(n_shards[lang][split])
                ]

        meta_urls = defaultdict(dict)
        for split in splits:
            for lang in self.config.languages:
                meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)

        meta_paths = dl_manager.download_and_extract(meta_urls)
        audio_paths = dl_manager.download(audio_urls)

        # In streaming mode the tar archives are iterated directly, so there
        # are no locally extracted paths; None placeholders keep the per-shard
        # structure aligned with audio_paths.
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths)
            if not dl_manager.is_streaming
            else {
                split: {lang: [None] * len(audio_paths[split][lang]) for lang in self.config.languages}
                for split in splits
            }
        )
        if self.config.name == "en_accented":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "audio_archives": {
                            lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
                            for lang, lang_archives in audio_paths["test"].items()
                        },
                        "local_extracted_archives_paths": local_extracted_audio_paths["test"],
                        "metadata_paths": meta_paths["test"],
                    },
                ),
            ]
        # The "dev" shards are exposed as the standard VALIDATION split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_archives": {
                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
                        for lang, lang_archives in audio_paths["train"].items()
                    },
                    "local_extracted_archives_paths": local_extracted_audio_paths["train"],
                    "metadata_paths": meta_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "audio_archives": {
                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
                        for lang, lang_archives in audio_paths["dev"].items()
                    },
                    "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
                    "metadata_paths": meta_paths["dev"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_archives": {
                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
                        for lang, lang_archives in audio_paths["test"].items()
                    },
                    "local_extracted_archives_paths": local_extracted_audio_paths["test"],
                    "metadata_paths": meta_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
        assert len(metadata_paths) == len(audio_archives) == len(local_extracted_archives_paths)
        features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]

        for lang in self.config.languages:
            assert len(audio_archives[lang]) == len(local_extracted_archives_paths[lang])

            meta_path = metadata_paths[lang]
            logger.info(f"Opening meta file {meta_path}")
            # The metadata TSV is expected to have an "id" column plus the
            # columns listed in `features` above.
            with open(meta_path) as f:
                metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}

            for audio_archive, local_extracted_archive_path in zip(
                audio_archives[lang], local_extracted_archives_paths[lang]
            ):
                for audio_filename, audio_file in audio_archive:
                    # Tar member names always use "/" separators, so split on
                    # "/" rather than os.sep (which is "\\" on Windows).
                    audio_id = audio_filename.split("/")[-1].split(".wav")[0]
                    path = (
                        os.path.join(local_extracted_archive_path, audio_filename)
                        if local_extracted_archive_path
                        else audio_filename
                    )
                    yield audio_id, {
                        "audio_id": audio_id,
                        "language": lang,
                        **{feature: metadata[audio_id][feature] for feature in features},
                        "audio": {"path": path, "bytes": audio_file.read()},
                    }
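

# A minimal streaming sketch. The repo id "dcassine/trevor" is an assumption;
# substitute the actual dataset path (recent versions of `datasets` may also
# require trust_remote_code=True for script-based datasets):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("dcassine/trevor", "en", split="train", streaming=True)
#     sample = next(iter(ds))
#     print(sample["audio_id"], sample["normalized_text"])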