import os

import datasets

_CITATION = """\
@article{Stan2011442,
    author = {Adriana Stan and Junichi Yamagishi and Simon King and Matthew Aylett},
    title = {The {R}omanian speech synthesis ({RSS}) corpus: Building a high quality {HMM}-based speech synthesis system using a high sampling rate},
    journal = {Speech Communication},
    volume = {53},
    number = {3},
    pages = {442--450},
    note = {},
    abstract = {This paper first introduces a newly-recorded high quality Romanian speech corpus designed for speech synthesis, called ''RSS'', along with Romanian front-end text processing modules and HMM-based synthetic voices built from the corpus. All of these are now freely available for academic use in order to promote Romanian speech technology research. The RSS corpus comprises 3500 training sentences and 500 test sentences uttered by a female speaker and was recorded using multiple microphones at 96 kHz sampling frequency in a hemianechoic chamber. The details of the new Romanian text processor we have developed are also given. Using the database, we then revisit some basic configuration choices of speech synthesis, such as waveform sampling frequency and auditory frequency warping scale, with the aim of improving speaker similarity, which is an acknowledged weakness of current HMM-based speech synthesisers. As we demonstrate using perceptual tests, these configuration choices can make substantial differences to the quality of the synthetic speech. Contrary to common practice in automatic speech recognition, higher waveform sampling frequencies can offer enhanced feature extraction and improved speaker similarity for HMM-based speech synthesis.},
    doi = {10.1016/j.specom.2010.12.002},
    issn = {0167-6393},
    keywords = {Speech synthesis, HTS, Romanian, HMMs, Sampling frequency, Auditory scale},
    url = {http://www.sciencedirect.com/science/article/pii/S0167639310002074},
    year = 2011
}
"""

_DESCRIPTION = """\
The Romanian speech synthesis (RSS) corpus was recorded in a hemianechoic chamber (anechoic walls and ceiling; floor partially anechoic) at the University of Edinburgh. We used three high quality studio microphones: a Neumann u89i (large diaphragm condenser), a Sennheiser MKH 800 (small diaphragm condenser with very wide bandwidth) and a DPA 4035 (headset-mounted condenser). Although the current release includes only speech data recorded via Sennheiser MKH 800, we may release speech data recorded via other microphones in the future. All recordings were made at 96 kHz sampling frequency and 24 bits per sample, then downsampled to 48 kHz sampling frequency. For recording, downsampling and bit rate conversion, we used ProTools HD hardware and software. We conducted 8 sessions over the course of a month, recording about 500 sentences in each session. At the start of each session, the speaker listened to a previously recorded sample, in order to attain a similar voice quality and intonation.
"""

_HOMEPAGE = "http://romaniantts.com/rssdb/"

_LICENSE = "CCPL"

_URLS = {
    "ro": "RomanianDB_v.0.8.1.tgz",
}

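# Note (an assumption, not stated in the original script): the entry in _URLS is a relative
# path, so the "RomanianDB_v.0.8.1.tgz" archive is presumably expected to live alongside this
# loading script and be resolved by `dl_manager`. After extraction it should contain
# "training" and "testing" directories laid out as assumed in `_generate_examples` below.
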
class RomanianSpeechSynthesis(datasets.GeneratorBasedBuilder):
    """Romanian speech synthesis (RSS) corpus: 48 kHz recordings of a single female speaker with sentence-level transcripts."""

    VERSION = datasets.Version("0.8.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ro", version=VERSION, description="Romanian speech synthesis (RSS) corpus"),
    ]

    DEFAULT_CONFIG_NAME = "ro"

    def _info(self):
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "datapath": data_dir,
                    "split": "training",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "datapath": data_dir,
                    "split": "testing",
                },
            ),
        ]

    def _generate_examples(self, datapath, split):
        key = 0
        audio_folder = "wav"
        # Each subfolder of "<split>/wav" has a matching transcript "<split>/text/<folder>.txt"
        # whose lines look like "001 <sentence>"; the three-character index maps to the audio
        # file "adr_<folder>_<index>.wav" inside that subfolder.
        for folder in sorted(os.listdir(os.path.join(datapath, split, audio_folder))):
            with open(os.path.join(datapath, split, "text", folder + ".txt"), encoding="utf-8") as text_file:
                for line in text_file:
                    index = line[:3]
                    sentence = line[4:].strip()
                    filename = f"adr_{folder}_{index}.wav"
                    local_path = os.path.join(split, audio_folder, folder, filename)
                    yield key, {
                        "sentence": sentence,
                        "audio": os.path.join(datapath, local_path),
                    }
                    key += 1
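
# Example usage (a minimal sketch, not part of the loading script): assuming this file is
# saved locally, e.g. as "romanian_speech_synthesis.py", with the archive named in _URLS
# next to it, and that the `datasets` audio extra is installed, the corpus could be loaded
# through the standard script entry point:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("romanian_speech_synthesis.py", "ro")
#     print(ds["train"][0]["sentence"])
#     print(ds["train"][0]["audio"]["sampling_rate"])  # expected: 48000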