File size: 4,667 Bytes
bb01eca |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 |
import json
import argparse
from os.path import join, exists, isfile
from os import makedirs, listdir
import re
import hashlib
class DataSet:
    """Accumulator for one Kaldi data directory.

    Collects the lines of the standard Kaldi data files (segments, text,
    wav.scp, utt2spk, spk2gender) in memory and writes them, deduplicated
    and sorted, under ``<workspace>/<name>`` via :meth:`save`.
    """
    def __init__(self, name, workspace):
        self.segments = []    # "<utt_id> <rec_id> <start> <end>"
        self.spk2gender = []  # "<spk> <gender>"
        self.text = []        # "<utt_id> <transcript>"
        self.utt2spk = []     # "<utt_id> <spk>"
        self.wavscp = []      # "<rec_id> <sox extraction pipe>"
        self.workspace = join(workspace, name)
    def add_utterance(self, utt, recording):
        """Register a single utterance of ``recording`` in every Kaldi table.

        :param utt: dict with keys "text", "gender", "index", "start", "end"
        :param recording: filename shaped
            "<arrangement>-<performance>-<country>-<gender>-<user>.<ext>"
            (the 4-char extension is stripped)
        """
        text = utt["text"]
        arrangement, performance, country, gender, user = recording[:-4].split("-")
        # the following mapping is necessary for errors in gender in country IN:
        # trust the per-utterance gender over the one embedded in the filename,
        # stripping bogus "none" markers case-insensitively.
        insensitive_none = re.compile(re.escape('none'), re.IGNORECASE)
        gender = insensitive_none.sub('', utt["gender"])
        # NOTE: gender is already 'none'-stripped above, so only user needs it here.
        spk = "{}{}".format(gender.upper(), insensitive_none.sub('', user))
        rec_id = recording[:-4]
        # Prefixing utt_id with spk keeps Kaldi's required "utt ids sorted by speaker" property.
        utt_id = "{}-{}-{}-{}-{}-{:03}".format(spk, arrangement, performance, country, gender.upper(), utt["index"])
        start = utt["start"]
        end = utt["end"]
        wavpath = join(country, "{}{}".format(country, "Vocals"), recording)
        self._add_segment(utt_id, rec_id, start, end)
        self._add_spk2gender(spk, gender)
        self._add_text(utt_id, text)
        self._add_utt2spk(utt_id, spk)
        self._add_wavscp(rec_id, wavpath)
    def _add_segment(self, utt_id, rec_id, start, end):
        # Kaldi segments format: <utt_id> <rec_id> <start-seconds> <end-seconds>.
        # (Parameter names were swapped relative to the call site before; the
        # emitted order was already correct.)
        self.segments.append("{} {} {:.3f} {:.3f}".format(utt_id, rec_id, start, end))
    def _add_spk2gender(self, spk, gender):
        self.spk2gender.append("{} {}".format(spk, gender))
    def _add_text(self, utt_id, text):
        self.text.append("{} {}".format(utt_id, text))
    def _add_utt2spk(self, utt_id, spk):
        self.utt2spk.append("{} {}".format(utt_id, spk))
    def _add_wavscp(self, rec_id, wavpath):
        # Piped wav.scp entry: sox downmixes to mono 16 kHz on the fly.
        self.wavscp.append("{} sox wav/{} -G -t wav -r 16000 -c 1 - remix 1 | ".format(rec_id, wavpath))
    def list2file(self, outfile, list_data):
        """Write the unique lines of ``list_data`` to ``outfile``, sorted.

        Sorting happens *after* deduplication: ``list(set(...))`` yields a
        hash-randomized order, which would silently break Kaldi's requirement
        that data files be sorted.
        """
        with open(outfile, "w") as f:
            for line in sorted(set(list_data)):
                f.write("{}\n".format(line))
    def save(self):
        """Create the workspace directory (if needed) and write all five Kaldi files."""
        if not exists(self.workspace):
            makedirs(self.workspace)
        self.list2file(join(self.workspace, "spk2gender"), self.spk2gender)
        self.list2file(join(self.workspace, "text"), self.text)
        self.list2file(join(self.workspace, "wav.scp"), self.wavscp)
        self.list2file(join(self.workspace, "utt2spk"), self.utt2spk)
        self.list2file(join(self.workspace, "segments"), self.segments)
def read_json(filepath):
    """Load a JSON file and return its content.

    Some metadata files were created with a stray extra first line; when the
    parse fails for that reason we fall back to an empty list instead of crashing.
    """
    try:
        with open(filepath) as data_file:
            return json.load(data_file)
    except json.decoder.JSONDecodeError:
        return []
def map_rec2chec(db_path, countries):
    """
    Read all the original audio tracks and create a dict {<checksum>: <recording>}
    :param db_path: string, path to root of DAMP Sing!
    :param countries: iterable of country codes; each is scanned at <db_path>/<CC>/<CC>Vocals
    :return: dict mapping hex md5 digest of each .m4a file's bytes to its filename
    """
    rec2chec = {}
    for country in countries:
        # hoist the loop-invariant directory path
        vocals_dir = join(db_path, country, country + "Vocals")
        for record in listdir(vocals_dir):
            if not record.endswith(".m4a"):
                continue
            # `with` closes the handle; the original leaked one open file per recording
            with open(join(vocals_dir, record), 'rb') as audio:
                rec2chec[hashlib.md5(audio.read()).hexdigest()] = record
    return rec2chec
def main(args):
    """Build the Kaldi data directory for one DAMP Sing! subset.

    Country coverage grows with the subset name: every subset includes GB,
    "train3"/"train30" add US and AU, and "train30" additionally includes
    the remaining 27 countries.
    """
    countries = ["GB"]
    if args.dset in ("train3", "train30"):
        countries += ["US", "AU"]
    if args.dset == "train30":
        countries += ['AE', 'AR', 'BR', 'CL', 'CN', 'DE', 'ES', 'FR', 'HU',
                      'ID', 'IN', 'IQ', 'IR', 'IT', 'JP', 'KR', 'MX', 'MY',
                      'NO', 'PH', 'PT', 'RU', 'SA', 'SG', 'TH', 'VN', 'ZA']
    # utterance metadata references recordings by md5 checksum of the audio file
    performances = map_rec2chec(args.db_path, countries)
    dataset = DataSet(args.dset, args.workspace)
    for utt in read_json(args.utterances):
        dataset.add_utterance(utt, performances[utt["wavfile"]])
    dataset.save()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("workspace", type=str, help="Path where the output files will be saved")
    parser.add_argument("db_path", type=str, help="Path to DAMP 300x30x2 database")
    # nargs="?" is required for the default to take effect: argparse silently
    # ignores `default` on a mandatory positional argument, so the documented
    # fallback to metadata.json never worked before.
    parser.add_argument("utterances", type=str, nargs="?", help="Path to utterance details in json format",
                        default="metadata.json")
    parser.add_argument("dset", type=str, help="Name of the dataset")
    args = parser.parse_args()
    main(args)
|