import os
import random
import json
import hashlib
import glob

import pandas as pd
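# This script converts the VocalSet split lists into instruction-tuning JSONL
# files: it reads the {split}_t.txt path lists under PATH, builds one
# instruction/response record per clip for vocal-technique classification,
# assigns each record a deterministic uuid, and writes VocalSet_train.jsonl,
# VocalSet_valid.jsonl, and VocalSet_test.jsonl.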
number_to_letter = {
    0: "A",
    1: "B",
    2: "C",
    3: "D",
}

class2id = {'belt': 0, 'breathy': 1, 'inhaled': 2, 'lip_trill': 3, 'spoken': 4,
            'straight': 5, 'trill': 6, 'trillo': 7, 'vibrato': 8, 'vocal_fry': 9}
id2class = {v: k for k, v in class2id.items()}

PATH = "/work/fast_data_yinghao/VocalSet"
data_samples = []
for split in ["train", "valid", "test"]:
    # Each split file lists one relative audio path per line: <technique>/<file name>.
    metadata = pd.read_csv(os.path.join(PATH, f'{split}_t.txt'), names=['audio_path'])
    for audio_path in metadata['audio_path']:
        label = audio_path.split('/')[0]    # technique name is the top-level directory
        audioid = audio_path.split('/')[1]  # audio file name
        data_sample = {
            "instruction": "Please recognise the vocal technique in the given audio.",
            "input": f"<|SOA|>{audioid}<|EOA|>",
            "output": label,
            "uuid": audio_path,
            "audioid": audio_path,
            "split": [split if split != "valid" else "dev"],
            "task_type": {"major": ["global_MIR"], "minor": ["vocal_technique_classification"]},
            "domain": "music",
            "source": "internet",
            "other": {"tag": "null"}
        }
        data_samples.append(data_sample)
existed_uuid_list = set()
all_instruments = set(class2id.keys())
options = ", ".join(class2id.keys())

for data_sample in data_samples:
    # Append the list of candidate answers to the instruction.
    data_sample["instruction"] += f" Output from the following options: {options}. "

    # Derive a short deterministic id from the sample contents.
    uuid_string = f"{data_sample['instruction']}#{data_sample['input']}#{data_sample['output']}"
    unique_id = hashlib.md5(uuid_string.encode()).hexdigest()[:16]

    # On a collision, mix in a SHA-1 digest and rehash to obtain a fresh id.
    if unique_id in existed_uuid_list:
        sha1_hash = hashlib.sha1(uuid_string.encode()).hexdigest()[:16]
        unique_id = hashlib.md5((unique_id + sha1_hash).encode()).hexdigest()[:16]

    existed_uuid_list.add(unique_id)
    data_sample["uuid"] = unique_id
for split in ["train", "dev", "test"]: |
|
if split == "dev": |
|
name = "valid" |
|
else: |
|
name = split |
|
with open(f"VocalSet_{name}.jsonl", 'w') as outfile: |
|
for sample in data_samples: |
|
if sample["split"][0] == split: |
|
json.dump(sample, outfile) |
|
outfile.write('\n') |
|
outfile.close() |
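# Illustrative shape of one written record (angle-bracketed values are placeholders):
# {"instruction": "Please recognise the vocal technique in the given audio. Output from the following options: belt, breathy, inhaled, lip_trill, spoken, straight, trill, trillo, vibrato, vocal_fry. ",
#  "input": "<|SOA|><audio file name><|EOA|>",
#  "output": "<technique label>",
#  "uuid": "<16-char md5 prefix>",
#  "audioid": "<technique>/<audio file name>",
#  "split": ["train"],
#  "task_type": {"major": ["global_MIR"], "minor": ["vocal_technique_classification"]},
#  "domain": "music", "source": "internet", "other": {"tag": "null"}}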