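"""Convert the VocalSet singing-technique split lists into instruction-tuning JSONL files.

Reads {train,valid,test}_t.txt (one relative audio path per line) from the VocalSet
directory, builds one instruction/input/output record per clip for the
vocal_technique_classification task, assigns a content-derived uuid, and writes
VocalSet_{train,valid,test}.jsonl.
"""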
import os
import random
import json
import pandas as pd
import hashlib
import glob

number_to_letter = {
    0: "A",
    1: "B",
    2: "C",
    3: "D"
}
class2id = {'belt': 0, 'breathy': 1, 'inhaled': 2, 'lip_trill': 3, 'spoken': 4, 'straight': 5, 'trill': 6, 'trillo': 7, 'vibrato': 8, 'vocal_fry': 9}
id2class = {v: k for k, v in class2id.items()}
PATH = "/work/fast_data_yinghao/VocalSet"
data_samples = []
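# Each {split}_t.txt is expected to hold one relative audio path per line, with the
# technique name as the first path component (e.g. "vibrato/some_clip.wav"; the file
# name here is illustrative and depends on the local VocalSet export).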
for split in ["train", "valid", "test"]:
    metadata = pd.read_csv(filepath_or_buffer=os.path.join(PATH, f'{split}_t.txt'),
                           names=['audio_path'])
    for index in range(metadata.shape[0]):
        audio_path = metadata.iloc[index, 0]
        label = audio_path.split('/')[0]     # technique name is the first path component
        audioid = audio_path.split('/')[1]   # audio file name is the second component
        data_sample = {
            "instruction": "Please recognise the vocal technique in the given audio.",
"input": f"<|SOA|>f'{audioid}'<|EOA|>",
"output": label,
"uuid": audio_path,
"audioid":audio_path,
"split": [split if split != "valid" else "dev"],
"task_type": {"major": ["global_MIR"], "minor": ["vocal_technique_classification"]},
"domain": "music",
"source": "internet",
"other": {"tag":"null"}
}
        data_samples.append(data_sample)
        # if index > 2:
        #     break

existed_uuid_list = set()
all_instruments = set(k for k,v in class2id.items())
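# Note: number_to_letter, random, and all_instruments are only needed by the
# multiple-choice variant that is commented out below; the active code always
# appends the full option list to the instruction.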
for data_sample in data_samples:
    # Append the full list of candidate techniques to every instruction
    # (the multiple-choice A/B/C/D variant below is kept for reference but disabled).
    # if data_sample["split"][0] != "test":
    data_sample["instruction"] = data_sample["instruction"] + " Output from the following options: "
    for k, v in class2id.items():
        data_sample["instruction"] += f"{k}, "
    data_sample["instruction"] = data_sample["instruction"][:-2] + ". "
    # else:
    #     correct_instrument = data_sample["output"]
    #     incorrect_instruments = list(all_instruments - set(correct_instrument))
    #     if len(incorrect_instruments) >= 3:
    #         choices = random.sample(incorrect_instruments, 3) + ["CORRECT:" + correct_instrument]
    #         random.shuffle(choices)
    #         for idx, choice in enumerate(choices):
    #             if choice.startswith("CORRECT:"):
    #                 choices[idx] = choice[8:]
    #                 data_sample["output"] = number_to_letter[idx]
    #         data_sample["input"] = f"{data_sample['input']}. Choose from: A.{choices[0]} B.{choices[1]} C.{choices[2]} D.{choices[3]} "
    # Derive a deterministic uuid from the sample content.
    uuid_string = f"{data_sample['instruction']}#{data_sample['input']}#{data_sample['output']}"
    unique_id = hashlib.md5(uuid_string.encode()).hexdigest()[:16]  # keep only the first 16 hex characters
    if unique_id in existed_uuid_list:
        # On collision, truncate a SHA1 of the same string to 16 characters so the lengths match,
        # concatenate it with the MD5, and take a new MD5 as the final uuid.
        sha1_hash = hashlib.sha1(uuid_string.encode()).hexdigest()[:16]
        unique_id = hashlib.md5((unique_id + sha1_hash).encode()).hexdigest()[:16]
    existed_uuid_list.add(unique_id)
    data_sample["uuid"] = unique_id
# Save to JSONL format, one file per split.
for split in ["train", "dev", "test"]:
    # The internal split name "dev" maps back to "valid" in the output file name.
    name = "valid" if split == "dev" else split
    with open(f"VocalSet_{name}.jsonl", 'w') as outfile:
        for sample in data_samples:
            if sample["split"][0] == split:
                json.dump(sample, outfile)
                outfile.write('\n')
    # The with-statement closes the file, so no explicit close() is needed.
    # print(f"Data successfully transformed and saved to VocalSet_{name}.jsonl")
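# Illustrative shape of one emitted record (field values are examples, not actual data):
# {"instruction": "Please recognise the vocal technique in the given audio. Output from the
#   following options: belt, breathy, inhaled, lip_trill, spoken, straight, trill, trillo,
#   vibrato, vocal_fry. ",
#  "input": "<|SOA|>example_clip.wav<|EOA|>", "output": "vibrato",
#  "uuid": "<16-char hash>", "audioid": "vibrato/example_clip.wav", "split": ["test"],
#  "task_type": {"major": ["global_MIR"], "minor": ["vocal_technique_classification"]},
#  "domain": "music", "source": "internet", "other": {"tag": "null"}}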