import json
import hashlib

# Root of the MTG-Jamendo dataset checkout.
PATH = "/work/fast_data_yinghao/MTG/mtg-jamendo-dataset/data"

# Answer-letter lookup (defined here but not used by this script).
number_to_letter = {0: "A", 1: "B", 2: "C", 3: "D"}


def get_emotion(split="test"):
    """Build emotion-classification samples from one MTG-Jamendo mood/theme split."""
    data_samples = []
    # Replace with the actual path to your dataset if your layout differs.
    input_file_path = f"{PATH}/splits/split-0/autotagging_moodtheme-{split}.tsv"
    with open(input_file_path, "r") as f:
        for idx, line in enumerate(f):
            if idx == 0:
                continue  # skip the TSV header row
            tmp = line.strip().split("\t")
            emotions = tmp[5:]   # mood/theme tags start at column 5
            audio_path = tmp[3]  # relative path to the audio file
            audioid = audio_path.split("/")[-1]
            # MTG-Jamendo ships low-bitrate copies named *.low.mp3
            if "low" not in audioid:
                audioid = audioid[:-4] + ".low.mp3"
            data_sample = {
                "instruction": "Please provide the emotion of given audio.",
                "input": f"<|SOA|>f'{audio_path}'<|EOA|>",
                "output": ", ".join(sorted([emotion.split("---")[-1] for emotion in emotions])),
                "uuid": audio_path,
                "audioid": audio_path,
                "split": [split if split != "validation" else "dev"],
                "task_type": {"major": ["global_MIR"], "minor": ["emotion_classification"]},
                "domain": "music",
                "source": "MTG",
                "other": {"tag": "null"},
            }
            data_samples.append(data_sample)
            # if idx > 2:
            #     break

    existed_uuid_list = set()
    # Collect the full tag vocabulary (not used further in this script).
    all_emotions = set(
        emotion for data_sample in data_samples for emotion in data_sample["output"].split(", ")
    )
    for data_sample in data_samples:
        # Append the output-format hint to every instruction.
        data_sample["instruction"] += (
            " If you can find multiple emotions, please output in alphabetical order."
            " Use ', ' to split multiple tags."
        )
        # Derive a deterministic UUID from instruction + input + output.
        uuid_string = f"{data_sample['instruction']}#{data_sample['input']}#{data_sample['output']}"
        unique_id = hashlib.md5(uuid_string.encode()).hexdigest()[:16]  # keep only the first 16 hex chars
        if unique_id in existed_uuid_list:
            # On collision, take a SHA1 prefix of the same length as the MD5 prefix,
            # concatenate the two, and hash again; the new MD5 becomes the final UUID.
            sha1_hash = hashlib.sha1(uuid_string.encode()).hexdigest()[:16]
            unique_id = hashlib.md5((unique_id + sha1_hash).encode()).hexdigest()[:16]
        existed_uuid_list.add(unique_id)
        data_sample["uuid"] = unique_id

    return data_samples


if __name__ == "__main__":
    print("start")
    for split in ["test", "train", "validation"]:
        data_samples = get_emotion(split)
        # Save to JSONL format, one sample per line.
        output_file_path = f"emotion_{split}.jsonl"
        with open(output_file_path, "w") as outfile:
            for sample in data_samples:
                json.dump(sample, outfile)
                outfile.write("\n")
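

# --- Optional sanity check (illustrative addition, not part of the original pipeline) ---
# A minimal sketch for eyeballing the generated JSONL files. The file name
# "emotion_test.jsonl" matches what the __main__ block above writes; the helper
# name and its parameters are hypothetical and can be renamed or dropped freely.
def preview_jsonl(path, limit=3):
    """Print the uuid and output fields of the first `limit` records in a JSONL file."""
    with open(path, "r") as f:
        for idx, line in enumerate(f):
            if idx >= limit:
                break
            record = json.loads(line)
            print(record["uuid"], "->", record["output"])

# Example (run manually after the script has generated the files):
# preview_jsonl("emotion_test.jsonl")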