import os
import numpy as np
from pydub import AudioSegment
from scipy.ndimage import maximum_filter1d
import json
import hashlib
import tqdm

from scipy.interpolate import interp1d
from scipy.signal import argrelmax



def infer_tempo(beats, fps, hist_smooth=4, no_tempo=-1):
    """Infer a global tempo (BPM) from beat times via an inter-beat-interval histogram."""
    import madmom  # lazy import: only needed when tempo inference is requested
    ibis = np.diff(beats) * fps  # inter-beat intervals in frames
    bins = np.bincount(np.round(ibis).astype(int))
    if not bins.any():
        return no_tempo
    if hist_smooth > 0:
        bins = madmom.audio.signal.smooth(bins, hist_smooth)
    intervals = np.arange(len(bins))
    interpolation_fn = interp1d(intervals, bins, 'quadratic')
    # evaluate on a fine grid, skipping the zero-frame interval to avoid division by zero
    intervals = np.arange(max(intervals[0], 1), intervals[-1], 0.001)
    tempi = 60.0 * fps / intervals
    bins = interpolation_fn(intervals)
    peaks = argrelmax(bins, mode='wrap')[0]
    if len(peaks) == 0:
        return no_tempo
    else:
        sorted_peaks = peaks[np.argsort(bins[peaks])[::-1]]
        return tempi[sorted_peaks][0]
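
# Illustrative use (infer_tempo is only referenced in commented-out calls below):
# beats spaced 0.5 s apart with fps=100 give ~50-frame intervals, i.e. roughly 120 BPM:
#   infer_tempo(np.arange(0.0, 10.0, 0.5), fps=100)  ->  ~120.0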


def quantise(beats):
    # snap beat times (in seconds) onto a 1/25 s (40 ms) grid
    return [int(round(b * 25)) / 25 for b in beats]
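
# Example: quantise([0.512, 1.003, 2.479]) -> [0.52, 1.0, 2.48]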


def get_sample(excerpt_path, beats, existed_uuid_list, split="train", key="gtzan", type="beat"):
    """Build one instruction-tuning sample (beat or downbeat tracking) for an audio excerpt."""
    data_sample = {
        "instruction": "Identify and list the timestamps of all beats in this audio track. Use the format of `0.0s,0.54s,1.0ss, ...`",
        "input": f"<|SOA|>{excerpt_path[len(PATH)+1:]}<|EOA|>",
        "output": ",".join([f"{b}s" for b in beats]),
        "uuid": "",
        "audioid": excerpt_path[len(PATH)+1:], # exclude the '/' at the beginning, to enable os.join.path
        "split": [split],
        "task_type": {"major": ["global_MIR"], "minor": ["beat_tracking"]},
        "domain": "music",
        "source": key,
        "other": {}
    }
    if type == "downbeat":
        data_sample["instruction"] = "Identify and list the timestamps of all downbeats in this audio track. Use the format of `0.0s,1.54s,3.0s, ...`"
        data_sample["task_type"]["minor"] = ["downbeat_tracking"]
        
    # derive a deterministic uuid from the sample content
    uuid_string = f"{data_sample['instruction']}#{data_sample['input']}#{data_sample['output']}"
    unique_id = hashlib.md5(uuid_string.encode()).hexdigest()[:16]  # keep only the first 16 hex chars
    if unique_id in existed_uuid_list:
        # on a collision, take an equally long SHA-1 prefix, concatenate it with the MD5,
        # and hash again with MD5 to obtain the final UUID
        sha1_hash = hashlib.sha1(uuid_string.encode()).hexdigest()[:16]
        unique_id = hashlib.md5((unique_id + sha1_hash).encode()).hexdigest()[:16]
    existed_uuid_list.add(unique_id)
    data_sample["uuid"] = f"{unique_id}"
    return data_sample
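
# Illustrative shape of a returned sample (values below are made up, not real data):
# {
#   "instruction": "Identify and list the timestamps of all beats in this audio track. ...",
#   "input": "<|SOA|>ballroom/some_track.wav<|EOA|>",
#   "output": "0.52s,1.0s,1.48s, ...",
#   "uuid": "<16-char md5 prefix>",
#   "audioid": "ballroom/some_track.wav",
#   "split": ["train"],
#   "task_type": {"major": ["global_MIR"], "minor": ["beat_tracking"]},
#   "domain": "music",
#   "source": "ballroom",
#   "other": {}
# }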


EXCERPT_LENGTH = 30 * 1000  # 30 seconds in milliseconds
MIN_LENGTH = 5 * 1000  # 5 seconds in milliseconds

PATH = '/work/fast_data_yinghao/Beat-Transformer/data'
load_annotation = np.load(f'{PATH}/full_beat_annotation.npz', allow_pickle=True)

for key in ["ballroom"]:  #"rwc", "ballroom", "gtzan", "hainsworth", "carnetic", "smc"
    # ballroom, GTZAN 30s, beat & downbeat
    # hainsworth, (RWC,) carnetic: split audio, beat & downbeat
    # smc: split audio, beat
    annotation = load_annotation[key]

    with open(f'{PATH}/audio_lists/{key}.txt', 'r') as f:
        audio_root = [line.strip() for line in f]
    # drop the original absolute prefix (first 37 characters) recorded in the list
    # and re-root each audio path under PATH
    audio_root = [f'{PATH}/{item[37:]}' for item in audio_root]
    assert len(annotation) == len(audio_root)

    existed_uuid_list = set()
    data_samples = []
    for idx, ann in tqdm.tqdm(enumerate(annotation), total=len(annotation)):
        # print(f'processing {audio_root[idx]} ...')
        audio_path = audio_root[idx]
        if len(ann.shape) == 1:
            # 1-D annotation: beat times only (no downbeat labels)
            beats = quantise(ann)
            downbeats = None
        elif key != "rwc":
            beats = quantise(ann[:, 0])
            downbeats = quantise(ann[ann[:, 1] == 1, 0])
        else:
            raise NotImplementedError("RWC annotations are not handled yet")
            # beat = madmom.utils.quantize_events(annotation[:, 0], fps=self.fps, length=len(song))
            # beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
            # beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)             
            # downbeat = annotation[annotation[:, 1] == 1][:, 0]
            # downbeat = madmom.utils.quantize_events(downbeat, fps=self.fps, length=len(song))
            # downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
            # downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
                            

        if key =="ballroom":
            # tempo = infer_tempo(beats, fps=100)
            sample = get_sample(audio_path, beats, existed_uuid_list, key=key)
            data_samples.append(sample)
            sample = get_sample(audio_path, downbeats, existed_uuid_list, key=key, type="downbeat")
            data_samples.append(sample)          
        elif key == "gtzan":
            if "jazz.00054" in audio_path:
                continue
            sample = get_sample(audio_path, beats, existed_uuid_list, split="test", key=key)
            data_samples.append(sample)
            if downbeats:
                sample = get_sample(audio_path, downbeats, existed_uuid_list, split="test", key=key, type="downbeat")
                data_samples.append(sample)                    
        else:
            # split longer recordings into 30 s excerpts; keep a shorter final
            # excerpt only if it is at least MIN_LENGTH (5 s) long
            audio = AudioSegment.from_file(audio_path)
            for i in range(0, len(audio), EXCERPT_LENGTH):
                end = i + EXCERPT_LENGTH
                if end < len(audio):
                    excerpt = audio[i:end]
                else:
                    excerpt = audio[i:]
                    # Discard short audio clips
                    if len(excerpt) < MIN_LENGTH:
                        break
                    end = len(audio)
                    
                # Save the excerpt to the same directory with a new name
                excerpt_path = f"{audio_path[:-4]}_{i//EXCERPT_LENGTH}.wav"
                if not os.path.exists(excerpt_path):
                    excerpt.export(excerpt_path, format="wav")
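                # e.g. ".../song.wav" with i = 60000 becomes excerpt index 2 -> ".../song_2.wav" (illustrative)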

                # i and end are in milliseconds; shift beat times into the excerpt's local time frame
                seg_start, seg_end = i / 1000, end / 1000
                excerpt_beats = [round(b - seg_start, 2) for b in beats if seg_start <= b <= seg_end]
                if downbeats:
                    excerpt_downbeats = [round(db - seg_start, 2) for db in downbeats if seg_start <= db <= seg_end]
                else:
                    excerpt_downbeats = None
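                # e.g. for the excerpt starting at i = 60000 ms (seg_start = 60.0 s), a beat
                # annotated at 61.24 s is kept and written out as 1.24 s (illustrative)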
                
                # tempo = infer_tempo(excerpt_beats, fps=100)
                sample = get_sample(excerpt_path, excerpt_beats, existed_uuid_list, key=key)
                data_samples.append(sample)
                if excerpt_downbeats:
                    sample = get_sample(excerpt_path, excerpt_downbeats, existed_uuid_list, key=key, type="downbeat")    
                    data_samples.append(sample)      
            # Remove the original audio file
            # os.remove(audio_path)          
        
        # break
        

    split = "test" if key == "gtzan" else "train"
    output_file_path = f'{PATH}/../{key}_{split}.jsonl'  # Replace with the desired output path
    with open(output_file_path, 'w') as outfile:
        # for sample in data_samples:
        json.dump(data_samples, outfile)

        # outfile.write('\n')
    outfile.close()
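
# Optional sanity check (sketch): read the last written file back and confirm every line parses.
# with open(output_file_path) as f:
#     for line in f:
#         json.loads(line)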