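"""Gradio demo: append a target speaker sample to a mixed recording, run
pyannote speaker diarization on the concatenated audio, and report which
diarized speaker overlaps most with the appended target segment."""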

import os

import torch
import gradio as gr
from pyannote.audio import Pipeline
from pydub import AudioSegment

# Hugging Face access token, read from the environment.
HF_TOKEN = os.environ.get("HUGGINGFACE_READ_TOKEN")
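# Note: pyannote/speaker-diarization-3.1 is a gated model on Hugging Face;
# you must accept its user conditions once and supply a valid read token.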
pipeline = None

# Try to load the pyannote diarization model.
try:
    pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization-3.1", use_auth_token=HF_TOKEN
    )
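    # Run inference on the GPU when one is available.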
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pipeline.to(device)
except Exception as e:
print(f"Error initializing pipeline: {e}")
pipeline = None


# Convert an "HH:MM:SS" timestamp to seconds.
def timestamp_to_seconds(timestamp):
    h, m, s = map(float, timestamp.split(':'))
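    # Example: "0:01:30.5" -> 0*3600 + 1*60 + 30.5 = 90.5 seconds.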
    return 3600 * h + 60 * m + s


# Append the target speaker audio to the end of the mixed audio and return
# the target segment's start and end times within the result as a dict.
def combine_audio_with_time(target_audio, mixed_audio):
    if pipeline is None:
        return "Error: model not initialized"
    # Log the file paths to confirm they were passed in correctly.
    print(f"Target audio file path: {target_audio}")
    print(f"Mixed audio file path: {mixed_audio}")
    # Load the target speaker's sample audio.
    try:
        target_audio_segment = AudioSegment.from_wav(target_audio)
    except Exception as e:
        return f"Error loading target audio: {e}"
    # Load the mixed audio.
    try:
        mixed_audio_segment = AudioSegment.from_wav(mixed_audio)
    except Exception as e:
        return f"Error loading mixed audio: {e}"
    # The target segment starts where the mixed audio ends
    # (pydub lengths are in milliseconds, hence the division by 1000).
    target_start_time = len(mixed_audio_segment) / 1000  # seconds
    # The target segment ends at the total length of the concatenated audio.
    target_end_time = target_start_time + len(target_audio_segment) / 1000  # seconds
    # Append the target speaker's segment to the end of the mixed audio.
    final_audio = mixed_audio_segment + target_audio_segment
    final_audio.export("final_output.wav", format="wav")
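    # Note: "final_output.wav" is written to the current working directory
    # and overwritten on each call.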
    # Return the target segment's start and end times.
    return {"start_time": target_start_time, "end_time": target_end_time}


# Run pyannote/speaker-diarization on the concatenated audio.
def diarize_audio(temp_file):
    if pipeline is None:
        return "Error: model not initialized"
    try:
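        # The pipeline accepts a path to an audio file and returns a
        # pyannote.core.Annotation with one labeled track per speaker turn.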
        diarization = pipeline(temp_file)
        print("Diarization result:")
        for turn, _, speaker in diarization.itertracks(yield_label=True):
            print(f"[{turn.start:.3f} --> {turn.end:.3f}] {speaker}")
        return diarization
    except Exception as e:
        return f"Error processing audio: {e}"


# Find the diarized speaker whose turns overlap most with the target window.
def find_best_matching_speaker(target_start_time, target_end_time, diarization):
    best_match = None
    max_overlap = 0
    # Walk every speaker turn and measure its overlap with the target window.
    for turn, _, speaker in diarization.itertracks(yield_label=True):
        start = turn.start
        end = turn.end
        # Intersect the turn with the target window.
        overlap_start = max(start, target_start_time)
        overlap_end = min(end, target_end_time)
        # If the intervals overlap, compute the overlap duration.
        if overlap_end > overlap_start:
            overlap_duration = overlap_end - overlap_start
            # Keep the speaker with the largest overlap seen so far.
            if overlap_duration > max_overlap:
                max_overlap = overlap_duration
                best_match = speaker
    return best_match, max_overlap
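# Example: a turn labeled SPEAKER_01 spanning 12.0-15.0 s against a target
# window of 10.0-15.0 s overlaps for 3.0 s, so the function returns
# ("SPEAKER_01", 3.0) unless another speaker overlaps longer.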


# End-to-end pipeline: concatenate, diarize, and report the best match.
def process_audio(target_audio, mixed_audio):
print(f"处理音频:目标音频: {target_audio}, 混合音频: {mixed_audio}")
# 进行音频拼接并返回目标音频的起始和结束时间(作为字典)
time_dict = combine_audio_with_time(target_audio, mixed_audio)
# 如果音频拼接出错,返回错误信息
if isinstance(time_dict, str):
return time_dict
# 执行说话人分离
diarization_result = diarize_audio("final_output.wav")
if isinstance(diarization_result, str) and diarization_result.startswith("错误"):
return diarization_result # 出错时返回错误信息
else:
# 获取拼接后的音频长度
final_audio_length = len(AudioSegment.from_wav("final_output.wav")) / 1000 # 秒为单位
# 查找最匹配的说话人
best_match, overlap_duration = find_best_matching_speaker(
time_dict['start_time'],
time_dict['end_time'],
diarization_result
)
if best_match:
return {
'best_matching_speaker': best_match,
'overlap_duration': overlap_duration
}
else:
return "未找到匹配的说话人。"


# Gradio interface
with gr.Blocks() as demo:
gr.Markdown("""
# 🗣️ 音频拼接与说话人分类 🗣️
上传目标音频和混合音频,拼接并进行说话人分类。
结果包括最匹配的说话人以及重叠时长。
""")
    mixed_audio_input = gr.Audio(type="filepath", label="Upload mixed audio")
    target_audio_input = gr.Audio(type="filepath", label="Upload target speaker audio")
    process_button = gr.Button("Process audio")
    # Output widget.
    diarization_output = gr.Textbox(label="Best-matching speaker and overlap duration")
    # Run the processing pipeline when the button is clicked.
    process_button.click(
        fn=process_audio,
        inputs=[target_audio_input, mixed_audio_input],
        outputs=[diarization_output]
    )
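
# share=True asks Gradio to also serve the demo through a temporary public link.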
demo.launch(share=True)