# Retrieval-based-Voice-Conversion — create and train voice model
# (Hugging Face Space file; uploaded by cupcakes323, commit 84d847c verified)
import gradio as gr
import librosa
import requests
import scipy.io.wavfile
import torch
# URL to the external app.py file
FILE_URL = "https://huggingface.co/data-science-123/abcd/new/main?filename=app.py"
# Fetch the external app.py (or additional files) if needed
def fetch_external_file(url):
    """Download *url* and save the response body to ``external_app.py``.

    Args:
        url: HTTP(S) URL of the file to fetch.

    Raises:
        Exception: if the server does not answer with HTTP 200.
    """
    # BUG FIX: requests has no default timeout — without one a dead host
    # hangs the Space forever at startup.
    response = requests.get(url, timeout=30)
    if response.status_code == 200:
        # Binary mode: write the bytes exactly as received.
        with open('external_app.py', 'wb') as file:
            file.write(response.content)
    else:
        raise Exception(f"Failed to fetch the file: {url}")
# Fetch the file if you need to load any logic from it
# NOTE(review): runs at import time — a network failure here aborts the app.
fetch_external_file(FILE_URL)
# Load the pre-trained model (replace with your RVC model path or logic)
# NOTE(review): `model` is a project-local module (possibly the file fetched
# above, saved as external_app.py under a different name) — confirm it exists.
from model import load_model, convert_voice
# "path_to_pretrained_model" is a placeholder — point it at a real checkpoint.
model = load_model("path_to_pretrained_model")
# Define the voice conversion logic
def voice_conversion(source_audio, target_voice):
    """Convert the audio at *source_audio* into *target_voice*'s style.

    Args:
        source_audio: filepath of the recording to convert.
        target_voice: target-voice identifier passed through to the model.

    Returns:
        Path of the written WAV file ("output_converted.wav").
    """
    # Load and preprocess audio; librosa resamples to 22050 Hz by default.
    y, sr = librosa.load(source_audio)
    # Add a batch dimension for the model: (1, n_samples).
    input_audio = torch.tensor(y).unsqueeze(0)
    # Use the model for voice conversion (convert_voice comes from model.py).
    converted_audio = convert_voice(model, input_audio, target_voice)
    # Detach from the graph, move to CPU, and drop the batch dim added above —
    # scipy expects (n_samples,) or (n_samples, n_channels).
    # NOTE(review): assumes the model returns shape (1, n_samples) — confirm.
    waveform = converted_audio.detach().cpu().numpy().squeeze()
    output_file = "output_converted.wav"
    # BUG FIX: librosa.output.write_wav was removed in librosa 0.8 and raises
    # AttributeError on modern installs; write with scipy instead.
    # Note scipy's argument order: (path, rate, data).
    scipy.io.wavfile.write(output_file, sr, waveform)
    return output_file
# Gradio interface
def infer(source_audio, target_voice):
    """Gradio entry point — delegates directly to :func:`voice_conversion`."""
    return voice_conversion(source_audio, target_voice)
# Create Gradio interface.
# BUG FIX: Gradio 4.x removed `source=` on gr.Audio (now `sources=[...]`)
# and the `type="file"` output value (use "filepath") — the original
# arguments raise TypeError/ValueError on current Spaces runtimes.
iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath", label="Source Audio"),
        gr.Dropdown(["Voice1", "Voice2", "Voice3"], label="Target Voice"),
    ],
    outputs=gr.Audio(type="filepath", label="Converted Audio"),
    title="Retrieval-based Voice Conversion",
    description="Convert voice from a source audio to a target voice style.",
)

# Launch only when run as a script (Spaces also import this module).
if __name__ == "__main__":
    iface.launch()