# NOTE: removed copy/paste residue from a web file viewer (a "File size" line,
# a commit hash, and the viewer's gutter line numbers). None of it was valid
# Python; the actual module starts with the imports below.
import gradio as gr
import librosa
import numpy as np
import requests
import torch
from scipy.io import wavfile

# URL to the external app.py file.
# NOTE(review): this points at the Hugging Face "create new file" editor page
# (`.../new/main?filename=app.py`), which returns HTML, not the raw file.
# A raw download URL would look like `.../resolve/main/app.py` — verify intent.
FILE_URL = "https://huggingface.co/data-science-123/abcd/new/main?filename=app.py"

# Fetch the external app.py (or additional files) if needed
def fetch_external_file(url, timeout=30):
    """Download *url* and save the response body as ``external_app.py``.

    Parameters
    ----------
    url : str
        HTTP(S) URL of the file to download.
    timeout : float, optional
        Seconds to wait for the server before giving up (default 30).
        Without a timeout, ``requests.get`` can block indefinitely.

    Raises
    ------
    requests.RequestException
        On transport-level failures (DNS, connection, timeout).
    RuntimeError
        If the server responds with a non-200 status code.
        (RuntimeError subclasses Exception, so existing ``except Exception``
        callers still work.)
    """
    # Explicit timeout so a dead host cannot hang app startup forever.
    response = requests.get(url, timeout=timeout)
    if response.status_code != 200:
        raise RuntimeError(f"Failed to fetch the file: {url}")
    # Binary mode preserves the payload byte-for-byte.
    with open('external_app.py', 'wb') as file:
        file.write(response.content)

# Fetch the file if you need to load any logic from it.
# NOTE(review): this runs at import time as a side effect; if the download
# fails the whole app fails to start — confirm that is intended.
fetch_external_file(FILE_URL)

# Load the pre-trained model (replace with your RVC model path or logic).
# NOTE(review): mid-file import — presumably `model` is expected to exist
# only after the fetch above (or locally in the repo); the fetched file is
# saved as `external_app.py`, not `model.py`, so verify where `model`
# actually comes from.
from model import load_model, convert_voice

# Module-level model handle shared by all requests; the path is a placeholder.
model = load_model("path_to_pretrained_model")

# Define the voice conversion logic
def voice_conversion(source_audio, target_voice):
    # Load and preprocess audio
    y, sr = librosa.load(source_audio)
    input_audio = torch.tensor(y).unsqueeze(0)

    # Use the model for voice conversion
    converted_audio = convert_voice(model, input_audio, target_voice)

    # Convert the output tensor to a numpy array and save it
    converted_audio_np = converted_audio.detach().cpu().numpy()
    output_file = "output_converted.wav"
    librosa.output.write_wav(output_file, converted_audio_np, sr)

    return output_file

# Gradio interface
def infer(source_audio, target_voice):
    # Call voice conversion function
    result_audio = voice_conversion(source_audio, target_voice)
    return result_audio

# Create Gradio interface
iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.Audio(source="microphone", type="filepath", label="Source Audio"),
        gr.Dropdown(["Voice1", "Voice2", "Voice3"], label="Target Voice")
    ],
    outputs=gr.Audio(type="file", label="Converted Audio"),
    title="Retrieval-based Voice Conversion",
    description="Convert voice from a source audio to a target voice style."
)

# Launch the web UI only when run as a script, not when imported as a module.
if __name__ == "__main__":
    iface.launch()