import os
import random
from glob import glob

import gradio as gr
import numpy as np
import soundfile as sf
import spaces  # Hugging Face Spaces GPU helper (provides the @spaces.GPU decorator)
import torch
from diffusers import StableAudioPipeline
from translatepy import Translator

translator = Translator()

# Constants
model = "stabilityai/stable-audio-open-1.0"
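# Largest 32-bit integer, used as the upper bound when drawing a random seed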
MAX_SEED = np.iinfo(np.int32).max

CSS = """
.gradio-container {
  max-width: 690px !important;
}
footer {
    visibility: hidden;
}
"""

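# On page load, reload with ?__theme=dark so the dark theme is always used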
JS = """function () {
  gradioURL = window.location.href
  if (!gradioURL.endsWith('?__theme=dark')) {
    window.location.replace(gradioURL + '?__theme=dark');
  }
}"""
DESCRIPTION = """
<center>
Stable Audio Open 1.0 generates variable-length (up to 47s) stereo audio at 44.1kHz from text prompts. \
It comprises three components: an autoencoder that compresses waveforms into a manageable sequence length, \
a T5-based text embedding for text conditioning, and a transformer-based diffusion (DiT) model that operates in the latent space of the autoencoder.
</center>
"""

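# Load the Stable Audio Open pipeline in half precision and move it to the GPU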
pipe = StableAudioPipeline.from_pretrained(
    model,
    torch_dtype=torch.float16)
pipe = pipe.to("cuda")


# Generation function: translate the prompt, run the pipeline, save the result
@spaces.GPU(duration=120)
def main(
    prompt,
    negative="low quality",
    second: float = 10.0,
    seed: int = -1):
    
    # A seed of -1 means "random": draw one so it can be reported back to the UI
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    generator = torch.Generator().manual_seed(seed)
    
    # Translate the prompt to English; translatepy auto-detects the source language
    prompt = str(translator.translate(prompt, 'English'))

    print(f'prompt: {prompt}')

    # Generate audio: 200 diffusion steps, 3 candidate waveforms (only the first is saved below)
    audio = pipe(
        prompt,
        negative_prompt=negative,
        audio_end_in_s=second,
        num_inference_steps=200,
        num_waveforms_per_prompt=3,
        generator=generator,
    ).audios

    os.makedirs("outputs", exist_ok=True)
    # Number output files sequentially based on the .wav files already saved
    base_count = len(glob(os.path.join("outputs", "*.wav")))
    audio_path = os.path.join("outputs", f"{base_count:06d}.wav")

    sf.write(audio_path, audio[0].T.float().cpu().numpy(), pipe.vae.sampling_rate)
    
    return audio_path, seed

# Gradio Interface

with gr.Blocks(theme='soft', css=CSS, js=JS, title="Stable Audio Open") as iface:
    with gr.Accordion(""):
        gr.Markdown(DESCRIPTION)
    output = gr.Audio(label="Generated Audio", type="filepath", interactive=False, autoplay=True, elem_classes="audio")  # Audio output component
    prompt = gr.Textbox(label="Prompt", placeholder="1000 BPM percussive sound of water drops")
    negative = gr.Textbox(label="Negative prompt", placeholder="Low quality")
    with gr.Row():
        second = gr.Slider(5.0, 47.0, value=10.0, step=0.1, label="Duration (seconds)")
        seed = gr.Slider(-1, MAX_SEED, value=-1, step=1, label="Seed (-1 = random)")
    with gr.Row():
        submit_btn = gr.Button("πŸš€ Send")  # Create a submit button
        clear_btn = gr.ClearButton([prompt, seed, output], value="πŸ—‘οΈ Clear") # Create a clear button

    # Set up the event listeners
    submit_btn.click(main, inputs=[prompt, negative, second, seed], outputs=[output, seed])


#gr.close_all()

iface.queue().launch(show_api=False)  # Launch the Gradio interface