root committed on
Commit
88a21ad
1 Parent(s): d11efa5

fix device for zero gpu

Browse files
Files changed (1) hide show
  1. app.py +13 -4
app.py CHANGED
@@ -7,8 +7,6 @@ music_gen_model = MusicgenForConditionalGeneration.from_pretrained("facebook/mus
7
  sampling_rate = music_gen_model.config.audio_encoder.sampling_rate
8
 
9
 
10
- device = "cuda" if torch.cuda.is_available() else "cpu"
11
- music_gen_model.to(device)
12
 
13
  from transformers import AutoProcessor
14
  processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
@@ -17,31 +15,42 @@ from diffusers import DiffusionPipeline
17
 
18
  sd_pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
19
  # sd_pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
20
- sd_pipe.to(device)
21
 
22
 
23
  @spaces.GPU
24
  def generate_music(desc):
 
 
 
25
  inputs = processor(text=[desc], padding=True, return_tensors="pt")
26
  audio_values = music_gen_model.generate(**inputs.to(device), do_sample=True, guidance_scale=3, max_new_tokens=256)
27
  return sampling_rate, audio_values[0][0].cpu().numpy()
28
 
29
  @spaces.GPU
30
  def generate_pic(desc):
 
 
31
  return sd_pipe(prompt=desc).images[0]
32
 
 
 
 
 
 
33
  with gr.Blocks() as app:
34
  with gr.Row():
35
  music_desc = gr.TextArea(label="Music Description")
36
  music_pic = gr.Image(label="Music Image(StableDiffusion)")
37
  music_player = gr.Audio(label="Play My Tune")
38
 
39
- device_name = gr.Text(label='device name', value=device, interactive=False)
40
  gen_pic_btn = gr.Button("Gen Picture")
41
  gen_music_btn = gr.Button("Get Some Tune!!")
 
42
 
43
  gen_pic_btn.click(fn=generate_pic, inputs=[music_desc], outputs=[music_pic])
44
  gen_music_btn.click(fn=generate_music, inputs=[music_desc], outputs=[music_player])
 
45
 
46
  if __name__ == '__main__':
47
  app.launch()
 
7
  sampling_rate = music_gen_model.config.audio_encoder.sampling_rate
8
 
9
 
 
 
10
 
11
  from transformers import AutoProcessor
12
  processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
 
15
 
16
  sd_pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
17
  # sd_pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
 
18
 
19
 
20
  @spaces.GPU
21
  def generate_music(desc):
22
+ device = "cuda" if torch.cuda.is_available() else "cpu"
23
+ music_gen_model.to(device)
24
+
25
  inputs = processor(text=[desc], padding=True, return_tensors="pt")
26
  audio_values = music_gen_model.generate(**inputs.to(device), do_sample=True, guidance_scale=3, max_new_tokens=256)
27
  return sampling_rate, audio_values[0][0].cpu().numpy()
28
 
29
  @spaces.GPU
30
  def generate_pic(desc):
31
+ device = "cuda" if torch.cuda.is_available() else "cpu"
32
+ sd_pipe.to(device)
33
  return sd_pipe(prompt=desc).images[0]
34
 
35
+ @spaces.GPU
36
+ def test_gpu():
37
+ device = "cuda" if torch.cuda.is_available() else "cpu"
38
+ return device
39
+
40
  with gr.Blocks() as app:
41
  with gr.Row():
42
  music_desc = gr.TextArea(label="Music Description")
43
  music_pic = gr.Image(label="Music Image(StableDiffusion)")
44
  music_player = gr.Audio(label="Play My Tune")
45
 
46
+ device_name = gr.Text(label='device name', interactive=False)
47
  gen_pic_btn = gr.Button("Gen Picture")
48
  gen_music_btn = gr.Button("Get Some Tune!!")
49
+ has_gpu_btn = gr.Button("test gpu")
50
 
51
  gen_pic_btn.click(fn=generate_pic, inputs=[music_desc], outputs=[music_pic])
52
  gen_music_btn.click(fn=generate_music, inputs=[music_desc], outputs=[music_player])
53
+ has_gpu_btn.click(fn=test_gpu, outputs=[device_name])
54
 
55
  if __name__ == '__main__':
56
  app.launch()