lehduong committed
Commit 546ca50
1 Parent(s): 69bafd6

Update app.py

Files changed (1):
  app.py  +4 -1
app.py CHANGED

@@ -1,6 +1,6 @@
 import subprocess
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
+import spaces
 import gradio as gr
 import torch
 import base64
@@ -89,6 +89,7 @@ class MolmoCaptionProcessor:
             device_map='auto'
         )
 
+    @spaces.GPU(duration=120)
     def generate_response(self, image: Image.Image, msg: str) -> str:
         inputs = self.processor.process(
             images=[image],
@@ -218,6 +219,8 @@ def update_prompt(images: List[Image.Image], task_type: str, custom_msg: str = N
     except Exception as e:
         return "", f"Error generating captions: {str(e)}"
 
+
+@spaces.GPU(duration=120)
 def generate_image(images: List[Image.Image], prompt: str, negative_prompt: str, num_inference_steps: int, guidance_scale: float,
                    denoise_mask: List[str], task_type: str, azimuth: str, elevation: str, distance: str, focal_length: float,
                    height: int = 1024, width: int = 1024, scale_factor: float = 1.0, scale_watershed: float = 1.0,
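
Note: the `spaces.GPU` decorator added here comes from the Hugging Face ZeroGPU `spaces` package; a decorated function is given a GPU slot for up to `duration` seconds per call, while everything else in the Space runs on CPU. Below is a minimal sketch of that pattern for context only; the pipeline, model id, and function name are illustrative placeholders, not the Space's actual app.py.

import spaces                                 # Hugging Face ZeroGPU helper
import gradio as gr
import torch
from diffusers import DiffusionPipeline      # illustrative pipeline choice

# Model loading stays at module level; ZeroGPU only needs the decorator
# around the functions that actually run on the GPU.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",   # placeholder model id
    torch_dtype=torch.float16,
).to("cuda")

@spaces.GPU(duration=120)   # request a GPU slot for up to 120 s per call
def generate(prompt: str):
    # All CUDA work happens inside the decorated function.
    return pipe(prompt).images[0]

demo = gr.Interface(fn=generate, inputs="text", outputs="image")
demo.launch()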