Spaces:
Running
on
Zero
Running
on
Zero
VictorSanh
committed on
Commit
•
01b5c05
1
Parent(s):
327287c
zero gpu, thanks @cbensimon !
Browse files- app.py +4 -0
- pre-requirements.txt +0 -1
- requirements.txt +0 -3
app.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
import os
|
2 |
import subprocess
|
|
|
3 |
import torch
|
4 |
|
5 |
import gradio as gr
|
@@ -15,6 +16,8 @@ from PIL import Image
|
|
15 |
from transformers.image_transforms import resize, to_channel_dimension_format
|
16 |
|
17 |
|
|
|
|
|
18 |
DEVICE = torch.device("cuda")
|
19 |
PROCESSOR = AutoProcessor.from_pretrained(
|
20 |
"HuggingFaceM4/VLM_WebSight_finetuned",
|
@@ -113,6 +116,7 @@ def render_webpage(
|
|
113 |
return Image.open(output_path_screenshot)
|
114 |
|
115 |
|
|
|
116 |
def model_inference(
|
117 |
image,
|
118 |
):
|
|
|
1 |
import os
|
2 |
import subprocess
|
3 |
+
import spaces
|
4 |
import torch
|
5 |
|
6 |
import gradio as gr
|
|
|
16 |
from transformers.image_transforms import resize, to_channel_dimension_format
|
17 |
|
18 |
|
19 |
+
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
|
20 |
+
|
21 |
DEVICE = torch.device("cuda")
|
22 |
PROCESSOR = AutoProcessor.from_pretrained(
|
23 |
"HuggingFaceM4/VLM_WebSight_finetuned",
|
|
|
116 |
return Image.open(output_path_screenshot)
|
117 |
|
118 |
|
119 |
+
@spaces.GPU(duration=180)
|
120 |
def model_inference(
|
121 |
image,
|
122 |
):
|
pre-requirements.txt
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
torch
|
|
|
|
requirements.txt
CHANGED
@@ -1,7 +1,4 @@
|
|
1 |
playwright
|
2 |
transformers
|
3 |
-
torch
|
4 |
packaging
|
5 |
ninja
|
6 |
-
einops
|
7 |
-
flash-attn
|
|
|
1 |
playwright
|
2 |
transformers
|
|
|
3 |
packaging
|
4 |
ninja
|
|
|
|