Spaces:
Running
on
Zero
Running
on
Zero
AdrienB134
committed on
Commit
•
e809fa7
1
Parent(s):
0fc84fe
hugvh
Browse files
app.py
CHANGED
@@ -42,12 +42,7 @@ def model_inference(
|
|
42 |
# )
|
43 |
|
44 |
#We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
|
45 |
-
model = Qwen2VLForConditionalGeneration.from_pretrained(
|
46 |
-
"Qwen/Qwen2-VL-7B-Instruct",
|
47 |
-
torch_dtype=torch.bfloat16,
|
48 |
-
# attn_implementation="flash_attention_2",
|
49 |
-
device_map="auto",
|
50 |
-
)
|
51 |
|
52 |
# default processor
|
53 |
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
|
|
|
42 |
# )
|
43 |
|
44 |
#We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
|
45 |
+
model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto").cuda().eval()
|
|
|
|
|
|
|
|
|
|
|
46 |
|
47 |
# default processor
|
48 |
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
|