aiqtech committed on
Commit
4dd72e8
·
verified ·
1 Parent(s): 80845a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -6
app.py CHANGED
@@ -2,14 +2,28 @@ import spaces
2
  import os
3
  import time
4
  import torch
5
- from transformers import AutoProcessor, AutoModelForVision2Seq
6
  import gradio as gr
7
  from threading import Thread
8
  from PIL import Image
9
 
10
- # Model and processor initialization
11
- processor = AutoProcessor.from_pretrained("Qwen/QVQ-72B-Preview")
12
- model = AutoModelForVision2Seq.from_pretrained("Qwen/QVQ-72B-Preview").cuda().eval()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  # Footer
15
  footer = """
@@ -26,9 +40,12 @@ def process_image(image, text_input=None):
26
 
27
  # Prepare inputs
28
  if text_input:
29
- inputs = processor(text=text_input, images=image, return_tensors="pt").to("cuda:0")
30
  else:
31
- inputs = processor(images=image, return_tensors="pt").to("cuda:0")
 
 
 
32
 
33
  # Generate output
34
  outputs = model.generate(**inputs, max_new_tokens=1000)
 
2
  import os
3
  import time
4
  import torch
 
5
  import gradio as gr
6
  from threading import Thread
7
  from PIL import Image
8
 
9
+ # Install required packages
10
+ import subprocess
11
+ subprocess.run('pip install --upgrade transformers', shell=True)
12
+ subprocess.run('pip install accelerate', shell=True)
13
+
14
+ from transformers import AutoProcessor, AutoModelForCausalLM
15
+
16
+ # Model and processor initialization with trust_remote_code=True
17
+ processor = AutoProcessor.from_pretrained(
18
+ "Qwen/QVQ-72B-Preview",
19
+ trust_remote_code=True
20
+ )
21
+
22
+ model = AutoModelForCausalLM.from_pretrained(
23
+ "Qwen/QVQ-72B-Preview",
24
+ trust_remote_code=True,
25
+ device_map="auto"
26
+ ).eval()
27
 
28
  # Footer
29
  footer = """
 
40
 
41
  # Prepare inputs
42
  if text_input:
43
+ inputs = processor(text=text_input, images=image, return_tensors="pt")
44
  else:
45
+ inputs = processor(images=image, return_tensors="pt")
46
+
47
+ # Move inputs to the same device as the model
48
+ inputs = {k: v.to(model.device) for k, v in inputs.items()}
49
 
50
  # Generate output
51
  outputs = model.generate(**inputs, max_new_tokens=1000)