AdrienB134 committed
Commit a4c6545
1 parent: b041f93
Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -48,7 +48,7 @@ def model_inference(
 model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct",attn_implementation="flash_attention_2", trust_remote_code=True, torch_dtype="auto").cuda().eval()

 # default processer
-processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
+processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")

 # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
 # min_pixels = 256*28*28
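For context, this change points the processor at the same checkpoint as the model (the 2B variant instead of the 7B one). A minimal sketch of loading a matching model/processor pair is below; the min_pixels/max_pixels values are the illustrative 256-1280 token range mentioned in the comment above, not settings taken from this commit.

# Minimal sketch: load the model and its processor from the same checkpoint
# so the image preprocessing matches the weights being served.
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

checkpoint = "Qwen/Qwen2-VL-2B-Instruct"

model = Qwen2VLForConditionalGeneration.from_pretrained(
    checkpoint,
    attn_implementation="flash_attention_2",
    trust_remote_code=True,
    torch_dtype="auto",
).cuda().eval()

# Optionally bound the number of visual tokens per image to trade speed
# against memory (values below are illustrative, not from the commit).
min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28
processor = AutoProcessor.from_pretrained(
    checkpoint, min_pixels=min_pixels, max_pixels=max_pixels
)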