sergiopaniego committed
Commit 5ca3297
1 Parent(s): 247d4bf

Formated code

Files changed (1)
  1. app.py +17 -35
app.py CHANGED
@@ -4,12 +4,28 @@ from transformers import Qwen2VLForConditionalGeneration, Qwen2VLProcessor
 from qwen_vl_utils import process_vision_info
 import torch
 from PIL import Image
-import subprocess
 from datetime import datetime
 import numpy as np
 import os
 
 
+DESCRIPTION = """
+# Qwen2-VL-7B-trl-sft-ChartQA Demo
+
+This is a demo Space for a fine-tuned version of [Qwen2-VL-7B](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) trained using [ChatQA dataset](https://huggingface.co/datasets/HuggingFaceM4/ChartQA).
+
+The corresponding model is located [here](https://huggingface.co/sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA).
+"""
+
+model_id = "Qwen/Qwen2-VL-7B-Instruct"
+model = Qwen2VLForConditionalGeneration.from_pretrained(
+    model_id,
+    device_map="auto",
+    torch_dtype=torch.bfloat16,
+)
+adapter_path = "sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA"
+model.load_adapter(adapter_path)
+processor = Qwen2VLProcessor.from_pretrained(model_id)
 
 def array_to_image_path(image_array):
     if image_array is None:
@@ -30,41 +46,9 @@ def array_to_image_path(image_array):
     return full_path
 
 
-model_id = "Qwen/Qwen2-VL-7B-Instruct"
-model = Qwen2VLForConditionalGeneration.from_pretrained(
-    model_id,
-    device_map="auto",
-    torch_dtype=torch.bfloat16,
-)
-adapter_path = "sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA"
-model.load_adapter(adapter_path)
-
-processor = Qwen2VLProcessor.from_pretrained(model_id)
-
-DESCRIPTION = """
-# Qwen2-VL-7B-trl-sft-ChartQA Demo
-
-This is a demo Space for a fine-tuned version of [Qwen2-VL-7B](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) trained using [ChatQA dataset](https://huggingface.co/datasets/HuggingFaceM4/ChartQA).
-
-The corresponding model is located [here](https://huggingface.co/sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA)
-"""
-
-kwargs = {}
-kwargs['torch_dtype'] = torch.bfloat16
-
-user_prompt = '<|user|>\n'
-assistant_prompt = '<|assistant|>\n'
-prompt_suffix = "<|end|>\n"
-
 @spaces.GPU
 def run_example(image, text_input=None):
     image_path = array_to_image_path(image)
-
-    print(image_path)
-    #model = models[model_id]
-    #processor = processors[model_id]
-
-    prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
     image = Image.fromarray(image).convert("RGB")
     messages = [
         {
@@ -121,13 +105,11 @@ with gr.Blocks(css=css) as demo:
         with gr.Row():
             with gr.Column():
                 input_img = gr.Image(label="Input Picture")
-                #model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA")
                 text_input = gr.Textbox(label="Question")
                 submit_btn = gr.Button(value="Submit")
             with gr.Column():
                 output_text = gr.Textbox(label="Output Text")
 
-        #submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
         submit_btn.click(run_example, [input_img, text_input], [output_text])
 
 demo.queue(api_open=False)
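
For context, a minimal, self-contained sketch of the inference path this refactor sets up: the base Qwen2-VL checkpoint and the ChartQA adapter are loaded as module-level globals, and generation follows the standard Qwen2-VL chat-template flow via qwen_vl_utils. The example image path, question, and max_new_tokens value below are illustrative assumptions; the body of run_example is only partially visible in this diff, so this is not the Space's exact code.

import torch
from PIL import Image
from transformers import Qwen2VLForConditionalGeneration, Qwen2VLProcessor
from qwen_vl_utils import process_vision_info

# Same identifiers as in the diff above.
model_id = "Qwen/Qwen2-VL-7B-Instruct"
adapter_path = "sergiopaniego/qwen2-7b-instruct-trl-sft-ChartQA"

model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
model.load_adapter(adapter_path)  # attach the fine-tuned ChartQA adapter to the base weights
processor = Qwen2VLProcessor.from_pretrained(model_id)

# Illustrative inputs (assumptions, not taken from the Space's code).
image = Image.open("chart.png").convert("RGB")
question = "Which category has the highest value?"

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": question},
        ],
    }
]

# Render the chat template, gather vision inputs, and generate an answer.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=1024)  # max_new_tokens is an assumed value
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])

In app.py these objects are module-level globals, so they are created once when the Space starts and reused by every call to the @spaces.GPU-decorated run_example handler.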