hperkins committed
Commit b45af94
Parent(s): b150b57

Update handler.py

Files changed (1):
1. handler.py +4 -34
handler.py CHANGED

@@ -1,9 +1,8 @@
 import json
 import torch
-from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, pipeline
+from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, pipeline, PreTrainedImageProcessor
 from qwen_vl_utils import process_vision_info
 
-
 class EndpointHandler:
     def __init__(self, model_dir):
         # Configure device settings
@@ -28,10 +27,11 @@ class EndpointHandler:
             print(f"Error loading processor: {e}")
             raise
 
-        # Define a VQA pipeline
+        # Define a VQA pipeline with explicitly provided processor
         self.vqa_pipeline = pipeline(
             task="visual-question-answering",
             model=self.model,
+            image_processor=self.processor,  # Explicitly pass the image processor
             device=0 if torch.cuda.is_available() else -1  # Use first GPU or CPU
         )
 
@@ -87,34 +87,4 @@ class EndpointHandler:
         except Exception as e:
             error_message = f"Error: {str(e)}"
             print(error_message)
-            return json.dumps({"error": error_message})
-
-
-# Example instantiation and call
-if __name__ == "__main__":
-    # Assuming model directory is set in your context/environment
-    model_directory = "your_model_directory"
-    handler = EndpointHandler(model_directory)
-
-    # Sample request data
-    sample_request = json.dumps({
-        "messages": [
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "image",
-                        "image": "path_or_url_to_your_image"
-                    },
-                    {
-                        "type": "text",
-                        "text": "Describe what you see in the image."
-                    }
-                ]
-            }
-        ]
-    })
-
-    # Process the request
-    output = handler(sample_request)
-    print(output)
+            return json.dumps({"error": error_message})
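
The net effect of the change is that the processor loaded in __init__ is now handed to pipeline() explicitly rather than letting the pipeline try to resolve an image processor on its own. Below is a minimal sketch of that construction in isolation, mirroring the committed code; the model directory is a placeholder (as in the example block removed from handler.py), and this is an illustration, not additional code from the repository:

import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, pipeline

model_dir = "your_model_directory"  # placeholder
model = Qwen2VLForConditionalGeneration.from_pretrained(model_dir)
processor = AutoProcessor.from_pretrained(model_dir)

# Build the VQA pipeline and pass the processor explicitly, mirroring the
# image_processor=self.processor argument added in this commit.
vqa_pipeline = pipeline(
    task="visual-question-answering",
    model=model,
    image_processor=processor,
    device=0 if torch.cuda.is_available() else -1,  # first GPU or CPU
)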