Gabriel committed on
Commit
92feb47
1 Parent(s): 603b021

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +21 -8
handler.py CHANGED
@@ -4,15 +4,18 @@ from PIL import Image
4
  import io
5
  import base64
6
  import requests
 
7
 
8
class EndpointHandler():
    """Inference endpoint wrapper around a Qwen2-VL vision-language model."""

    def __init__(self, path=""):
        # `path` is the model repository/directory supplied by the serving runtime.
        # Processor bundles the tokenizer and the image preprocessor.
        self.processor = AutoProcessor.from_pretrained(path)
        self.model = Qwen2VLForConditionalGeneration.from_pretrained(path)
 
 
12
 
13
  def __call__(self, data: Any) -> Dict[str, Any]:
14
  image_input = data.get('image')
15
- text_input = data.get('text', "Describe this image.")
16
 
17
  if image_input is None:
18
  return {"error": "No image provided."}
@@ -26,27 +29,37 @@ class EndpointHandler():
26
  except Exception as e:
27
  return {"error": f"Failed to process the image. Details: {str(e)}"}
28
 
29
- messages = [
30
  {
31
  "role": "user",
32
  "content": [
33
- {"type": "image", "image": image},
34
  {"type": "text", "text": text_input},
35
  ],
36
  }
37
  ]
38
 
39
- text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
 
 
40
  inputs = self.processor(
41
- text=[text],
42
  images=[image],
43
  padding=True,
44
  return_tensors="pt",
45
  )
46
 
47
- generate_ids = self.model.generate(inputs.input_ids, max_length=30)
 
 
 
 
 
 
 
48
  output_text = self.processor.batch_decode(
49
- generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
50
  )[0]
51
 
52
  return {"generated_text": output_text}
 
4
  import io
5
  import base64
6
  import requests
7
+ import torch
8
 
9
class EndpointHandler():
    """Inference endpoint wrapper around a Qwen2-VL vision-language model."""

    def __init__(self, path=""):
        # `path` is the model repository/directory supplied by the serving runtime.
        # Processor bundles the tokenizer and the image preprocessor.
        self.processor = AutoProcessor.from_pretrained(path)
        # device_map="auto" lets the loader place the weights automatically
        # (on GPU when one is available; presumably requires `accelerate` —
        # TODO confirm it is in the endpoint's requirements).
        self.model = Qwen2VLForConditionalGeneration.from_pretrained(
            path, device_map="auto"
        )
15
 
16
  def __call__(self, data: Any) -> Dict[str, Any]:
17
  image_input = data.get('image')
18
+ text_input = data.get('text', "Describe this image.")
19
 
20
  if image_input is None:
21
  return {"error": "No image provided."}
 
29
  except Exception as e:
30
  return {"error": f"Failed to process the image. Details: {str(e)}"}
31
 
32
+ conversation = [
33
  {
34
  "role": "user",
35
  "content": [
36
+ {"type": "image"},
37
  {"type": "text", "text": text_input},
38
  ],
39
  }
40
  ]
41
 
42
+ text_prompt = self.processor.apply_chat_template(
43
+ conversation, add_generation_prompt=True
44
+ )
45
+
46
  inputs = self.processor(
47
+ text=[text_prompt],
48
  images=[image],
49
  padding=True,
50
  return_tensors="pt",
51
  )
52
 
53
+ inputs = inputs.to(self.model.device)
54
+
55
+ output_ids = self.model.generate(**inputs, max_new_tokens=128)
56
+
57
+ generated_ids = [
58
+ output_id[len(input_id):] for input_id, output_id in zip(inputs.input_ids, output_ids)
59
+ ]
60
+
61
  output_text = self.processor.batch_decode(
62
+ generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
63
  )[0]
64
 
65
  return {"generated_text": output_text}