Jaykintecblic committed
Commit af69776
1 Parent(s): 069cb3b

Update handler.py

Files changed (1)
  1. handler.py +15 -11
handler.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Dict, Any, Generator
+from typing import Dict, Any, List
 from PIL import Image
 import torch
 from transformers import AutoModelForCausalLM, AutoProcessor
@@ -36,14 +36,16 @@ class EndpointHandler:
         image = to_channel_dimension_format(image, ChannelDimension.FIRST)
         return torch.tensor(image)
 
-    def stream_response(self, data: Dict[str, Any]) -> Generator[Dict[str, Any], None, None]:
+    def generate_responses(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+        results = []
         image = data.get("inputs")
+
         if isinstance(image, str):
             try:
                 image = Image.open(image)
             except Exception as e:
-                yield {"error": f"Failed to open image: {e}"}
-                return
+                results.append({"error": f"Failed to open image: {e}"})
+                return results
 
         try:
             inputs = self.processor.tokenizer(
@@ -54,14 +56,16 @@ class EndpointHandler:
             inputs["pixel_values"] = self.processor.image_processor([image], transform=self.custom_transform)
             inputs = {k: v.to(self.device) for k, v in inputs.items()}
 
-            for generated_ids in self.model.generate(**inputs, bad_words_ids=self.bad_words_ids, max_length=2048, early_stopping=True, return_dict_in_generate=True, output_scores=True):
-                generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-                yield {"label": generated_text, "score": 1.0}
+            generated_ids = self.model.generate(**inputs, bad_words_ids=self.bad_words_ids, max_length=2048, early_stopping=True)
+            generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+            results.append({"label": generated_text, "score": 1.0})
 
         except torch.cuda.CudaError as e:
-            yield {"error": f"CUDA error: {e}"}
+            results.append({"error": f"CUDA error: {e}"})
         except Exception as e:
-            yield {"error": f"Unexpected error: {e}"}
+            results.append({"error": f"Unexpected error: {e}"})
+
+        return results
 
-    def __call__(self, data: Dict[str, Any]) -> Generator[Dict[str, Any], None, None]:
-        return self.stream_response(data)
+    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+        return self.generate_responses(data)
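
For a quick local sanity check of the new list-based API, a call might look like the sketch below. It assumes the usual custom-handler convention of constructing EndpointHandler with a model directory path and passing the payload as {"inputs": ...}; the constructor signature, model directory, and image path are illustrative assumptions, not part of this commit.

# Sketch of invoking the updated handler locally (assumed usage, not from this commit).
from handler import EndpointHandler

handler = EndpointHandler(path="./model")    # assumed constructor signature and model directory
payload = {"inputs": "./sample_image.png"}   # a string input is opened with PIL.Image.open inside the handler

results = handler(payload)                   # __call__ now returns List[Dict[str, Any]] instead of a generator
for item in results:
    if "error" in item:
        print("error:", item["error"])
    else:
        print("label:", item["label"], "score:", item["score"])

Each element is either a {"label", "score"} result or an {"error"} entry, mirroring the dictionaries appended in generate_responses.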