Aliayub1995 committed on
Commit
8a770f2
1 Parent(s): 3e3aa94

Upload handler.py

Browse files
Files changed (1) hide show
  1. handler.py +8 -7
handler.py CHANGED
@@ -3,6 +3,7 @@ import sys
3
  sys.path.append('./')
4
  from videollama2 import model_init, mm_infer
5
  from videollama2.utils import disable_torch_init
 
6
 
7
  class EndpointHandler:
8
  def __init__(self, path: str = ""):
@@ -17,17 +18,17 @@ class EndpointHandler:
17
  self.model, self.processor, self.tokenizer = model_init(self.model_path)
18
 
19
  def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
20
- print(f"Received data: {data}") # Debugging: Print received data
21
-
22
  modal = data.get("modal", "video")
23
  modal_path = data.get("modal_path", "")
24
  instruct = data.get("instruct", "")
25
-
26
- print(f"Modal: {modal}, Modal Path: {modal_path}, Instruct: {instruct}") # Debugging: Print extracted values
27
-
28
  if not modal_path or not instruct:
29
  raise ValueError("Both 'modal_path' and 'instruct' must be provided in the input data.")
30
-
31
  # Perform inference
32
  output = mm_infer(
33
  self.processor[modal](modal_path),
@@ -37,7 +38,7 @@ class EndpointHandler:
37
  do_sample=False,
38
  modal=modal
39
  )
40
-
41
  return [{"output": output}]
42
 
43
 
 
3
  sys.path.append('./')
4
  from videollama2 import model_init, mm_infer
5
  from videollama2.utils import disable_torch_init
6
+ import logging
7
 
8
  class EndpointHandler:
9
  def __init__(self, path: str = ""):
 
18
  self.model, self.processor, self.tokenizer = model_init(self.model_path)
19
 
20
  def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
21
+ logging.info(f"Received data: {data}") # Debugging: Print received data
22
+
23
  modal = data.get("modal", "video")
24
  modal_path = data.get("modal_path", "")
25
  instruct = data.get("instruct", "")
26
+
27
+ logging.info(f"Modal: {modal}, Modal Path: {modal_path}, Instruct: {instruct}") # Debugging: Print extracted values
28
+
29
  if not modal_path or not instruct:
30
  raise ValueError("Both 'modal_path' and 'instruct' must be provided in the input data.")
31
+
32
  # Perform inference
33
  output = mm_infer(
34
  self.processor[modal](modal_path),
 
38
  do_sample=False,
39
  modal=modal
40
  )
41
+
42
  return [{"output": output}]
43
 
44