Aliayub1995 committed
Commit 4d795e7
1 Parent(s): 5073d9c

Update handler.py

Files changed (1): handler.py (+28 -7)
handler.py CHANGED
@@ -4,7 +4,7 @@ sys.path.append('./')
 from videollama2 import model_init, mm_infer
 from videollama2.utils import disable_torch_init
 import logging
-import numpy as np
+import os
 
 class EndpointHandler:
     def __init__(self, path: str = ""):
@@ -18,16 +18,37 @@ class EndpointHandler:
         self.model_path = 'Aliayub1995/VideoLLaMA2-7B'
         self.model, self.processor, self.tokenizer = model_init(self.model_path)
 
-    def __call__(self, video_tensor: np.ndarray) -> List[Dict[str, Any]]:
-        logging.info("Received video tensor")  # Debugging: Confirm video tensor received
+    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+        logging.info(f"Received data: {data}")  # Debugging: Print received data
+        # Initialize variables
+        current_path = os.getcwd()
+        logging.info(f"Current Path: {current_path}")
+        dir = os.walk("./app")
+        # Iterate through the generator
+        for dirpath, dirnames, filenames in dir:
+            logging.info(f"Current Path: {dirpath}")
+            logging.info(f"Directories: {dirnames}")
+            logging.info(f"Files: {filenames}")
+            logging.info("-" * 40)
+        logging.info(f"Directory struct: {dir}")
+        modal = None
+        modal_path = None
+        instruct = None
 
-        # Default values
-        modal = "video"
-        instruct = "Can you explain each scene and provide the exact time of the video in which it happened in this format [start_time: end_time]: Description, [start_time: end_time]: Description ..."
+        # Extract input data
+        inputs = data.get("inputs", data)
+        modal = inputs.get("modal", "video")
+        modal_path = inputs.get("modal_path", "")
+        instruct = inputs.get("instruct", "")
+
+        logging.info(f"Modal: {modal}, Modal Path: {modal_path}, Instruct: {instruct}")  # Debugging: Print extracted values
+
+        if not modal_path or not instruct:
+            raise ValueError("Both 'modal_path' and 'instruct' must be provided in the input data.")
 
         # Perform inference
         output = mm_infer(
-            self.processor[modal](video_tensor),
+            self.processor[modal](modal_path),
             instruct,
             model=self.model,
             tokenizer=self.tokenizer,
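For reference, a minimal sketch (not part of this commit) of how the updated `__call__` interface could be exercised locally. It assumes `handler.py` is importable from the working directory and that `model_init` can fetch the `Aliayub1995/VideoLLaMA2-7B` weights; the video path and instruction in the payload are hypothetical examples, and the payload keys simply mirror the fields read in `__call__` above.

```python
# Local smoke test for the updated handler interface (sketch, assumptions noted above).
from handler import EndpointHandler

handler = EndpointHandler()  # loads VideoLLaMA2-7B via model_init()

# Payload shape matches what __call__ reads: an "inputs" dict with
# "modal" (defaults to "video"), "modal_path", and "instruct" keys.
payload = {
    "inputs": {
        "modal": "video",
        "modal_path": "videos/demo.mp4",  # hypothetical local video file
        "instruct": "Describe each scene with [start_time: end_time] timestamps.",
    }
}

result = handler(payload)
print(result)
```

Because `__call__` falls back to `data` itself when no `inputs` key is present (`inputs = data.get("inputs", data)`), a flat dict with the same keys should also be accepted when the handler receives a deserialized request body.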