zhiqiulin committed on
Commit
5041f6c
•
1 Parent(s): 127f381

Rename app1.py to app.py

Browse files
Files changed (1) hide show
  1. app1.py → app.py +7 -51
app1.py → app.py RENAMED
@@ -1,64 +1,20 @@
1
- # import gradio as gr
2
- # import spaces
3
-
4
- # # Initialize the model only once, outside of any function
5
- # # Ensure that CUDA initialization happens within the worker process
6
- # model_pipe = None
7
-
8
- # @spaces.GPU
9
- # def generate(model_name, image, text):
10
- # global model_pipe
11
- # import torch
12
- # torch.jit.script = lambda f: f
13
-
14
- # from t2v_metrics import VQAScore, list_all_vqascore_models
15
-
16
- # if model_pipe is None:
17
- # print("Initializing model...")
18
- # model_pipe = VQAScore(model="clip-flant5-xl", device="cuda") # our recommended scoring model
19
- # # model_pipe.to("cuda")
20
-
21
- # print(list_all_vqascore_models())
22
- # print("Image:", image)
23
- # print("Text:", text)
24
-
25
- # print("Generating!")
26
- # result = model_pipe(images=[image], texts=[text])
27
- # return result
28
-
29
  import gradio as gr
30
  import spaces
31
- import os
 
 
 
 
32
 
33
  # Global model variable, but do not initialize or move to CUDA here
34
- model_pipe = None
35
 
36
  @spaces.GPU
37
  def generate(model_name, image, text):
38
- global model_pipe
39
-
40
- # Debugging lines to trace CUDA initialization
41
- import torch
42
-
43
- print(f"PID: {os.getpid()}")
44
- print(f"Before import: CUDA available: {torch.cuda.is_available()}")
45
-
46
- torch.jit.script = lambda f: f # Avoid script error in lambda
47
-
48
- from t2v_metrics import VQAScore, list_all_vqascore_models
49
-
50
- print(f"After import: CUDA available: {torch.cuda.is_available()}")
51
-
52
- # Worker Process: Perform all GPU-related initializations here
53
- if model_pipe is None:
54
- print("Initializing model in PID:", os.getpid())
55
- model_pipe = VQAScore(model="clip-flant5-xl", device="cuda") # our recommended scoring model
56
- print(f"Model initialized: CUDA available: {torch.cuda.is_available()}")
57
-
58
  print(list_all_vqascore_models()) # Debug: List available models
59
  print("Image:", image) # Debug: Print image path
60
  print("Text:", text) # Debug: Print text input
61
-
62
  print("Generating!")
63
  # Wrap the model call in a try-except block to capture and debug CUDA errors
64
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import spaces
3
+ import torch
4
+
5
+ torch.jit.script = lambda f: f # Avoid script error in lambda
6
+
7
+ from t2v_metrics import VQAScore, list_all_vqascore_models
8
 
9
  # Global model variable, but do not initialize or move to CUDA here
10
+ model_pipe = VQAScore(model="clip-flant5-xl", device="cpu") # our recommended scoring model
11
 
12
  @spaces.GPU
13
  def generate(model_name, image, text):
14
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  print(list_all_vqascore_models()) # Debug: List available models
16
  print("Image:", image) # Debug: Print image path
17
  print("Text:", text) # Debug: Print text input
 
18
  print("Generating!")
19
  # Wrap the model call in a try-except block to capture and debug CUDA errors
20
  try: