Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -4,8 +4,10 @@ from typing import List, Tuple, Optional
 import spaces
 
 # Define the command to be executed
+command = ["python", "setup.py", "build_ext", "--inplace"]
 
 # Execute the command
+result = subprocess.run(command, capture_output=True, text=True)
 
 css="""
 div#component-18, div#component-25, div#component-35, div#component-41{
@@ -13,24 +15,15 @@ div#component-18, div#component-25, div#component-35, div#component-41{
 }
 """
 
-
-
-
-
-
-@spaces.GPU(duration=120)
-def run_install(command):
-    result = subprocess.run(command, capture_output=True, text=True)
-    # Print the output and error (if any)
-    print("Output:\n", result.stdout)
-    print("Errors:\n", result.stderr)
-
-    # Check if the command was successful
-    if result.returncode == 0:
-        print("Command executed successfully.")
-    else:
-        print("Command failed with return code:", result.returncode)
+# Print the output and error (if any)
+print("Output:\n", result.stdout)
+print("Errors:\n", result.stderr)
 
+# Check if the command was successful
+if result.returncode == 0:
+    print("Command executed successfully.")
+else:
+    print("Command failed with return code:", result.returncode)
 
 import gradio as gr
 from datetime import datetime
@@ -68,11 +61,7 @@ def clear_points(image):
         #gr.State() # stored_inference_state
     ]
 
-@spaces.GPU(duration=120)
 def preprocess_video_in(video_path):
-    # command = ["python", "setup.py", "build_ext", "--inplace"]
-    command = ["pip", "install", "--no-build-isolation", "-e", "."]
-    run_install(command)
 
     # Generate a unique ID based on the current date and time
     unique_id = datetime.now().strftime('%Y%m%d%H%M%S')
@@ -250,7 +239,6 @@ def get_mask_sam_process(
     print("MODEL LOADED")
 
     # set predictor
-    global predictor
     predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint)
     print("PREDICTOR READY")
 
@@ -325,13 +313,12 @@ def get_mask_sam_process(
     print(available_frames_to_check)
 
     # return gr.update(visible=True), "output_first_frame.jpg", frame_names, predictor, inference_state, gr.update(choices=available_frames_to_check, value=working_frame, visible=True)
-    return "output_first_frame.jpg", frame_names, inference_state, gr.update(choices=available_frames_to_check, value=working_frame, visible=False)
+    return "output_first_frame.jpg", frame_names, predictor, inference_state, gr.update(choices=available_frames_to_check, value=working_frame, visible=False)
 
 @spaces.GPU(duration=120)
 def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_names, video_frames_dir, vis_frame_type, available_frames_to_check, working_frame, progress=gr.Progress(track_tqdm=True)):
     #### PROPAGATION ####
     sam2_checkpoint, model_cfg = load_model(checkpoint)
-    global predictor
     predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint)
 
     inference_state = stored_inference_state
@@ -432,7 +419,7 @@ def switch_working_frame(working_frame, scanned_frames, video_frames_dir):
 
 
 @spaces.GPU(duration=120)
-def reset_propagation(first_frame_path, stored_inference_state):
+def reset_propagation(first_frame_path, predictor, stored_inference_state):
 
     predictor.reset_state(stored_inference_state)
     # print(f"RESET State: {stored_inference_state} ")
@@ -445,7 +432,7 @@ with gr.Blocks(css=css) as demo:
     trackings_input_label = gr.State([])
     video_frames_dir = gr.State()
     scanned_frames = gr.State()
-
+    loaded_predictor = gr.State()
     stored_inference_state = gr.State()
     stored_frame_names = gr.State()
     available_frames_to_check = gr.State([])
@@ -601,7 +588,7 @@ with gr.Blocks(css=css) as demo:
         outputs = [
             output_result,
             stored_frame_names,
-
+            loaded_predictor,
            stored_inference_state,
            working_frame,
        ],
@@ -610,7 +597,7 @@ with gr.Blocks(css=css) as demo:
 
    reset_prpgt_brn.click(
        fn = reset_propagation,
-        inputs = [first_frame_path, stored_inference_state],
+        inputs = [first_frame_path, loaded_predictor, stored_inference_state],
        outputs = [points_map, tracking_points, trackings_input_label, output_propagated, stored_inference_state, output_result, available_frames_to_check, input_first_frame_image, working_frame, reset_prpgt_brn],
        queue=False
    )
@@ -626,6 +613,4 @@ with gr.Blocks(css=css) as demo:
        outputs = [output_propagated, output_video, working_frame, available_frames_to_check, reset_prpgt_brn]
    )
 
-
-
 demo.launch()
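
The core pattern in this diff is a general Gradio/ZeroGPU one: instead of declaring the SAM 2 predictor as a module-level global inside GPU-decorated functions, the app keeps it in a gr.State component and passes it explicitly between callbacks. The sketch below only illustrates that pattern and is not the Space's actual app.py: the config and checkpoint paths are placeholders, and build_sam2_video_predictor is assumed to be importable from the sam2 package as it is in the diff.

import gradio as gr
import spaces
from sam2.build_sam import build_sam2_video_predictor  # assumed import path

MODEL_CFG = "sam2_hiera_t.yaml"                # placeholder config name
CHECKPOINT = "checkpoints/sam2_hiera_tiny.pt"  # placeholder checkpoint path

@spaces.GPU(duration=120)
def load_predictor():
    # Build the predictor inside a GPU-allocated call and return it so that
    # Gradio stores it in the gr.State component declared below.
    return build_sam2_video_predictor(MODEL_CFG, CHECKPOINT)

@spaces.GPU(duration=120)
def reset(predictor, inference_state):
    # Later callbacks receive the same predictor back as an input instead of
    # reaching for a module-level global.
    predictor.reset_state(inference_state)
    return inference_state

with gr.Blocks() as demo:
    loaded_predictor = gr.State()        # holds the predictor between events
    stored_inference_state = gr.State()

    load_btn = gr.Button("Load model")
    reset_btn = gr.Button("Reset")

    load_btn.click(fn=load_predictor, inputs=[], outputs=[loaded_predictor])
    reset_btn.click(
        fn=reset,
        inputs=[loaded_predictor, stored_inference_state],
        outputs=[stored_inference_state],
    )

demo.launch()

Because gr.State values are kept per user session, each visitor gets its own predictor and inference state, which avoids relying on module-level globals that may not survive between separate @spaces.GPU calls on ZeroGPU hardware.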