Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -130,6 +130,7 @@ def preprocess_video_in(video_path):
         gr.update(open=False) # video_in_drawer
     ]
 
+@spaces.GPU(duration=120)
 def get_point(point_type, tracking_points, trackings_input_label, input_first_frame_image, evt: gr.SelectData):
     print(f"You selected {evt.value} at {evt.index} from {evt.target}")
 
@@ -217,7 +218,8 @@ def load_model(checkpoint):
     # return [sam2_checkpoint, model_cfg]
 
 
-
+
+@spaces.GPU(duration=120)
 def get_mask_sam_process(
     stored_inference_state,
     input_first_frame_image,
@@ -313,7 +315,7 @@ def get_mask_sam_process(
     # return gr.update(visible=True), "output_first_frame.jpg", frame_names, predictor, inference_state, gr.update(choices=available_frames_to_check, value=working_frame, visible=True)
     return "output_first_frame.jpg", frame_names, predictor, inference_state, gr.update(choices=available_frames_to_check, value=working_frame, visible=False)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=120)
 def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_names, video_frames_dir, vis_frame_type, available_frames_to_check, working_frame, progress=gr.Progress(track_tqdm=True)):
     #### PROPAGATION ####
     sam2_checkpoint, model_cfg = load_model(checkpoint)
@@ -415,6 +417,8 @@ def switch_working_frame(working_frame, scanned_frames, video_frames_dir):
     new_working_frame = os.path.join(video_frames_dir, scanned_frames[ann_frame_idx])
     return gr.State([]), gr.State([]), new_working_frame, new_working_frame
 
+
+@spaces.GPU(duration=120)
 def reset_propagation(first_frame_path, predictor, stored_inference_state):
 
     predictor.reset_state(stored_inference_state)
@@ -609,4 +613,4 @@ with gr.Blocks(css=css) as demo:
         outputs = [output_propagated, output_video, working_frame, available_frames_to_check, reset_prpgt_brn]
     )
 
-demo.launch(
+demo.launch()
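
The common thread in these hunks is Hugging Face's ZeroGPU pattern: each GPU-bound handler is decorated with @spaces.GPU(duration=120) so a GPU is attached only while that call runs, and the app is launched plainly with demo.launch(). A minimal, self-contained sketch of that pattern follows; the handler and component names are illustrative placeholders, not the ones in this app.py.

import spaces
import gradio as gr

@spaces.GPU(duration=120)  # ZeroGPU allocates a GPU for up to 120 s per call
def run_on_gpu(image):
    # GPU-heavy work (e.g. a SAM 2 forward pass) would go here
    return image

with gr.Blocks() as demo:
    inp = gr.Image()
    out = gr.Image()
    btn = gr.Button("Run")
    btn.click(run_on_gpu, inputs=inp, outputs=out)

demo.launch()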