Update app.py
app.py CHANGED
@@ -79,10 +79,10 @@ def extract_frames_with_labels(video_path, base_output_dir="frames"):
     # Release the video capture object
     video_capture.release()
 
-    return frame_data
+    return frame_data, gr.update(open=True)
 
 # Define a function to run your script with selected inputs
-def run_xportrait(source_image, driving_video, seed, uc_scale, best_frame, out_frames, num_mix, ddim_steps):
+def run_xportrait(source_image, driving_video, seed, uc_scale, best_frame, out_frames, num_mix, ddim_steps, progress=gr.Progress(track_tqdm=True)):
 
     # Create a unique output directory name based on current date and time
     output_dir_base = "outputs"
@@ -123,6 +123,7 @@ def run_xportrait(source_image, driving_video, seed, uc_scale, best_frame, out_frames, num_mix, ddim_steps):
     except subprocess.CalledProcessError as e:
         return f"An error occurred: {e}", None
 
+
 # Set up Gradio interface
 css="""
 div#frames-gallery{
@@ -152,8 +153,8 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
         best_frame = gr.Number(value=36, label="Best Frame", info="specify the frame index in the driving video where the head pose best matches the source image (note: precision of best_frame index might affect the final quality)")
         out_frames = gr.Number(value=-1, label="Out Frames", info="number of generation frames")
-    with gr.Accordion("Driving video Frames"):
-        driving_frames = gr.Gallery(show_label=True, columns=6, height=
+    with gr.Accordion("Driving video Frames", open=False) as frames_gallery_panel:
+        driving_frames = gr.Gallery(show_label=True, columns=6, height=380, elem_id="frames-gallery")
     with gr.Row():
         seed = gr.Number(value=999, label="Seed")
         uc_scale = gr.Number(value=5, label="UC Scale")
@@ -182,10 +183,10 @@ with gr.Blocks(css=css) as demo:
     """)
 
 
-    driving_video.
+    driving_video.change(
        fn = extract_frames_with_labels,
        inputs = [driving_video],
-       outputs = [driving_frames],
+       outputs = [driving_frames, frames_gallery_panel],
        queue = False
    )
 
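Taken together, the hunks implement one small UX pattern: the frame-extraction handler now returns gr.update(open=True) as a second output aimed at the accordion that wraps the gallery, so the collapsed panel opens as soon as a driving video is uploaded, and run_xportrait gains a gr.Progress(track_tqdm=True) parameter so tqdm bars inside that function can drive Gradio's progress display. Below is a minimal, self-contained sketch of that wiring, not the Space's actual app.py; extract_frames_stub, long_running_stub, and the Run button are stand-ins for the real functions and layout.

# Minimal sketch of the pattern introduced by this commit (illustrative, not the real app).
import gradio as gr

def extract_frames_stub(video_path):
    # Placeholder for the real frame extraction; returns whatever the Gallery
    # should display plus an update that opens the surrounding Accordion.
    frame_data = []  # in the real app: list of (image, caption) pairs
    return frame_data, gr.update(open=True)

def long_running_stub(video_path, progress=gr.Progress(track_tqdm=True)):
    # track_tqdm=True mirrors tqdm progress bars created inside this function
    # in Gradio's progress display, without manual progress() calls.
    return f"processed {video_path}"

with gr.Blocks() as demo:
    driving_video = gr.Video(label="Driving Video")
    # open=False keeps the panel collapsed until the handler opens it.
    with gr.Accordion("Driving video Frames", open=False) as frames_gallery_panel:
        driving_frames = gr.Gallery(show_label=True, columns=6, height=380)
    result = gr.Textbox(label="Result")
    run_btn = gr.Button("Run")

    # The second output targets the Accordion, matching the handler's
    # second return value.
    driving_video.change(
        fn=extract_frames_stub,
        inputs=[driving_video],
        outputs=[driving_frames, frames_gallery_panel],
        queue=False,
    )
    run_btn.click(fn=long_running_stub, inputs=[driving_video], outputs=[result])

if __name__ == "__main__":
    demo.launch()

Keeping queue=False on the .change() listener means the lightweight extraction step bypasses the request queue, so the gallery fills and the accordion opens right after upload instead of waiting behind queued generation jobs.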