Ggh596 committed on
Commit 752438f · verified · 1 Parent(s): 4ac32a6

Upload 5 files

ui/tabs/extras_tab.py CHANGED
@@ -4,6 +4,7 @@ import shutil
 import roop.utilities as util
 import roop.util_ffmpeg as ffmpeg
 import roop.globals
+from roop.utilities import clean_dir
 
 frame_filters_map = {
     "Colorize B/W Images (Deoldify Artistic)" : {"colorizer" : {"subtype": "deoldify_artistic"}},
@@ -62,6 +63,26 @@ def extras_tab():
             with gr.Column():
                 extras_chk_creategif = gr.Checkbox(label='Create GIF from video', value=False)
                 extras_create_video=gr.Button("Create")
+        with gr.Row(variant='panel'):
+            with gr.Column():
+                gr.Markdown("""
+                # Create video from gif
+                """)
+            with gr.Column():
+                extras_video_fps = gr.Slider(minimum=0, maximum=120, value=0, label="Video FPS", step=1.0, interactive=True)
+            with gr.Column():
+                extras_create_video_from_gif=gr.Button("Create")
+        with gr.Row(variant='panel'):
+            with gr.Column(scale=2):
+                gr.Markdown("""
+                # Repair video
+
+                Uses FFMpeg to fix corrupt videos.
+                """)
+            with gr.Column():
+                extras_repair_video=gr.Button("Repair")
+
+
         with gr.Row(variant='panel'):
             with gr.Accordion(label="Full frame processing", open=True):
                 with gr.Row(variant='panel'):
@@ -78,7 +99,9 @@ def extras_tab():
     start_cut_video.click(fn=on_cut_video, inputs=[files_to_process, cut_start_time, cut_end_time, extras_chk_encode], outputs=[extra_files_output])
     start_extract_frames.click(fn=on_extras_extract_frames, inputs=[files_to_process], outputs=[extra_files_output])
     start_join_videos.click(fn=on_join_videos, inputs=[files_to_process, extras_chk_encode], outputs=[extra_files_output])
-    extras_create_video.click(fn=on_extras_create_video, inputs=[extras_images_folder, extras_fps, extras_chk_creategif], outputs=[extra_files_output])
+    extras_create_video.click(fn=on_extras_create_video, inputs=[files_to_process, extras_images_folder, extras_fps, extras_chk_creategif], outputs=[extra_files_output])
+    extras_create_video_from_gif.click(fn=on_extras_create_video_from_gif, inputs=[files_to_process, extras_video_fps], outputs=[extra_files_output])
+    extras_repair_video.click(fn=on_extras_repair_video, inputs=[files_to_process], outputs=[extra_files_output])
     start_frame_process.click(fn=on_frame_process, inputs=[files_to_process, filterselection, upscalerselection], outputs=[extra_files_output])
 
 
@@ -115,17 +138,55 @@ def on_join_videos(files, chk_encode):
             gr.Error('Joining videos failed!')
     return resultfiles
 
+def on_extras_create_video_from_gif(files,fps):
+    if files is None:
+        return None
+
+    filenames = []
+    resultfiles = []
+    for f in files:
+        filenames.append(f.name)
 
-
-def on_extras_create_video(images_path,fps, create_gif):
-    util.sort_rename_frames(os.path.dirname(images_path))
     destfilename = os.path.join(roop.globals.output_path, "img2video." + roop.globals.CFG.output_video_format)
-    ffmpeg.create_video('', destfilename, fps, images_path)
-    resultfiles = []
+    ffmpeg.create_video_from_gif(filenames[0], destfilename)
     if os.path.isfile(destfilename):
         resultfiles.append(destfilename)
-    else:
+    return resultfiles
+
+
+def on_extras_repair_video(files):
+    if files is None:
+        return None
+
+    resultfiles = []
+    for tf in files:
+        f = tf.name
+        destfile = util.get_destfilename_from_path(f, roop.globals.output_path, '_repair')
+        ffmpeg.repair_video(f, destfile)
+        if os.path.isfile(destfile):
+            resultfiles.append(destfile)
+        else:
+            gr.Error('Repairing video failed!')
+    return resultfiles
+
+
+
+
+
+def on_extras_create_video(files, images_path,fps, create_gif):
+    if images_path is None:
         return None
+    resultfiles = []
+    if len(files) > 0 and util.is_video(files[0]) and create_gif:
+        destfilename = files[0]
+    else:
+        util.sort_rename_frames(os.path.dirname(images_path))
+        destfilename = os.path.join(roop.globals.output_path, "img2video." + roop.globals.CFG.output_video_format)
+        ffmpeg.create_video('', destfilename, fps, images_path)
+    if os.path.isfile(destfilename):
+        resultfiles.append(destfilename)
+    else:
+        return None
    if create_gif:
        gifname = util.get_destfilename_from_path(destfilename, './output', '.gif')
        ffmpeg.create_gif_from_video(destfilename, gifname)
@@ -161,7 +222,7 @@ def on_frame_process(files, filterselection, upscaleselection):
         return None
 
     if roop.globals.CFG.clear_output:
-        shutil.rmtree(roop.globals.output_path)
+        clean_dir(roop.globals.output_path)
     prepare_environment()
     list_files_process : list[ProcessEntry] = []
 
@@ -175,7 +236,7 @@ def on_frame_process(files, filterselection, upscaleselection):
     filter = next((x for x in frame_upscalers_map.keys() if x == upscaleselection), None)
     if filter is not None:
         processoroptions.update(frame_upscalers_map[filter])
-    options = ProcessOptions(processoroptions, 0, 0, "all", 0, None, None, None, False)
+    options = ProcessOptions(processoroptions, 0, 0, "all", 0, None, None, 0, 128, False, False)
     batch_process_with_options(list_files_process, options, None)
     outdir = pathlib.Path(roop.globals.output_path)
     outfiles = [str(item) for item in outdir.rglob("*") if item.is_file()]
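The handlers above rely on helpers that are not shown in this commit: clean_dir (imported from roop.utilities and used in place of shutil.rmtree) and the new ffmpeg.create_video_from_gif / ffmpeg.repair_video calls in roop/util_ffmpeg.py. The following is only a rough sketch of what such helpers could look like, written as an assumption; the real implementations in roop/utilities.py and roop/util_ffmpeg.py may differ.

import os
import shutil
import subprocess

def clean_dir(path: str) -> None:
    # Assumption: remove everything inside `path` but keep the directory itself,
    # so prepare_environment() and later writes still find the output folder.
    if not os.path.isdir(path):
        return
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            shutil.rmtree(entry.path, ignore_errors=True)
        else:
            os.remove(entry.path)

def repair_video(src: str, dest: str) -> None:
    # Assumption: a plain remux ("-c copy") is often enough to rebuild broken
    # container metadata; requires the ffmpeg binary on PATH.
    subprocess.run(["ffmpeg", "-y", "-i", src, "-c", "copy", dest], check=False)

def create_video_from_gif(gif_path: str, dest: str, fps: int = 25) -> None:
    # Assumption: re-encode the GIF frames into the configured output container.
    subprocess.run(["ffmpeg", "-y", "-i", gif_path, "-r", str(fps), dest], check=False)

The presumable point of an "empty, don't delete" helper is that the output directory keeps existing between runs, which is why the commit swaps shutil.rmtree for clean_dir wherever CFG.clear_output is honored.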
ui/tabs/facemgr_tab.py CHANGED
@@ -22,7 +22,7 @@ def facemgr_tab() -> None:
                 Add multiple reference images into a faceset file.
                 """)
             with gr.Row():
-                videoimagefst = gr.Image(label="Cut face from video frame", height=576, interactive=False, visible=True)
+                videoimagefst = gr.Image(label="Cut face from video frame", height=576, interactive=False, visible=True, format="jpeg")
             with gr.Row():
                 frame_num_fst = gr.Slider(1, 1, value=1, label="Frame Number", info='0:00:00', step=1.0, interactive=False)
                 fb_cutfromframe = gr.Button("Use faces from this frame", variant='secondary', interactive=False)
ui/tabs/faceswap_tab.py CHANGED
@@ -10,6 +10,7 @@ from roop.capturer import get_video_frame, get_video_frame_total, get_image_frame
 from roop.ProcessEntry import ProcessEntry
 from roop.ProcessOptions import ProcessOptions
 from roop.FaceSet import FaceSet
+from roop.utilities import clean_dir
 
 last_image = None
 
@@ -30,7 +31,8 @@ selected_preview_index = 0
 is_processing = False
 
 list_files_process : list[ProcessEntry] = []
-no_face_choices = ["Use untouched original frame","Retry rotated", "Skip Frame", "Skip Frame if no similar face"]
+no_face_choices = ["Use untouched original frame","Retry rotated", "Skip Frame", "Skip Frame if no similar face", "Use last swapped"]
+swap_choices = ["First found", "All input faces", "All female", "All male", "All faces", "Selected face"]
 
 current_video_fps = 50
 
@@ -44,42 +46,113 @@ def faceswap_tab():
         with gr.Row(variant='panel'):
             with gr.Column(scale=2):
                 with gr.Row():
-                    with gr.Column(min_width=160):
-                        input_faces = gr.Gallery(label="Input faces", allow_preview=False, preview=False, height=128, object_fit="scale-down", columns=8)
+                    input_faces = gr.Gallery(label="Input faces gallery", allow_preview=False, preview=False, height=138, columns=64, object_fit="scale-down", interactive=False)
+                    target_faces = gr.Gallery(label="Target faces gallery", allow_preview=False, preview=False, height=138, columns=64, object_fit="scale-down", interactive=False)
+                with gr.Row():
+                    bt_move_left_input = gr.Button("⬅ Move left", size='sm')
+                    bt_move_right_input = gr.Button("➡ Move right", size='sm')
+                    bt_move_left_target = gr.Button("⬅ Move left", size='sm')
+                    bt_move_right_target = gr.Button("➡ Move right", size='sm')
+                with gr.Row():
+                    bt_remove_selected_input_face = gr.Button("❌ Remove selected", size='sm')
+                    bt_clear_input_faces = gr.Button("💥 Clear all", variant='stop', size='sm')
+                    bt_remove_selected_target_face = gr.Button("❌ Remove selected", size='sm')
+                    bt_add_local = gr.Button('Add local files from', size='sm')
+
+                with gr.Row():
+                    with gr.Column(scale=2):
                         with gr.Accordion(label="Advanced Masking", open=False):
-                            chk_showmaskoffsets = gr.Checkbox(label="Show mask overlay in preview", value=False, interactive=True)
-                            mask_top = gr.Slider(0, 1.0, value=0, label="Offset Face Top", step=0.01, interactive=True)
-                            mask_bottom = gr.Slider(0, 1.0, value=0, label="Offset Face Bottom", step=0.01, interactive=True)
-                            mask_left = gr.Slider(0, 1.0, value=0, label="Offset Face Left", step=0.01, interactive=True)
-                            mask_right = gr.Slider(0, 1.0, value=0, label="Offset Face Right", step=0.01, interactive=True)
-                            mask_erosion = gr.Slider(1.0, 3.0, value=1.0, label="Erosion Iterations", step=1.00, interactive=True)
-                            mask_blur = gr.Slider(10.0, 50.0, value=20.0, label="Blur size", step=1.00, interactive=True)
-                            bt_toggle_masking = gr.Button("Toggle manual masking", variant='secondary', size='sm')
-                            selected_mask_engine = gr.Dropdown(["None", "Clip2Seg", "DFL XSeg"], value="None", label="Face masking engine")
-                            clip_text = gr.Textbox(label="List of objects to mask and restore back on fake face", value="cup,hands,hair,banana", interactive=False)
-                            bt_preview_mask = gr.Button("👥 Show Mask Preview", variant='secondary')
-                        bt_remove_selected_input_face = gr.Button("❌ Remove selected", size='sm')
-                        bt_clear_input_faces = gr.Button("💥 Clear all", variant='stop', size='sm')
-                    with gr.Column(min_width=160):
-                        target_faces = gr.Gallery(label="Target faces", allow_preview=False, preview=False, height=128, object_fit="scale-down", columns=8)
-                        bt_remove_selected_target_face = gr.Button("❌ Remove selected", size='sm')
-                        bt_add_local = gr.Button('Add local files from', size='sm')
+                            chk_showmaskoffsets = gr.Checkbox(
+                                label="Show mask overlay in preview",
+                                value=False,
+                                interactive=True,
+                            )
+                            chk_restoreoriginalmouth = gr.Checkbox(
+                                label="Restore original mouth area",
+                                value=False,
+                                interactive=True,
+                            )
+                            mask_top = gr.Slider(
+                                0,
+                                1.0,
+                                value=0,
+                                label="Offset Face Top",
+                                step=0.01,
+                                interactive=True,
+                            )
+                            mask_bottom = gr.Slider(
+                                0,
+                                1.0,
+                                value=0,
+                                label="Offset Face Bottom",
+                                step=0.01,
+                                interactive=True,
+                            )
+                            mask_left = gr.Slider(
+                                0,
+                                1.0,
+                                value=0,
+                                label="Offset Face Left",
+                                step=0.01,
+                                interactive=True,
+                            )
+                            mask_right = gr.Slider(
+                                0,
+                                1.0,
+                                value=0,
+                                label="Offset Face Right",
+                                step=0.01,
+                                interactive=True,
+                            )
+                            mask_erosion = gr.Slider(
+                                1.0,
+                                3.0,
+                                value=1.0,
+                                label="Erosion Iterations",
+                                step=1.00,
+                                interactive=True,
+                            )
+                            mask_blur = gr.Slider(
+                                10.0,
+                                50.0,
+                                value=20.0,
+                                label="Blur size",
+                                step=1.00,
+                                interactive=True,
+                            )
+                            bt_toggle_masking = gr.Button(
+                                "Toggle manual masking", variant="secondary", size="sm"
+                            )
+                            selected_mask_engine = gr.Dropdown(
+                                ["None", "Clip2Seg", "DFL XSeg"],
+                                value="None",
+                                label="Face masking engine",
+                            )
+                            clip_text = gr.Textbox(
+                                label="List of objects to mask and restore back on fake face",
+                                value="cup,hands,hair,banana",
+                                interactive=False,
+                            )
+                            bt_preview_mask = gr.Button(
+                                "👥 Show Mask Preview", variant="secondary"
+                            )
+                    with gr.Column(scale=2):
                         local_folder = gr.Textbox(show_label=False, placeholder="/content/", interactive=True)
                 with gr.Row(variant='panel'):
-                    bt_srcfiles = gr.Files(label='Source File(s)', file_count="multiple", file_types=["image", ".fsz"], elem_id='filelist', height=233)
+                    bt_srcfiles = gr.Files(label='Source Images or Facesets', file_count="multiple", file_types=["image", ".fsz"], elem_id='filelist', height=233)
                     bt_destfiles = gr.Files(label='Target File(s)', file_count="multiple", file_types=["image", "video"], elem_id='filelist', height=233)
                 with gr.Row(variant='panel'):
                     gr.Markdown('')
                     forced_fps = gr.Slider(minimum=0, maximum=120, value=0, label="Video FPS", info='Overrides detected fps if not 0', step=1.0, interactive=True, container=True)
 
             with gr.Column(scale=2):
-                previewimage = gr.Image(label="Preview Image", height=576, interactive=False, visible=True)
+                previewimage = gr.Image(label="Preview Image", height=576, interactive=False, visible=True, format=get_gradio_output_format())
                 maskimage = gr.ImageEditor(label="Manual mask Image", sources=["clipboard"], transforms="", type="numpy",
                                            brush=gr.Brush(color_mode="fixed", colors=["rgba(255, 255, 255, 1"]), interactive=True, visible=False)
                 with gr.Row(variant='panel'):
-                    fake_preview = gr.Checkbox(label="Face swap frames", value=False)
-                    bt_refresh_preview = gr.Button("🔄 Refresh", variant='secondary', size='sm')
-                    bt_use_face_from_preview = gr.Button("Use Face from this Frame", variant='primary', size='sm')
+                    fake_preview = gr.Checkbox(label="Face swap frames", value=False)
+                    bt_refresh_preview = gr.Button("🔄 Refresh", variant='secondary', size='sm')
+                    bt_use_face_from_preview = gr.Button("Use Face from this Frame", variant='primary', size='sm')
                 with gr.Row():
                     preview_frame_num = gr.Slider(1, 1, value=1, label="Frame Number", info='0:00:00', step=1.0, interactive=True)
                 with gr.Row():
@@ -88,28 +161,29 @@ def faceswap_tab():
                     set_frame_end = gr.Button("➡ Set as End", size='sm')
         with gr.Row(visible=False) as dynamic_face_selection:
             with gr.Column(scale=2):
-                face_selection = gr.Gallery(label="Detected faces", allow_preview=False, preview=False, height=256, object_fit="cover", columns=8)
+                face_selection = gr.Gallery(label="Detected faces", allow_preview=False, preview=False, height=138, object_fit="cover", columns=32)
             with gr.Column():
                 bt_faceselect = gr.Button("☑ Use selected face", size='sm')
                 bt_cancelfaceselect = gr.Button("Done", size='sm')
            with gr.Column():
                 gr.Markdown(' ')
-
+
         with gr.Row(variant='panel'):
             with gr.Column(scale=1):
-                selected_face_detection = gr.Dropdown(["First found", "All female", "All male", "All faces", "Selected face"], value="First found", label="Specify face selection for swapping")
+                selected_face_detection = gr.Dropdown(swap_choices, value="First found", label="Specify face selection for swapping")
             with gr.Column(scale=1):
+                num_swap_steps = gr.Slider(1, 5, value=1, step=1.0, label="Number of swapping steps", info="More steps may increase likeness")
+            with gr.Column(scale=2):
                 ui.globals.ui_selected_enhancer = gr.Dropdown(["None", "Codeformer", "DMDNet", "GFPGAN", "GPEN", "Restoreformer++"], value="None", label="Select post-processing")
 
         with gr.Row(variant='panel'):
             with gr.Column(scale=1):
                 max_face_distance = gr.Slider(0.01, 1.0, value=0.65, label="Max Face Similarity Threshold", info="0.0 = identical 1.0 = no similarity")
             with gr.Column(scale=1):
-                num_swap_steps = gr.Slider(1, 5, value=1, step=1.0, label="Number of swapping steps", info="More steps can increase likeness")
+                ui.globals.ui_upscale = gr.Dropdown(["128px", "256px", "512px"], value="128px", label="Subsample upscale to", interactive=True)
            with gr.Column(scale=2):
                 ui.globals.ui_blend_ratio = gr.Slider(0.0, 1.0, value=0.65, label="Original/Enhanced image blend ratio", info="Only used with active post-processing")
 
-
         with gr.Row(variant='panel'):
             with gr.Column(scale=1):
                 video_swapping_method = gr.Dropdown(["Extract Frames to media","In-Memory processing"], value="In-Memory processing", label="Select video processing method", interactive=True)
@@ -122,16 +196,14 @@ def faceswap_tab():
                     roop.globals.keep_frames = gr.Checkbox(label="Keep Frames (relevant only when extracting frames)", value=False)
                     roop.globals.wait_after_extraction = gr.Checkbox(label="Wait for user key press before creating video ", value=False)
 
-
-
         with gr.Row(variant='panel'):
             with gr.Column():
                 bt_start = gr.Button("▶ Start", variant='primary')
-                gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
             with gr.Column():
                 bt_stop = gr.Button("⏹ Stop", variant='secondary', interactive=False)
+                gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
            with gr.Column(scale=2):
-                gr.Markdown(' ')
+                output_method = gr.Dropdown(["File","Virtual Camera", "Both"], value="File", label="Select Output Method", interactive=True)
         with gr.Row(variant='panel'):
             with gr.Column():
                 resultfiles = gr.Files(label='Processed File(s)', interactive=False)
@@ -140,11 +212,17 @@ def faceswap_tab():
                 resultvideo = gr.Video(label='Final Video', interactive=False, visible=False)
 
     previewinputs = [preview_frame_num, bt_destfiles, fake_preview, ui.globals.ui_selected_enhancer, selected_face_detection,
-                        max_face_distance, ui.globals.ui_blend_ratio, selected_mask_engine, clip_text, no_face_action, vr_mode, autorotate, maskimage, chk_showmaskoffsets, num_swap_steps]
+                        max_face_distance, ui.globals.ui_blend_ratio, selected_mask_engine, clip_text, no_face_action, vr_mode, autorotate, maskimage, chk_showmaskoffsets, chk_restoreoriginalmouth, num_swap_steps, ui.globals.ui_upscale]
     previewoutputs = [previewimage, maskimage, preview_frame_num]
-    input_faces.select(on_select_input_face, None, None).then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
+    input_faces.select(on_select_input_face, None, None).success(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
+
+    bt_move_left_input.click(fn=move_selected_input, inputs=[bt_move_left_input], outputs=[input_faces])
+    bt_move_right_input.click(fn=move_selected_input, inputs=[bt_move_right_input], outputs=[input_faces])
+    bt_move_left_target.click(fn=move_selected_target, inputs=[bt_move_left_target], outputs=[target_faces])
+    bt_move_right_target.click(fn=move_selected_target, inputs=[bt_move_right_target], outputs=[target_faces])
+
     bt_remove_selected_input_face.click(fn=remove_selected_input_face, outputs=[input_faces])
-    bt_srcfiles.change(fn=on_srcfile_changed, show_progress='full', inputs=bt_srcfiles, outputs=[dynamic_face_selection, face_selection, input_faces])
+    bt_srcfiles.change(fn=on_srcfile_changed, show_progress='full', inputs=bt_srcfiles, outputs=[dynamic_face_selection, face_selection, input_faces, bt_srcfiles])
 
     mask_top.release(fn=on_mask_top_changed, inputs=[mask_top], show_progress='hidden')
     mask_bottom.release(fn=on_mask_bottom_changed, inputs=[mask_bottom], show_progress='hidden')
@@ -154,34 +232,32 @@ def faceswap_tab():
     mask_blur.release(fn=on_mask_blur_changed, inputs=[mask_blur], show_progress='hidden')
     selected_mask_engine.change(fn=on_mask_engine_changed, inputs=[selected_mask_engine], outputs=[clip_text], show_progress='hidden')
 
-
     target_faces.select(on_select_target_face, None, None)
     bt_remove_selected_target_face.click(fn=remove_selected_target_face, outputs=[target_faces])
 
     forced_fps.change(fn=on_fps_changed, inputs=[forced_fps], show_progress='hidden')
-    bt_destfiles.change(fn=on_destfiles_changed, inputs=[bt_destfiles], outputs=[preview_frame_num, text_frame_clip], show_progress='hidden').then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs, show_progress='hidden')
-    bt_destfiles.select(fn=on_destfiles_selected, outputs=[preview_frame_num, text_frame_clip, forced_fps], show_progress='hidden').then(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs, show_progress='hidden')
+    bt_destfiles.change(fn=on_destfiles_changed, inputs=[bt_destfiles], outputs=[preview_frame_num, text_frame_clip], show_progress='hidden').success(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs, show_progress='hidden')
+    bt_destfiles.select(fn=on_destfiles_selected, outputs=[preview_frame_num, text_frame_clip, forced_fps], show_progress='hidden').success(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs, show_progress='hidden')
     bt_destfiles.clear(fn=on_clear_destfiles, outputs=[target_faces, selected_face_detection])
     resultfiles.select(fn=on_resultfiles_selected, inputs=[resultfiles], outputs=[resultimage, resultvideo])
 
     face_selection.select(on_select_face, None, None)
     bt_faceselect.click(fn=on_selected_face, outputs=[input_faces, target_faces, selected_face_detection])
     bt_cancelfaceselect.click(fn=on_end_face_selection, outputs=[dynamic_face_selection, face_selection])
-
-    bt_clear_input_faces.click(fn=on_clear_input_faces, outputs=[input_faces])
 
+    bt_clear_input_faces.click(fn=on_clear_input_faces, outputs=[input_faces])
 
     bt_add_local.click(fn=on_add_local_folder, inputs=[local_folder], outputs=[bt_destfiles])
     bt_preview_mask.click(fn=on_preview_mask, inputs=[preview_frame_num, bt_destfiles, clip_text, selected_mask_engine], outputs=[previewimage])
 
     start_event = bt_start.click(fn=start_swap,
-        inputs=[ui.globals.ui_selected_enhancer, selected_face_detection, roop.globals.keep_frames, roop.globals.wait_after_extraction,
-                roop.globals.skip_audio, max_face_distance, ui.globals.ui_blend_ratio, selected_mask_engine, clip_text,video_swapping_method, no_face_action, vr_mode, autorotate, num_swap_steps, maskimage],
+        inputs=[output_method, ui.globals.ui_selected_enhancer, selected_face_detection, roop.globals.keep_frames, roop.globals.wait_after_extraction,
+                roop.globals.skip_audio, max_face_distance, ui.globals.ui_blend_ratio, selected_mask_engine, clip_text,video_swapping_method, no_face_action, vr_mode, autorotate, chk_restoreoriginalmouth, num_swap_steps, ui.globals.ui_upscale, maskimage],
         outputs=[bt_start, bt_stop, resultfiles], show_progress='full')
-    after_swap_event = start_event.then(fn=on_resultfiles_finished, inputs=[resultfiles], outputs=[resultimage, resultvideo])
-
+    after_swap_event = start_event.success(fn=on_resultfiles_finished, inputs=[resultfiles], outputs=[resultimage, resultvideo])
+
     bt_stop.click(fn=stop_swap, cancels=[start_event, after_swap_event], outputs=[bt_start, bt_stop], queue=False)
-
+
     bt_refresh_preview.click(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
     bt_toggle_masking.click(fn=on_toggle_masking, inputs=[previewimage, maskimage], outputs=[previewimage, maskimage])
     fake_preview.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
@@ -190,8 +266,7 @@ def faceswap_tab():
     set_frame_start.click(fn=on_set_frame, inputs=[set_frame_start, preview_frame_num], outputs=[text_frame_clip])
     set_frame_end.click(fn=on_set_frame, inputs=[set_frame_end, preview_frame_num], outputs=[text_frame_clip])
 
-
-
+
 def on_mask_top_changed(mask_offset):
     set_mask_offset(0, mask_offset)
 
@@ -230,7 +305,6 @@ def on_mask_engine_changed(mask_engine):
     return gr.Textbox(interactive=False)
 
 
-
 def on_add_local_folder(folder):
     files = util.get_local_files_from_folder(folder)
     if files is None:
@@ -244,9 +318,8 @@ def on_srcfile_changed(srcfiles, progress=gr.Progress()):
     IS_INPUT = True
 
     if srcfiles is None or len(srcfiles) < 1:
-        return gr.Column(visible=False), None, ui.globals.ui_input_thumbs
+        return gr.Column(visible=False), None, ui.globals.ui_input_thumbs, None
 
-    thumbs = []
     for f in srcfiles:
         source_path = f.name
         if source_path.lower().endswith('fsz'):
@@ -296,13 +369,7 @@ def on_srcfile_changed(srcfiles, progress=gr.Progress()):
             roop.globals.INPUT_FACESETS.append(face_set)
 
         progress(1.0)
-
-    # old style with selecting input faces commented out
-    # if len(thumbs) < 1:
-    #     return gr.Column(visible=False), None, ui.globals.ui_input_thumbs
-    #     return gr.Column(visible=True), thumbs, gr.Gallery(visible=True)
-
-    return gr.Column(visible=False), None, ui.globals.ui_input_thumbs
+    return gr.Column(visible=False), None, ui.globals.ui_input_thumbs,None
 
 
 def on_select_input_face(evt: gr.SelectData):
@@ -323,13 +390,53 @@ def remove_selected_input_face():
 
     return ui.globals.ui_input_thumbs
 
+def move_selected_input(button_text):
+    global SELECTED_INPUT_FACE_INDEX
+
+    if button_text == "⬅ Move left":
+        if SELECTED_INPUT_FACE_INDEX <= 0:
+            return ui.globals.ui_input_thumbs
+        offset = -1
+    else:
+        if len(ui.globals.ui_input_thumbs) <= SELECTED_INPUT_FACE_INDEX:
+            return ui.globals.ui_input_thumbs
+        offset = 1
+
+    f = roop.globals.INPUT_FACESETS.pop(SELECTED_INPUT_FACE_INDEX)
+    roop.globals.INPUT_FACESETS.insert(SELECTED_INPUT_FACE_INDEX + offset, f)
+    f = ui.globals.ui_input_thumbs.pop(SELECTED_INPUT_FACE_INDEX)
+    ui.globals.ui_input_thumbs.insert(SELECTED_INPUT_FACE_INDEX + offset, f)
+    return ui.globals.ui_input_thumbs
+
+
+def move_selected_target(button_text):
+    global SELECTED_TARGET_FACE_INDEX
+
+    if button_text == "⬅ Move left":
+        if SELECTED_TARGET_FACE_INDEX <= 0:
+            return ui.globals.ui_target_thumbs
+        offset = -1
+    else:
+        if len(ui.globals.ui_target_thumbs) <= SELECTED_TARGET_FACE_INDEX:
+            return ui.globals.ui_target_thumbs
+        offset = 1
+
+    f = roop.globals.TARGET_FACES.pop(SELECTED_TARGET_FACE_INDEX)
+    roop.globals.TARGET_FACES.insert(SELECTED_TARGET_FACE_INDEX + offset, f)
+    f = ui.globals.ui_target_thumbs.pop(SELECTED_TARGET_FACE_INDEX)
+    ui.globals.ui_target_thumbs.insert(SELECTED_TARGET_FACE_INDEX + offset, f)
+    return ui.globals.ui_target_thumbs
+
+
+
+
 def on_select_target_face(evt: gr.SelectData):
     global SELECTED_TARGET_FACE_INDEX
 
     SELECTED_TARGET_FACE_INDEX = evt.index
 
 def remove_selected_target_face():
-    if len(roop.globals.TARGET_FACES) > SELECTED_TARGET_FACE_INDEX:
+    if len(ui.globals.ui_target_thumbs) > SELECTED_TARGET_FACE_INDEX:
         f = roop.globals.TARGET_FACES.pop(SELECTED_TARGET_FACE_INDEX)
         del f
         if len(ui.globals.ui_target_thumbs) > SELECTED_TARGET_FACE_INDEX:
@@ -338,9 +445,6 @@ def remove_selected_target_face():
     return ui.globals.ui_target_thumbs
 
 
-
-
-
 def on_use_face_from_selected(files, frame_num):
     global IS_INPUT, SELECTION_FACES_DATA
 
@@ -368,6 +472,9 @@ def on_use_face_from_selected(files, frame_num):
         else:
             gr.Info('No faces detected!')
             roop.globals.target_path = None
+    else:
+        gr.Info('Unknown image/video type!')
+        roop.globals.target_path = None
 
     if len(thumbs) == 1:
         roop.globals.TARGET_FACES.append(SELECTION_FACES_DATA[0][0])
@@ -377,11 +484,10 @@ def on_use_face_from_selected(files, frame_num):
     return gr.Row(visible=True), thumbs, gr.Gallery(visible=True), gr.Dropdown(visible=True)
 
 
-
 def on_select_face(evt: gr.SelectData): # SelectData is a subclass of EventData
     global SELECTED_FACE_INDEX
     SELECTED_FACE_INDEX = evt.index
-
+
 
 def on_selected_face():
     global IS_INPUT, SELECTED_FACE_INDEX, SELECTION_FACES_DATA
@@ -399,7 +505,7 @@ def on_selected_face():
         roop.globals.TARGET_FACES.append(fd[0])
         ui.globals.ui_target_thumbs.append(image)
     return gr.Gallery(visible=True), ui.globals.ui_target_thumbs, gr.Dropdown(value='Selected face')
-
+
 # bt_faceselect.click(fn=on_selected_face, outputs=[dynamic_face_selection, face_selection, input_faces, target_faces])
 
 def on_end_face_selection():
@@ -407,7 +513,7 @@ def on_end_face_selection():
 
 
 def on_preview_frame_changed(frame_num, files, fake_preview, enhancer, detection, face_distance, blend_ratio,
-                             selected_mask_engine, clip_text, no_face_action, vr_mode, auto_rotate, maskimage, show_face_area, num_steps):
+                             selected_mask_engine, clip_text, no_face_action, vr_mode, auto_rotate, maskimage, show_face_area, restore_original_mouth, num_steps, upsample):
     global SELECTED_INPUT_FACE_INDEX, manual_masking, current_video_fps
 
     from roop.core import live_swap, get_processing_plugins
@@ -454,6 +560,8 @@ def on_preview_frame_changed(frame_num, files, fake_preview, enhancer, detection
     roop.globals.no_face_action = index_of_no_face_action(no_face_action)
     roop.globals.vr_mode = vr_mode
     roop.globals.autorotate_faces = auto_rotate
+    roop.globals.subsample_size = int(upsample[:3])
+
 
     mask_engine = map_mask_engine(selected_mask_engine, clip_text)
 
@@ -464,7 +572,7 @@ def on_preview_frame_changed(frame_num, files, fake_preview, enhancer, detection
         face_index = 0
 
     options = ProcessOptions(get_processing_plugins(mask_engine), roop.globals.distance_threshold, roop.globals.blend_ratio,
-                              roop.globals.face_swap_mode, face_index, clip_text, maskimage, num_steps, show_face_area)
+                              roop.globals.face_swap_mode, face_index, clip_text, maskimage, num_steps, roop.globals.subsample_size, show_face_area, restore_original_mouth)
 
     current_frame = live_swap(current_frame, options)
     if current_frame is None:
@@ -481,7 +589,6 @@ def map_mask_engine(selected_mask_engine, clip_text):
     else:
         mask_engine = None
     return mask_engine
-
 
 
 def on_toggle_masking(previewimage, mask):
@@ -513,7 +620,6 @@ def on_set_frame(sender:str, frame_num):
         list_files_process[idx].endframe = max(frame_num, start)
 
     return gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe)
-
 
 
 def on_preview_mask(frame_num, files, clip_text, mask_engine):
@@ -538,13 +644,12 @@ def on_preview_mask(frame_num, files, clip_text, mask_engine):
     elif mask_engine == "DFL XSeg":
         mask_engine = "mask_xseg"
     options = ProcessOptions(get_processing_plugins(mask_engine), roop.globals.distance_threshold, roop.globals.blend_ratio,
-                              "all", 0, clip_text, None, 0, False, True)
+                              "all", 0, clip_text, None, 0, 128, False, False, True)
 
     current_frame = live_swap(current_frame, options)
     return util.convert_to_gradio(current_frame)
 
 
-
 def on_clear_input_faces():
     ui.globals.ui_input_thumbs.clear()
     roop.globals.INPUT_FACESETS.clear()
@@ -566,6 +671,8 @@ def translate_swap_mode(dropdown_text):
         return "selected"
     elif dropdown_text == "First found":
         return "first"
+    elif dropdown_text == "All input faces":
+        return "all_input"
     elif dropdown_text == "All female":
         return "all_female"
     elif dropdown_text == "All male":
@@ -574,9 +681,8 @@ def translate_swap_mode(dropdown_text):
         return "all"
 
 
-
-def start_swap( enhancer, detection, keep_frames, wait_after_extraction, skip_audio, face_distance, blend_ratio,
-                selected_mask_engine, clip_text, processing_method, no_face_action, vr_mode, autorotate, num_swap_steps, imagemask, progress=gr.Progress()):
+def start_swap( output_method, enhancer, detection, keep_frames, wait_after_extraction, skip_audio, face_distance, blend_ratio,
+                selected_mask_engine, clip_text, processing_method, no_face_action, vr_mode, autorotate, restore_original_mouth, num_swap_steps, upsample, imagemask, progress=gr.Progress()):
     from ui.main import prepare_environment
     from roop.core import batch_process_regular
     global is_processing, list_files_process
@@ -585,7 +691,7 @@ def start_swap( output_method, enhancer, detection, keep_frames, wait_after_extraction, skip_audio,
         return gr.Button(variant="primary"), None, None
 
     if roop.globals.CFG.clear_output:
-        shutil.rmtree(roop.globals.output_path)
+        clean_dir(roop.globals.output_path)
 
     if not util.is_installed("ffmpeg"):
         msg = "ffmpeg is not installed! No video processing possible."
@@ -604,6 +710,7 @@ def start_swap( output_method, enhancer, detection, keep_frames, wait_after_extraction, skip_audio,
     roop.globals.no_face_action = index_of_no_face_action(no_face_action)
     roop.globals.vr_mode = vr_mode
     roop.globals.autorotate_faces = autorotate
+    roop.globals.subsample_size = int(upsample[:3])
     mask_engine = map_mask_engine(selected_mask_engine, clip_text)
 
     if roop.globals.face_swap_mode == 'selected':
@@ -618,7 +725,7 @@ def start_swap( output_method, enhancer, detection, keep_frames, wait_after_extraction, skip_audio,
     roop.globals.video_quality = roop.globals.CFG.video_quality
     roop.globals.max_memory = roop.globals.CFG.memory_limit if roop.globals.CFG.memory_limit > 0 else None
 
-    batch_process_regular(list_files_process, mask_engine, clip_text, processing_method == "In-Memory processing", imagemask, num_swap_steps, progress, SELECTED_INPUT_FACE_INDEX)
+    batch_process_regular(output_method, list_files_process, mask_engine, clip_text, processing_method == "In-Memory processing", imagemask, restore_original_mouth, num_swap_steps, progress, SELECTED_INPUT_FACE_INDEX)
     is_processing = False
     outdir = pathlib.Path(roop.globals.output_path)
     outfiles = [str(item) for item in outdir.rglob("*") if item.is_file()]
@@ -659,15 +766,17 @@ def on_destfiles_changed(destfiles):
 
     if util.is_video(filename) or filename.lower().endswith('gif'):
         total_frames = get_video_frame_total(filename)
-        current_video_fps = util.detect_fps(filename)
+        if total_frames is None or total_frames < 1:
+            total_frames = 1
+            gr.Warning(f"Corrupted video {filename}, can't detect number of frames!")
+        else:
+            current_video_fps = util.detect_fps(filename)
     else:
         total_frames = 1
     list_files_process[idx].endframe = total_frames
     if total_frames > 1:
         return gr.Slider(value=1, maximum=total_frames, info='0:00:00'), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe)
     return gr.Slider(value=1, maximum=total_frames, info='0:00:00'), ''
-
-
 
 
 def on_destfiles_selected(evt: gr.SelectData):
@@ -689,8 +798,7 @@ def on_destfiles_selected(evt: gr.SelectData):
     if total_frames > 1:
         return gr.Slider(value=list_files_process[idx].startframe, maximum=total_frames, info='0:00:00'), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe), fps
     return gr.Slider(value=1, maximum=total_frames, info='0:00:00'), gen_processing_text(0,0), fps
-
-
+
 
 def on_resultfiles_selected(evt: gr.SelectData, files):
     selected_index = evt.index
@@ -706,6 +814,12 @@ def on_resultfiles_finished(files):
     return display_output(filename)
 
 
+def get_gradio_output_format():
+    if roop.globals.CFG.output_image_format == "jpg":
+        return "jpeg"
+    return roop.globals.CFG.output_image_format
+
+
 def display_output(filename):
     if util.is_video(filename) and roop.globals.CFG.output_show_video:
         return gr.Image(visible=False), gr.Video(visible=True, value=filename)
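Every ProcessOptions(...) call in this commit gains two extra positional arguments, a subsample size and a restore-original-mouth flag. The ProcessOptions class itself is not part of the diff, so the sketch below is only an inference from the new call sites; the field names are guesses, not the repository's actual definition.

from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class ProcessOptions:
    # Hypothetical field names, ordered to match the positional call sites
    # seen in this commit, e.g.:
    # ProcessOptions(processors, distance, blend, "all", 0, clip_text, None,
    #                num_steps, 128, False, False, True)
    processors: Dict[str, Any]
    face_distance_threshold: float
    blend_ratio: float
    swap_mode: str
    selected_index: int
    masking_text: Optional[str]
    imagemask: Optional[Any]
    num_swap_steps: int
    subsample_size: int            # new in this commit (128 / 256 / 512)
    show_face_area_overlay: bool
    restore_original_mouth: bool   # new in this commit
    show_face_masking: bool = False

Keeping one ordering that satisfies all three call sites (extras batch processing, live preview, and mask preview) is what the extra arguments appear to be for. Both the faceswap and live-cam tabs also derive the subsample size by slicing the first three characters of the "Subsample upscale to" dropdown value, so int("128px"[:3]) yields 128.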
ui/tabs/livecam_tab.py CHANGED
@@ -25,21 +25,24 @@ def livecam_tab():
                 cb_obs = gr.Checkbox(label="Forward stream to virtual camera", interactive=True)
             with gr.Column():
                 dd_reso = gr.Dropdown(choices=["640x480","1280x720", "1920x1080"], value="1280x720", label="Fake Camera Resolution", interactive=True)
+                cb_xseg = gr.Checkbox(label="Use DFL Xseg masking", interactive=True, value=True)
+                cb_mouthrestore = gr.Checkbox(label="Restore original mouth area", interactive=True, value=False)
 
         with gr.Row():
-            fake_cam_image = gr.Image(label='Fake Camera Output', interactive=False)
+            fake_cam_image = gr.Image(label='Fake Camera Output', interactive=False, format="jpeg")
 
-    start_event = bt_start.click(fn=start_cam, inputs=[cb_obs, camera_num, dd_reso, ui.globals.ui_selected_enhancer, ui.globals.ui_blend_ratio],outputs=[bt_start, bt_stop,fake_cam_image])
+    start_event = bt_start.click(fn=start_cam, inputs=[cb_obs, cb_xseg, cb_mouthrestore, camera_num, dd_reso, ui.globals.ui_selected_enhancer, ui.globals.ui_blend_ratio, ui.globals.ui_upscale],outputs=[bt_start, bt_stop,fake_cam_image])
     bt_stop.click(fn=stop_swap, cancels=[start_event], outputs=[bt_start, bt_stop], queue=False)
 
 
-def start_cam(stream_to_obs, cam, reso, enhancer, blend_ratio):
+def start_cam(stream_to_obs, use_xseg, use_mouthrestore, cam, reso, enhancer, blend_ratio, upscale):
     from roop.virtualcam import start_virtual_cam
     from roop.utilities import convert_to_gradio
 
-    start_virtual_cam(stream_to_obs, cam, reso)
     roop.globals.selected_enhancer = enhancer
     roop.globals.blend_ratio = blend_ratio
+    roop.globals.subsample_size = int(upscale[:3])
+    start_virtual_cam(stream_to_obs, use_xseg, use_mouthrestore, cam, reso)
     while True:
         yield gr.Button(interactive=False), gr.Button(interactive=True), convert_to_gradio(ui.globals.ui_camera_frame)
 
ui/tabs/settings_tab.py CHANGED
@@ -3,6 +3,7 @@ import os
 import gradio as gr
 import roop.globals
 import ui.globals
+from roop.utilities import clean_dir
 
 available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"]
 image_formats = ['jpg','png', 'webp']
@@ -102,14 +103,13 @@ def on_settings_changed(evt: gr.SelectData):
 def clean_temp():
     from ui.main import prepare_environment
 
-    if not roop.globals.CFG.use_os_temp_folder:
-        shutil.rmtree(os.environ["TEMP"])
-    prepare_environment()
-
     ui.globals.ui_input_thumbs.clear()
     roop.globals.INPUT_FACESETS.clear()
     roop.globals.TARGET_FACES.clear()
     ui.globals.ui_target_thumbs = []
+    if not roop.globals.CFG.use_os_temp_folder:
+        clean_dir(os.environ["TEMP"])
+    prepare_environment()
     gr.Info('Temp Files removed')
     return None,None,None,None
 