Spaces: Running on Zero
gokaygokay committed
Commit • 17135c5
Parent(s): 2408874

Update app.py
app.py CHANGED
@@ -158,30 +158,30 @@ class ModelManager:
         return pipe

     @timer_func
-    def process_image(self, input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
-        condition_image = self.prepare_image(input_image, resolution, hdr)
+    def process_image_batch(self, input_images, resolution, num_inference_steps, strength, hdr, guidance_scale):
+        condition_images = [self.prepare_image(img, resolution, hdr) for img in input_images]

         prompt = "masterpiece, best quality, highres"
         negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"

         options = {
-            "prompt": prompt,
-            "negative_prompt": negative_prompt,
-            "image": condition_image,
-            "control_image": condition_image,
-            "width": condition_image.size[0],
-            "height": condition_image.size[1],
+            "prompt": [prompt] * len(input_images),
+            "negative_prompt": [negative_prompt] * len(input_images),
+            "image": condition_images,
+            "control_image": condition_images,
+            "width": condition_images[0].size[0],
+            "height": condition_images[0].size[1],
             "strength": strength,
             "num_inference_steps": num_inference_steps,
             "guidance_scale": guidance_scale,
             "generator": torch.Generator(device=device).manual_seed(0),
         }

-        print("Running inference...")
-        result = self.pipe(**options).images[0]
-        print("Image processing completed successfully")
+        print("Running inference on batch...")
+        results = self.pipe(**options).images
+        print("Batch processing completed successfully")

-        return result
+        return results

     def prepare_image(self, input_image, resolution, hdr):
         condition_image = self.resize_and_upscale(input_image, resolution)
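The first hunk turns the per-image upscaling method into `process_image_batch`: prompt, negative prompt, and conditioning images are passed as equal-length lists, the pipeline denoises the whole batch in one pass, and the full `.images` list is returned instead of `.images[0]`. The sketch below illustrates that batched call pattern in isolation. It assumes a diffusers ControlNet img2img pipeline (the `control_image`/`strength` options above point at one); the checkpoint and ControlNet IDs, frame paths, and sampler settings are placeholders, not values from app.py.

```python
# Hedged sketch of the batched pipeline call, not the app's exact setup.
# Model IDs, file names, and parameter values below are placeholders.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11f1e_sd15_tile", torch_dtype=dtype
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=dtype
).to(device)

condition_images = [
    Image.open(f"frames/frame_{i:06d}.png").convert("RGB") for i in range(1, 3)
]
prompt = "masterpiece, best quality, highres"
negative_prompt = "low quality, blurry, lowres"

# Equal-length lists make the pipeline treat the inputs as one batch;
# .images then holds one output per input image, in the same order.
results = pipe(
    prompt=[prompt] * len(condition_images),
    negative_prompt=[negative_prompt] * len(condition_images),
    image=condition_images,
    control_image=condition_images,
    width=condition_images[0].size[0],
    height=condition_images[0].size[1],
    strength=0.4,
    num_inference_steps=20,
    guidance_scale=6.0,
    generator=torch.Generator(device=device).manual_seed(0),
).images
```

Batching trades peak VRAM for throughput: the scheduler loop runs once over a stacked latent batch instead of once per frame, which is why the caller in the second hunk can hand over eight frames at a time.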
@@ -320,27 +320,23 @@ def process_video(input_video, resolution, num_inference_steps, strength, hdr, g

     try:
         progress(0.2, desc="Processing frames...")
-        for i, frame_file in enumerate(frame_files[:frames_to_process]):
+        batch_size = 8
+        for i in tqdm(range(0, frames_to_process, batch_size), desc="Processing batches"):
             if abort_event.is_set():
                 print("Job aborted. Stopping processing of new frames.")
                 break

-
-
-
-
-
-
+            batch_frames = frame_files[i:min(i+batch_size, frames_to_process)]
+            input_images = [Image.open(os.path.join(frames_folder, frame)) for frame in batch_frames]
+
+            processed_images = model_manager.process_image_batch(input_images, resolution, num_inference_steps, strength, hdr, guidance_scale)
+
+            for frame_file, processed_image in zip(batch_frames, processed_images):
+                output_frame_path = os.path.join(processed_frames_folder, frame_file)
+                if not preserve_frames or not os.path.exists(output_frame_path):
                     processed_image.save(output_frame_path)
-
-
-                    prev_frame = f"frame_{int(frame_file.split('_')[1].split('.')[0]) - 1:06d}.png"
-                    prev_frame_path = os.path.join(processed_frames_folder, prev_frame)
-                    if os.path.exists(prev_frame_path):
-                        shutil.copy2(prev_frame_path, output_frame_path)
-                    else:
-                        shutil.copy2(os.path.join(frames_folder, frame_file), output_frame_path)
-            progress((0.2 + 0.7 * (i + 1) / frames_to_process), desc=f"Processing frame {i+1}/{frames_to_process}")
+
+            progress((0.2 + 0.7 * (i + batch_size) / frames_to_process), desc=f"Processed batch {i//batch_size + 1}/{(frames_to_process-1)//batch_size + 1}")

         # Always attempt to reassemble video
         progress(0.9, desc="Reassembling video...")
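The second hunk replaces the per-frame loop with slices of `batch_size = 8` frames: each slice is opened, handed to `process_image_batch`, and the returned images are saved unless `preserve_frames` keeps an existing output, with progress reported once per batch. The slicing and batch-count arithmetic can be checked in isolation; the standalone sketch below mirrors that index math with made-up frame names and counts.

```python
# Standalone sketch of the batch slicing and progress arithmetic in the
# new loop. Frame names and counts are made up for illustration.
frame_files = [f"frame_{i:06d}.png" for i in range(1, 21)]  # 20 fake frames
frames_to_process = len(frame_files)
batch_size = 8

total_batches = (frames_to_process - 1) // batch_size + 1  # ceiling division
for i in range(0, frames_to_process, batch_size):
    # min() keeps the last slice from reaching past frames_to_process
    batch_frames = frame_files[i:min(i + batch_size, frames_to_process)]
    batch_number = i // batch_size + 1
    # Same fraction the loop reports; on a final partial batch it can
    # exceed 0.9 because it counts i + batch_size, not the frames done.
    fraction = 0.2 + 0.7 * (i + batch_size) / frames_to_process
    print(f"batch {batch_number}/{total_batches}: {len(batch_frames)} frames, progress {fraction:.2f}")
```

With 20 frames and a batch size of 8 this yields batches of 8, 8, and 4 frames; the last partial batch reports a fraction slightly past 0.9 before the fixed `progress(0.9, ...)` call at reassembly brings it back in range.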