eranlevinlt
committed on
Commit
•
300ca95
1
Parent(s):
100ddf0
force clean
Browse files
app.py
CHANGED
@@ -257,7 +257,7 @@ def generate_video_from_image(
|
|
257 |
if not image_path:
|
258 |
raise gr.Error("Please provide an input image.", duration=5)
|
259 |
|
260 |
-
media_items = load_image_to_tensor_with_resize(image_path, height, width).to(device)
|
261 |
|
262 |
sample = {
|
263 |
"prompt": prompt,
|
@@ -271,41 +271,42 @@ def generate_video_from_image(
|
|
271 |
|
272 |
def gradio_progress_callback(self, step, timestep, kwargs):
|
273 |
progress((step + 1) / num_inference_steps)
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
|
289 |
-
|
290 |
-
|
291 |
-
|
292 |
-
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
|
297 |
-
|
298 |
-
|
299 |
-
|
300 |
-
|
301 |
-
|
302 |
-
|
303 |
-
|
304 |
-
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
|
|
|
309 |
|
310 |
return output_path
|
311 |
|
|
|
257 |
if not image_path:
|
258 |
raise gr.Error("Please provide an input image.", duration=5)
|
259 |
|
260 |
+
media_items = load_image_to_tensor_with_resize(image_path, height, width).to(device).detach()
|
261 |
|
262 |
sample = {
|
263 |
"prompt": prompt,
|
|
|
271 |
|
272 |
def gradio_progress_callback(self, step, timestep, kwargs):
|
273 |
progress((step + 1) / num_inference_steps)
|
274 |
+
try:
|
275 |
+
with torch.no_grad():
|
276 |
+
images = pipeline(
|
277 |
+
num_inference_steps=num_inference_steps,
|
278 |
+
num_images_per_prompt=1,
|
279 |
+
guidance_scale=guidance_scale,
|
280 |
+
generator=generator,
|
281 |
+
output_type="pt",
|
282 |
+
height=height,
|
283 |
+
width=width,
|
284 |
+
num_frames=num_frames,
|
285 |
+
frame_rate=frame_rate,
|
286 |
+
**sample,
|
287 |
+
is_video=True,
|
288 |
+
vae_per_channel_normalize=True,
|
289 |
+
conditioning_method=ConditioningMethod.FIRST_FRAME,
|
290 |
+
mixed_precision=True,
|
291 |
+
callback_on_step_end=gradio_progress_callback,
|
292 |
+
).images
|
293 |
+
|
294 |
+
output_path = tempfile.mktemp(suffix=".mp4")
|
295 |
+
video_np = images.squeeze(0).permute(1, 2, 3, 0).cpu().float().numpy()
|
296 |
+
video_np = (video_np * 255).astype(np.uint8)
|
297 |
+
height, width = video_np.shape[1:3]
|
298 |
+
out = cv2.VideoWriter(
|
299 |
+
output_path, cv2.VideoWriter_fourcc(*"mp4v"), frame_rate, (width, height)
|
300 |
+
)
|
301 |
+
for frame in video_np[..., ::-1]:
|
302 |
+
out.write(frame)
|
303 |
+
out.release()
|
304 |
+
finally:
|
305 |
+
del media_items
|
306 |
+
del images
|
307 |
+
del video_np
|
308 |
+
gc.collect()
|
309 |
+
torch.cuda.empty_cache()
|
310 |
|
311 |
return output_path
|
312 |
|