jbilcke-hf HF staff committed on
Commit
d7e0552
1 Parent(s): 3da10ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -8
app.py CHANGED
@@ -18,8 +18,11 @@ import cv2
18
  from PIL import Image
19
  import tempfile
20
  import os
 
21
  import gc
22
  from openai import OpenAI
 
 
23
 
24
  # Load Hugging Face token if needed
25
  hf_token = os.getenv("HF_TOKEN")
@@ -34,6 +37,14 @@ with open(system_prompt_t2v_path, "r") as f:
34
  with open(system_prompt_i2v_path, "r") as f:
35
  system_prompt_i2v = f.read()
36
 
 
 
 
 
 
 
 
 
37
  # Set model download directory within Hugging Face Spaces
38
  model_path = "asset"
39
  if not os.path.exists(model_path):
@@ -272,7 +283,7 @@ def generate_video_from_text(
272
  gc.collect()
273
 
274
  output_path = tempfile.mktemp(suffix=".mp4")
275
- print(images.shape)
276
  video_np = images.squeeze(0).permute(1, 2, 3, 0).cpu().float().numpy()
277
  video_np = (video_np * 255).astype(np.uint8)
278
  height, width = video_np.shape[1:3]
@@ -286,8 +297,15 @@ def generate_video_from_text(
286
  del images
287
  del video_np
288
  torch.cuda.empty_cache()
289
- return output_path
290
-
 
 
 
 
 
 
 
291
 
292
  def generate_video_from_image(
293
  secret_token="",
@@ -380,7 +398,14 @@ def generate_video_from_image(
380
  torch.cuda.empty_cache()
381
  gc.collect()
382
 
383
- return output_path
 
 
 
 
 
 
 
384
 
385
 
386
  def create_advanced_options():
@@ -486,7 +511,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
486
  )
487
 
488
  with gr.Column():
489
- txt2vid_output = gr.Video(label="Generated Output")
490
 
491
  with gr.Row():
492
  gr.Examples(
@@ -559,7 +584,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
559
  )
560
 
561
  with gr.Column():
562
- img2vid_output = gr.Video(label="Generated Output")
563
 
564
  with gr.Row():
565
  gr.Examples(
@@ -635,6 +660,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
635
  )
636
 
637
  if __name__ == "__main__":
638
- iface.queue(max_size=32, default_concurrency_limit=1, api_open=False).launch(
639
- share=True, show_api=True
640
  )
 
18
  from PIL import Image
19
  import tempfile
20
  import os
21
+ import errno
22
  import gc
23
  from openai import OpenAI
24
+ import base64
25
+ from io import BytesIO
26
 
27
  # Load Hugging Face token if needed
28
  hf_token = os.getenv("HF_TOKEN")
 
37
  with open(system_prompt_i2v_path, "r") as f:
38
  system_prompt_i2v = f.read()
39
 
40
+
41
def silentremove(filename):
    """Delete *filename* from disk, silently ignoring a missing file.

    Parameters
    ----------
    filename : str
        Path of the file to remove.

    Raises
    ------
    OSError
        Any OS error other than "file not found" (e.g. permission
        denied) propagates unchanged to the caller.
    """
    try:
        os.remove(filename)
    except FileNotFoundError:
        # FileNotFoundError is the errno.ENOENT subclass of OSError
        # (PEP 3151), so this is exactly the original errno check:
        # a missing file is the expected "already gone" case, while
        # every other OSError still raises.
        pass
47
+
48
  # Set model download directory within Hugging Face Spaces
49
  model_path = "asset"
50
  if not os.path.exists(model_path):
 
283
  gc.collect()
284
 
285
  output_path = tempfile.mktemp(suffix=".mp4")
286
+ #print(images.shape)
287
  video_np = images.squeeze(0).permute(1, 2, 3, 0).cpu().float().numpy()
288
  video_np = (video_np * 255).astype(np.uint8)
289
  height, width = video_np.shape[1:3]
 
297
  del images
298
  del video_np
299
  torch.cuda.empty_cache()
300
+
301
+ # Read the content of the video file and encode it to base64
302
+ with open(output_path, "rb") as video_file:
303
+ video_base64 = base64.b64encode(video_file.read()).decode('utf-8')
304
+
305
+ silentremove(output_path)
306
+
307
+ # Prepend the appropriate data URI header with MIME type
308
+ return 'data:video/mp4;base64,' + video_base64
309
 
310
  def generate_video_from_image(
311
  secret_token="",
 
398
  torch.cuda.empty_cache()
399
  gc.collect()
400
 
401
+ # Read the content of the video file and encode it to base64
402
+ with open(output_path, "rb") as video_file:
403
+ video_base64 = base64.b64encode(video_file.read()).decode('utf-8')
404
+
405
+ silentremove(output_path)
406
+
407
+ # Prepend the appropriate data URI header with MIME type
408
+ return 'data:video/mp4;base64,' + video_base64
409
 
410
 
411
  def create_advanced_options():
 
511
  )
512
 
513
  with gr.Column():
514
+ txt2vid_output = gr.Text(label="Generated Output (in base64)")
515
 
516
  with gr.Row():
517
  gr.Examples(
 
584
  )
585
 
586
  with gr.Column():
587
+ img2vid_output = gr.Text(label="Generated Output (in base64)")
588
 
589
  with gr.Row():
590
  gr.Examples(
 
660
  )
661
 
662
  if __name__ == "__main__":
663
+ iface.queue(max_size=32, default_concurrency_limit=1, api_open=True).launch(
664
+ share=False, show_api=True
665
  )