ford442 committed on
Commit f6ed375
1 Parent(s): a4a1720

Update app.py

Files changed (1)
  1. app.py +23 -3
app.py CHANGED
@@ -160,7 +160,7 @@ preset_options = [
     {"label": "1216x704, 41 frames", "width": 1216, "height": 704, "num_frames": 41},
     {"label": "1088x704, 49 frames", "width": 1088, "height": 704, "num_frames": 49},
     {"label": "1056x640, 57 frames", "width": 1056, "height": 640, "num_frames": 57},
-    {"label": "448x448, 100 frames", "width": 448, "height": 448, "num_frames": 200},
+    {"label": "448x448, 100 frames", "width": 448, "height": 448, "num_frames": 100},
     {"label": "448x448, 200 frames", "width": 448, "height": 448, "num_frames": 200},
     {"label": "640x640, 80 frames", "width": 640, "height": 640, "num_frames": 80},
     {"label": "768x768, 64 frames", "width": 768, "height": 768, "num_frames": 64},
@@ -225,7 +225,19 @@ pipeline = XoraVideoPipeline(
     vae=vae,
 ).to(torch.bfloat16).to(device)

-@spaces.GPU(duration=80)
+GPU_DURATION_OPTIONS = {
+    "Short (45s)": 45,
+    "Short (60s)": 60,
+    "Medium (80s)": 80,
+    "Medium (100s)": 100,
+    "Long (120s)": 120,
+    "Long (140s)": 140,
+}
+
+def set_gpu_duration(duration_choice):
+    os.environ["GPU_DURATION"] = str(GPU_DURATION_OPTIONS[duration_choice])
+
+@spaces.GPU(duration=int(os.getenv("GPU_DURATION", "80")))  # Dynamic duration
 def generate_video_from_text(
     prompt="",
     enhance_prompt_toggle=False,
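A caveat on this pattern: Python evaluates decorator arguments once, when the module is imported, so `duration=int(os.getenv("GPU_DURATION", "80"))` is frozen at whatever `GPU_DURATION` holds when app.py loads. Calling `set_gpu_duration` later updates the environment variable but not the already-decorated functions; on a ZeroGPU Space the new value would only apply after a restart re-imports app.py. A minimal, self-contained sketch of the timing, using a hypothetical `gpu` stand-in for `spaces.GPU`:

import os

def gpu(duration):  # stand-in decorator; `duration` is captured right here
    def wrap(fn):
        def inner(*args, **kwargs):
            print(f"allocating GPU for {duration}s")
            return fn(*args, **kwargs)
        return inner
    return wrap

@gpu(duration=int(os.getenv("GPU_DURATION", "80")))  # evaluated once, at import
def job():
    pass

os.environ["GPU_DURATION"] = "140"  # too late to affect `job`
job()  # prints: allocating GPU for 80s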
@@ -304,7 +316,7 @@ def generate_video_from_text(
     torch.cuda.empty_cache()
     return output_path

-@spaces.GPU(duration=80)
+@spaces.GPU(duration=int(os.getenv("GPU_DURATION", "80")))  # Dynamic duration
 def generate_video_from_image(
     image_path,
     prompt="",
@@ -401,6 +413,14 @@ def create_advanced_options():
     seed = gr.Slider(label="4.1 Seed", minimum=0, maximum=1000000, step=1, value=646373)
     inference_steps = gr.Slider(label="4.2 Inference Steps", minimum=1, maximum=50, step=1, value=35)
     guidance_scale = gr.Slider(label="4.3 Guidance Scale", minimum=1.0, maximum=5.0, step=0.1, value=4.2)
+
+    gpu_duration = gr.Dropdown(
+        label="GPU Duration",
+        choices=list(GPU_DURATION_OPTIONS.keys()),
+        value="Medium (80s)",  # Default; must be one of the choices above
+    )
+
+    gpu_duration.change(fn=set_gpu_duration, inputs=gpu_duration, outputs=[])

     height_slider = gr.Slider(
         label="4.4 Height",
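If the intent is a per-request duration, newer releases of the `spaces` package describe passing a callable as `duration`, evaluated on each call with the same arguments as the decorated function; whether the Space's pinned `spaces` version supports this is an assumption to verify against its docs. A hedged sketch reusing this diff's names:

import os
import spaces

def get_duration(*args, **kwargs):
    # Re-read the env var on every call instead of once at import time.
    return int(os.getenv("GPU_DURATION", "80"))

@spaces.GPU(duration=get_duration)  # callable duration: assumed feature, verify with installed spaces version
def generate_video_from_image(image_path, prompt="", **kwargs):
    ...

With that in place, the dropdown's `set_gpu_duration` callback would take effect on the next generation instead of requiring a restart.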
 