ford442 committed
Commit b7f4b96
Parent: 0838968

Update app.py

Files changed (1):
  app.py +3 -3
app.py CHANGED

@@ -69,7 +69,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 request_log = []
 
-clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path).to(torch.device("cuda:0"))
+clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
 clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
 
 def compute_clip_embedding(text=None, image=None):
@@ -404,8 +404,8 @@ def generate_video_from_image_90(
 def create_advanced_options():
     with gr.Accordion("Step 4: Advanced Options (Optional)", open=False):
         seed = gr.Slider(label="4.1 Seed", minimum=0, maximum=1000000, step=1, value=646373)
-        inference_steps = gr.Slider(label="4.2 Inference Steps", minimum=5, maximum=150, step=5, value=40)
-        guidance_scale = gr.Slider(label="4.3 Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=4.2)
+        inference_steps = gr.Slider(label="4.2 Inference Steps", minimum=5, maximum=150, step=5, value=30)
+        guidance_scale = gr.Slider(label="4.3 Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=3.0)
 
         height_slider = gr.Slider(
             label="4.4 Height",
 