Update app.py
app.py CHANGED
@@ -66,8 +66,8 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 request_log = []
 
-clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
-clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
+clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path).to(device)
+clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
 
 
 def compute_clip_embedding(text=None, image=None):
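For context: `device` is defined just above this hunk (see the `@@` header), and only the model is moved onto it. CLIPModel is a torch.nn.Module, so `.to(device)` is valid there; CLIPProcessor is a plain tokenizer/image-preprocessor with no `.to()` method, so it stays on the CPU and the processed inputs are moved instead. A minimal sketch of the resulting usage (illustrative only; the prompt text and the omitted cache_dir are assumptions, not lifted from app.py):

import torch
from transformers import CLIPModel, CLIPProcessor

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# The model is an nn.Module and benefits from living on the GPU.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
# The processor runs on the CPU and has no .to() method.
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Move the *inputs* to the model's device before the forward pass.
inputs = clip_processor(text=["a sample prompt"], return_tensors="pt").to(device)
with torch.no_grad():
    text_embedding = clip_model.get_text_features(**inputs)  # shape (1, 512)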
@@ -422,7 +422,7 @@ def create_advanced_options():
     num_frames_slider = gr.Slider(
         label="4.5 Number of Frames",
         minimum=1,
-        maximum=
+        maximum=300,
         step=1,
         value=60,
         visible=False,
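The slider is created with visible=False, so it is presumably revealed by an advanced-options control elsewhere in the file. A short, self-contained sketch of that pattern (the checkbox and its wiring are hypothetical, added here only to show how a hidden slider gets toggled on; the slider arguments match the hunk above):

import gradio as gr

with gr.Blocks() as demo:
    # Hypothetical toggle; app.py does its own wiring inside create_advanced_options().
    show_advanced = gr.Checkbox(label="Show advanced options", value=False)
    num_frames_slider = gr.Slider(
        label="4.5 Number of Frames",
        minimum=1,
        maximum=300,
        step=1,
        value=60,
        visible=False,
    )
    # Reveal the slider when the checkbox is ticked.
    show_advanced.change(
        lambda show: gr.update(visible=show),
        inputs=show_advanced,
        outputs=num_frames_slider,
    )

demo.launch()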
@@ -534,8 +534,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
 
     txt2vid_frame_rate = gr.Slider(
         label="Step 3.2: Frame Rate",
-        minimum=
-        maximum=
+        minimum=6,
+        maximum=60,
         step=1,
         value=20,
     )
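Together with the 300-frame cap above, these frame-rate bounds determine the possible clip lengths, since duration is simply frames divided by frames per second. Checking the extremes (plain arithmetic, not code from app.py):

# duration_seconds = num_frames / frame_rate
default_duration = 60 / 20   # 3.0 s  (default 60 frames at the default 20 fps)
longest_clip     = 300 / 6   # 50.0 s (max frames at the slowest frame rate)
shortest_clip    = 1 / 60    # ~0.02 s (a single frame at the fastest frame rate)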