Update app.py
Browse files
app.py
CHANGED
@@ -26,15 +26,16 @@ import csv
|
|
26 |
from datetime import datetime
|
27 |
from openai import OpenAI
|
28 |
|
29 |
-
torch.backends.cuda.matmul.allow_tf32 = True
|
30 |
-
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = True
|
31 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
32 |
-
torch.backends.cudnn.allow_tf32 = True
|
33 |
torch.backends.cudnn.deterministic = False
|
34 |
-
|
35 |
torch.backends.cudnn.benchmark = False
|
|
|
|
|
36 |
|
37 |
-
torch.set_float32_matmul_precision("high")
|
38 |
|
39 |
# Load Hugging Face token if needed
|
40 |
hf_token = os.getenv("HF_TOKEN")
|
@@ -229,7 +230,7 @@ def generate_video_from_text(
|
|
229 |
txt2vid_analytics_toggle=True,
|
230 |
negative_prompt="",
|
231 |
frame_rate=25,
|
232 |
-
seed=
|
233 |
num_inference_steps=30,
|
234 |
guidance_scale=3,
|
235 |
height=512,
|
@@ -253,7 +254,7 @@ def generate_video_from_text(
|
|
253 |
"media_items": None,
|
254 |
}
|
255 |
|
256 |
-
generator = torch.Generator(device="cuda")
|
257 |
|
258 |
def gradio_progress_callback(self, step, timestep, kwargs):
|
259 |
progress((step + 1) / num_inference_steps)
|
@@ -300,6 +301,8 @@ def generate_video_from_text(
|
|
300 |
del video_np
|
301 |
torch.cuda.empty_cache()
|
302 |
return output_path
|
|
|
|
|
303 |
|
304 |
@spaces.GPU(duration=80)
|
305 |
def generate_video_from_image(
|
@@ -309,7 +312,7 @@ def generate_video_from_image(
|
|
309 |
img2vid_analytics_toggle=True,
|
310 |
negative_prompt="",
|
311 |
frame_rate=25,
|
312 |
-
seed=
|
313 |
num_inference_steps=30,
|
314 |
guidance_scale=3,
|
315 |
height=512,
|
@@ -349,7 +352,7 @@ def generate_video_from_image(
|
|
349 |
"media_items": media_items,
|
350 |
}
|
351 |
|
352 |
-
generator = torch.Generator(device="cuda")
|
353 |
|
354 |
def gradio_progress_callback(self, step, timestep, kwargs):
|
355 |
progress((step + 1) / num_inference_steps)
|
|
|
26 |
from datetime import datetime
|
27 |
from openai import OpenAI
|
28 |
|
29 |
+
torch.backends.cuda.matmul.allow_tf32 = False
|
30 |
+
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
|
31 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
32 |
+
torch.backends.cudnn.allow_tf32 = False
|
33 |
torch.backends.cudnn.deterministic = False
|
|
|
34 |
torch.backends.cudnn.benchmark = False
|
35 |
+
torch.backends.cuda.preferred_blas_library="cublas"
|
36 |
+
torch.backends.cuda.preferred_linalg_library="cusolver"
|
37 |
|
38 |
+
torch.set_float32_matmul_precision("highest")
|
39 |
|
40 |
# Load Hugging Face token if needed
|
41 |
hf_token = os.getenv("HF_TOKEN")
|
|
|
230 |
txt2vid_analytics_toggle=True,
|
231 |
negative_prompt="",
|
232 |
frame_rate=25,
|
233 |
+
seed=random.randint(0, MAX_SEED),
|
234 |
num_inference_steps=30,
|
235 |
guidance_scale=3,
|
236 |
height=512,
|
|
|
254 |
"media_items": None,
|
255 |
}
|
256 |
|
257 |
+
generator = torch.Generator(device="cpu").manual_seed(seed)
|
258 |
|
259 |
def gradio_progress_callback(self, step, timestep, kwargs):
|
260 |
progress((step + 1) / num_inference_steps)
|
|
|
301 |
del video_np
|
302 |
torch.cuda.empty_cache()
|
303 |
return output_path
|
304 |
+
|
305 |
+
MAX_SEED = np.iinfo(np.int64).max
|
306 |
|
307 |
@spaces.GPU(duration=80)
|
308 |
def generate_video_from_image(
|
|
|
312 |
img2vid_analytics_toggle=True,
|
313 |
negative_prompt="",
|
314 |
frame_rate=25,
|
315 |
+
seed=random.randint(0, MAX_SEED),
|
316 |
num_inference_steps=30,
|
317 |
guidance_scale=3,
|
318 |
height=512,
|
|
|
352 |
"media_items": media_items,
|
353 |
}
|
354 |
|
355 |
+
generator = torch.Generator(device="cpu").manual_seed(seed)
|
356 |
|
357 |
def gradio_progress_callback(self, step, timestep, kwargs):
|
358 |
progress((step + 1) / num_inference_steps)
|