Update
Browse files
- app.py +4 -1
- requirements.txt +2 -2
app.py
CHANGED
@@ -28,6 +28,9 @@ DESCRIPTION = '# [Tune-A-Video](https://tuneavideo.github.io/)'
|
|
28 |
if not torch.cuda.is_available():
|
29 |
DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
|
30 |
|
|
|
|
|
|
|
31 |
HF_TOKEN = os.getenv('HF_TOKEN')
|
32 |
pipe = InferencePipeline(HF_TOKEN)
|
33 |
app = InferenceUtil(HF_TOKEN)
|
@@ -198,7 +201,7 @@ with gr.Blocks(css='style.css') as demo:
|
|
198 |
],
|
199 |
outputs=result,
|
200 |
fn=pipe.run,
|
201 |
-
cache_examples=
|
202 |
|
203 |
model_id.change(fn=app.load_model_info,
|
204 |
inputs=model_id,
|
|
|
28 |
if not torch.cuda.is_available():
|
29 |
DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
|
30 |
|
31 |
+
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
|
32 |
+
'CACHE_EXAMPLES') == '1'
|
33 |
+
|
34 |
HF_TOKEN = os.getenv('HF_TOKEN')
|
35 |
pipe = InferencePipeline(HF_TOKEN)
|
36 |
app = InferenceUtil(HF_TOKEN)
|
|
|
201 |
],
|
202 |
outputs=result,
|
203 |
fn=pipe.run,
|
204 |
+
cache_examples=CACHE_EXAMPLES)
|
205 |
|
206 |
model_id.change(fn=app.load_model_info,
|
207 |
inputs=model_id,
|
requirements.txt
CHANGED
@@ -4,8 +4,8 @@ decord==0.6.0
|
|
4 |
diffusers[torch]==0.11.1
|
5 |
einops==0.6.1
|
6 |
ftfy==6.1.1
|
7 |
-
gradio==3.
|
8 |
-
huggingface-hub==0.
|
9 |
imageio==2.31.0
|
10 |
imageio-ffmpeg==0.4.8
|
11 |
omegaconf==2.3.0
|
|
|
4 |
diffusers[torch]==0.11.1
|
5 |
einops==0.6.1
|
6 |
ftfy==6.1.1
|
7 |
+
gradio==3.36.1
|
8 |
+
huggingface-hub==0.16.4
|
9 |
imageio==2.31.0
|
10 |
imageio-ffmpeg==0.4.8
|
11 |
omegaconf==2.3.0
|