Spaces: seawolf2357 (Paused)
committed • Commit 18e9814 • 1 Parent(s): 28e47d1
Update app.py
app.py
CHANGED
@@ -3,10 +3,11 @@ import gradio as gr
 from diffusers import AnimateDiffPipeline, MotionAdapter, DPMSolverMultistepScheduler, AutoencoderKL, SparseControlNetModel
 from diffusers.utils import export_to_gif, load_image
 from transformers import pipeline
+from PIL import Image
+import numpy as np
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-# Load the Korean-to-English translation model
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
 def translate_korean_to_english(text):
@@ -15,7 +16,7 @@ def translate_korean_to_english(text):
         return translated
     return text
 
-def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_frame_indices, controlnet_conditioning_scale):
+def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_frame_indices, controlnet_conditioning_scale, width, height, num_frames):
     prompt = translate_korean_to_english(prompt)
     negative_prompt = translate_korean_to_english(negative_prompt)
 
@@ -50,24 +51,37 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
         conditioning_frames=conditioning_frames,
         controlnet_conditioning_scale=controlnet_conditioning_scale,
         controlnet_frame_indices=conditioning_frame_indices,
+        width=width,
+        height=height,
+        num_frames=num_frames,
         generator=torch.Generator().manual_seed(1337),
     ).frames[0]
 
-    export_to_gif(video, "output.gif")
+    # Post-processing: smoother transitions via frame interpolation
+    interpolated_frames = []
+    for i in range(len(video) - 1):
+        interpolated_frames.append(video[i])
+        interpolated_frames.append(Image.blend(video[i], video[i+1], 0.5))
+    interpolated_frames.append(video[-1])
+
+    export_to_gif(interpolated_frames, "output.gif")
     return "output.gif"
 
 demo = gr.Interface(
     fn=generate_video,
     inputs=[
-        gr.Textbox(label="Prompt (Korean or English)", value="
+        gr.Textbox(label="Prompt (Korean or English)", value="A cute puppy barking quietly, masterpiece, high quality"),
         gr.Textbox(label="Negative Prompt (Korean or English)", value="low quality, worst quality, letterbox"),
-        gr.Slider(label="Number of Inference Steps", minimum=1, maximum=200, step=1, value=
+        gr.Slider(label="Number of Inference Steps", minimum=1, maximum=200, step=1, value=150),
         gr.Textbox(label="Conditioning Frame Indices", value="[0, 8, 15]"),
-        gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, value=1.0)
+        gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, value=1.0),
+        gr.Slider(label="Width", minimum=256, maximum=1024, step=64, value=512),
+        gr.Slider(label="Height", minimum=256, maximum=1024, step=64, value=512),
+        gr.Slider(label="Number of Frames", minimum=16, maximum=128, step=16, value=64)
     ],
     outputs=gr.Image(label="Generated Video"),
-    title="Video Generation with AnimateDiffSparseControlNetPipeline",
-    description="Generates a video with AnimateDiffSparseControlNetPipeline. Prompts can be entered in Korean or English."
+    title="High-Quality Video Generation with AnimateDiffSparseControlNetPipeline",
+    description="Generates a high-quality video with AnimateDiffSparseControlNetPipeline. Prompts can be entered in Korean or English."
 )
 
 demo.launch()
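
Note on the added post-processing loop: it performs a simple linear cross-fade, not true motion interpolation. Between every pair of adjacent frames it inserts one 50/50 blend, roughly doubling the frame count. A minimal standalone sketch of the same idea, assuming the frames are same-size, same-mode PIL images as returned by the pipeline (the helper name is mine, not from the commit):

from PIL import Image

def interpolate_frames(frames, alpha=0.5):
    # Insert one cross-faded frame between each adjacent pair.
    # Image.blend computes pixel-wise a*(1-alpha) + b*alpha, so
    # alpha=0.5 gives an even mix; inputs must match in size and mode.
    out = []
    for a, b in zip(frames, frames[1:]):
        out.append(a)
        out.append(Image.blend(a, b, alpha))
    out.append(frames[-1])
    return out

Because the extra frames are written with the same per-frame delay, the resulting GIF plays back more slowly than the raw output; reducing the frame delay accordingly would preserve the original timing.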
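
One detail the visible hunks leave implicit: the Conditioning Frame Indices input is a gr.Textbox, so generate_video receives the literal string "[0, 8, 15]", and the elided middle of the function presumably converts it to a list of ints before passing controlnet_frame_indices to the pipeline. A hedged sketch of such a parse step (parse_frame_indices is a hypothetical helper; the actual parsing code is not shown in this commit):

import ast

def parse_frame_indices(text):
    # Hypothetical helper -- the commit does not show its parsing code.
    # ast.literal_eval safely evaluates "[0, 8, 15]" to a Python list
    # without executing arbitrary expressions, unlike eval().
    indices = ast.literal_eval(text)
    return [int(i) for i in indices]

assert parse_frame_indices("[0, 8, 15]") == [0, 8, 15]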
|