Spaces:
Runtime error
Runtime error
lalashechka
committed on
Commit
•
1198eae
1
Parent(s):
16d4e4c
Update app.py
Browse files
app.py
CHANGED
@@ -17,119 +17,141 @@ import re
|
|
17 |
from gradio_client import Client
|
18 |
|
19 |
|
20 |
-
def animate_img(encoded_string):
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
|
56 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
57 |
url_sd3 = os.getenv("url_sd3")
|
58 |
url_sd4 = os.getenv("url_sd4")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
64 |
c = 0
|
65 |
-
while c <
|
66 |
-
|
67 |
-
|
|
|
|
|
68 |
c += 1
|
69 |
-
time.sleep(1)
|
70 |
continue
|
71 |
-
if
|
72 |
-
|
73 |
-
|
74 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
75 |
|
76 |
-
image_bytes = base64.b64decode(base64_string)
|
77 |
-
with tempfile.NamedTemporaryFile(delete=False) as temp:
|
78 |
-
temp.write(image_bytes)
|
79 |
-
temp_file_path = temp.name
|
80 |
-
print("cs_1")
|
81 |
-
|
82 |
|
83 |
-
except:
|
84 |
-
print("c_2")
|
85 |
-
with closing(create_connection(f"{url_sd4}", timeout=120)) as conn:
|
86 |
-
conn.send('{"fn_index":0,"session_hash":""}')
|
87 |
-
conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","dreamshaperXL10_alpha2.safetensors [c8afe2ef]",30,"DPM++ 2M Karras",7,1024,1024,-1],"event_data":null,"fn_index":0,"session_hash":""}}')
|
88 |
-
conn.recv()
|
89 |
-
conn.recv()
|
90 |
-
conn.recv()
|
91 |
-
conn.recv()
|
92 |
-
photo = json.loads(conn.recv())['output']['data'][0]
|
93 |
-
base64_string = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
|
94 |
-
|
95 |
-
image_bytes = base64.b64decode(base64_string)
|
96 |
-
with tempfile.NamedTemporaryFile(delete=False) as temp:
|
97 |
-
temp.write(image_bytes)
|
98 |
-
temp_file_path = temp.name
|
99 |
-
print("cs_2")
|
100 |
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
time.sleep(10)
|
106 |
c = 0
|
107 |
-
while c <
|
108 |
-
r2 = requests.
|
109 |
-
|
110 |
-
if
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
sd_video = []
|
118 |
-
for match in matches:
|
119 |
-
sd_video.append(f"https://storage.stable-video-diffusion.com/{match}.mp4")
|
120 |
-
print(sd_video[0])
|
121 |
-
if len(sd_video) != 0:
|
122 |
-
return sd_video[0]
|
123 |
-
else:
|
124 |
-
_ = 1/0
|
125 |
-
except:
|
126 |
-
client1 = Client("https://emmadrex-stable-video-diffusion.hf.space")
|
127 |
-
result1 = client1.predict(encoded_string, api_name="/resize_image")
|
128 |
-
client = Client("https://emmadrex-stable-video-diffusion.hf.space")
|
129 |
-
result = client.predict(result1, 0, True, 1, 15, api_name="/video")
|
130 |
-
return result[0]['video']
|
131 |
-
|
132 |
-
|
133 |
def flip_text1(prompt, motion):
|
134 |
try:
|
135 |
language = detect(prompt)
|
@@ -269,25 +291,27 @@ with gr.Blocks(css=css) as demo:
|
|
269 |
with gr.Tab("Сгенерировать видео"):
|
270 |
with gr.Column():
|
271 |
prompt = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание:', lines=3)
|
272 |
-
motion1 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=[
|
273 |
-
|
|
|
274 |
with gr.Column():
|
275 |
text_button = gr.Button("Сгенерировать видео", variant='primary', elem_id="generate")
|
276 |
with gr.Column():
|
277 |
video_output = gr.Video(show_label=True, label='Результат:', type="file")
|
278 |
-
text_button.click(create_video, inputs=[prompt], outputs=video_output)
|
279 |
|
280 |
with gr.Tab("Анимировать изображение"):
|
281 |
with gr.Column():
|
282 |
prompt2 = gr.Image(show_label=True, interactive=True, type='filepath', label='Исходное изображение:')
|
283 |
prompt12 = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание видео (опционально):', lines=3)
|
284 |
-
motion2 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=[
|
285 |
-
|
|
|
286 |
with gr.Column():
|
287 |
text_button2 = gr.Button("Анимировать изображение", variant='primary', elem_id="generate")
|
288 |
with gr.Column():
|
289 |
video_output2 = gr.Video(show_label=True, label='Результат:', type="file")
|
290 |
-
text_button2.click(animate_img, inputs=[prompt2], outputs=video_output2)
|
291 |
|
292 |
demo.queue(concurrency_count=12)
|
293 |
demo.launch()
|
|
|
17 |
from gradio_client import Client
|
18 |
|
19 |
|
20 |
+
def animate_img(encoded_string, model):
    """Animate a still image into a short video.

    Parameters
    ----------
    encoded_string : str
        Path to the source image file (comes from a gr.Image with
        type='filepath', despite the misleading name).
    model : str
        Backend to use: 'Stable Video Diffusion' or 'AnimateDiff'.

    Returns
    -------
    str
        URL or local path of the generated video; None when *model*
        matches neither known backend.
    """
    if model == "Stable Video Diffusion":
        try:
            # Primary backend: upload the image, then poll the result page.
            # `with` closes the file handle (previously leaked).
            with open(encoded_string, 'rb') as image_file:
                r = requests.post("https://stable-video-diffusion.com/api/upload",
                                  files={"file": image_file})
            hash_ = r.json()['hash']
            time.sleep(10)
            c = 0
            while c < 10:
                r2 = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}")
                source_string = r2.text
                if "Generation has been in progress for" in source_string:
                    # Still rendering; wait and retry (max 10 polls).
                    time.sleep(15)
                    c += 1
                    continue
                # Finished: scrape the .mp4 URL out of the result page.
                pattern = r'https://storage.stable-video-diffusion.com/([a-f0-9]{32})\.mp4'
                matches = re.findall(pattern, source_string)
                sd_video = [f"https://storage.stable-video-diffusion.com/{m}.mp4"
                            for m in matches]
                if sd_video:
                    print("s_1")
                    return sd_video[0]
                # No URL on a "finished" page: jump to the fallback backend.
                # (Was the obscure `_ = 1/0` trick followed by an unreachable
                # print("f_1"); an explicit raise is clearer.)
                raise ValueError("no video URL in result page")
            # Poll loop exhausted: previously this fell through and silently
            # returned None; now it triggers the fallback instead.
            raise TimeoutError("stable-video-diffusion generation timed out")
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); still deliberately broad so any
            # primary-backend failure falls back to the public HF Space.
            print("2")
            client1 = Client("https://emmadrex-stable-video-diffusion.hf.space")
            result1 = client1.predict(encoded_string, api_name="/resize_image")
            client = Client("https://emmadrex-stable-video-diffusion.hf.space")
            result = client.predict(result1, 0, True, 1, 15, api_name="/video")
            res = result[0]['video']
            print("s_2")
            return res

    if model == "AnimateDiff":
        # AnimateDiff backend: single synchronous HF Space call with a
        # hard-coded "zoom-out" motion.
        client = Client("https://ap123-animateimage.hf.space/--replicas/zlwk6/")
        result = client.predict(encoded_string, "zoom-out", api_name="/predict")
        return result
|
60 |
+
|
61 |
+
|
62 |
+
def create_video(prompt, model):
    """Generate a video (or animated GIF) from a text prompt.

    For 'Stable Video Diffusion': first render a still image via a private
    websocket backend (url_sd3, falling back to url_sd4 on any failure),
    then animate that image through stable-video-diffusion.com, falling
    back to a public HF Space. For 'AnimateDiff': post the prompt to an
    async txt2gif API and poll for the result.

    Parameters
    ----------
    prompt : str
        Text description of the desired video.
    model : str
        'Stable Video Diffusion' or 'AnimateDiff'.

    Returns
    -------
    str
        URL (or local path) of the generated video; None when *model*
        matches neither backend, or when the AnimateDiff poll times out.
    """
    url_sd3 = os.getenv("url_sd3")
    url_sd4 = os.getenv("url_sd4")
    if model == "Stable Video Diffusion":
        # --- Step 1: render a still image from the prompt ---------------
        try:
            with closing(create_connection(f"{url_sd3}", timeout=120)) as conn:
                conn.send('{"fn_index":3,"session_hash":""}')
                conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry",7.5,"(No style)"],"event_data":null,"fn_index":3,"session_hash":""}}')
                c = 0
                while c < 60:
                    status = json.loads(conn.recv())['msg']
                    if status == 'estimation':
                        # Still queued; wait up to ~60 s total.
                        c += 1
                        time.sleep(1)
                        continue
                    if status == 'process_starts':
                        break
                photo = json.loads(conn.recv())['output']['data'][0][0]
                base64_string = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')

            image_bytes = base64.b64decode(base64_string)
            with tempfile.NamedTemporaryFile(delete=False) as temp:
                temp.write(image_bytes)
                temp_file_path = temp.name
            print("cs_1")

        except Exception:
            # Narrowed from a bare `except:`; still deliberately broad so
            # any url_sd3 failure falls through to the url_sd4 backend.
            print("c_2")
            with closing(create_connection(f"{url_sd4}", timeout=120)) as conn:
                conn.send('{"fn_index":0,"session_hash":""}')
                conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","dreamshaperXL10_alpha2.safetensors [c8afe2ef]",30,"DPM++ 2M Karras",7,1024,1024,-1],"event_data":null,"fn_index":0,"session_hash":""}}')
                # Skip the protocol/progress frames that precede the payload.
                conn.recv()
                conn.recv()
                conn.recv()
                conn.recv()
                photo = json.loads(conn.recv())['output']['data'][0]
                base64_string = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')

            image_bytes = base64.b64decode(base64_string)
            with tempfile.NamedTemporaryFile(delete=False) as temp:
                temp.write(image_bytes)
                temp_file_path = temp.name
            print("cs_2")

        # --- Step 2: animate the rendered image --------------------------
        try:
            # `with` closes the file handle (previously leaked).
            with open(temp_file_path, 'rb') as image_file:
                r = requests.post("https://stable-video-diffusion.com/api/upload",
                                  files={"file": image_file})
            print(r.text)
            hash_ = r.json()['hash']
            time.sleep(10)
            c = 0
            while c < 10:
                r2 = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}")
                source_string = r2.text
                if "Generation has been in progress for" in source_string:
                    time.sleep(15)
                    c += 1
                    continue
                pattern = r'https://storage.stable-video-diffusion.com/([a-f0-9]{32})\.mp4'
                matches = re.findall(pattern, source_string)
                sd_video = [f"https://storage.stable-video-diffusion.com/{m}.mp4"
                            for m in matches]
                if sd_video:
                    # BUGFIX: printing sd_video[0] before checking for an
                    # empty list raised IndexError; print only when found.
                    print(sd_video[0])
                    return sd_video[0]
                # No URL found: fail into the fallback (was `_ = 1/0`).
                raise ValueError("no video URL in result page")
            # Poll loop exhausted: previously fell through and silently
            # returned None; now triggers the fallback instead.
            raise TimeoutError("stable-video-diffusion generation timed out")
        except Exception:
            client1 = Client("https://emmadrex-stable-video-diffusion.hf.space")
            # BUGFIX: was `encoded_string`, a name that does not exist in
            # this function (copy-paste from animate_img) -> NameError.
            # The image rendered in step 1 lives at temp_file_path.
            result1 = client1.predict(temp_file_path, api_name="/resize_image")
            client = Client("https://emmadrex-stable-video-diffusion.hf.space")
            result = client.predict(result1, 0, True, 1, 15, api_name="/video")
            return result[0]['video']

    if model == "AnimateDiff":
        data = {"prompt": prompt, "negative_prompt": "EasyNegative"}
        r = requests.post("https://sd.cuilutech.com/sdapi/async/txt2gif", json=data)
        c = 0
        # BUGFIX: `photo` was unbound (NameError) when the poll loop
        # exhausted without the task completing; return None instead.
        photo = None
        while c < 60:
            r2 = requests.post("https://sd.cuilutech.com/sdapi/get_task_info",
                               json={'task_id': r.json()['data']['task_id']})
            time.sleep(2)
            if r2.json()['data']:
                photo = r2.json()['data']['image_urls'][0]
                break
            c += 1
        return photo
|
153 |
+
|
154 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
155 |
def flip_text1(prompt, motion):
|
156 |
try:
|
157 |
language = detect(prompt)
|
|
|
291 |
with gr.Tab("Сгенерировать видео"):
|
292 |
with gr.Column():
|
293 |
prompt = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание:', lines=3)
|
294 |
+
# motion1 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=["Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
|
295 |
+
model = gr.Radio(interactive=True, value="Stable Video Diffusion", show_label=True,
|
296 |
+
label="Модель нейросети:", choices=['Stable Video Diffusion', 'AnimateDiff'])
|
297 |
with gr.Column():
|
298 |
text_button = gr.Button("Сгенерировать видео", variant='primary', elem_id="generate")
|
299 |
with gr.Column():
|
300 |
video_output = gr.Video(show_label=True, label='Результат:', type="file")
|
301 |
+
text_button.click(create_video, inputs=[prompt, model], outputs=video_output)
|
302 |
|
303 |
with gr.Tab("Анимировать изображение"):
|
304 |
with gr.Column():
|
305 |
prompt2 = gr.Image(show_label=True, interactive=True, type='filepath', label='Исходное изображение:')
|
306 |
prompt12 = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание видео (опционально):', lines=3)
|
307 |
+
# motion2 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=["Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
|
308 |
+
model2 = gr.Radio(interactive=True, value="Stable Video Diffusion", show_label=True,
|
309 |
+
label="Модель нейросети:", choices=['Stable Video Diffusion', 'AnimateDiff'])
|
310 |
with gr.Column():
|
311 |
text_button2 = gr.Button("Анимировать изображение", variant='primary', elem_id="generate")
|
312 |
with gr.Column():
|
313 |
video_output2 = gr.Video(show_label=True, label='Результат:', type="file")
|
314 |
+
text_button2.click(animate_img, inputs=[prompt2, model2], outputs=video_output2)
|
315 |
|
316 |
demo.queue(concurrency_count=12)
|
317 |
demo.launch()
|