import gradio as gr
import requests
import time
import json
from contextlib import closing
from websocket import create_connection
from deep_translator import GoogleTranslator
from langdetect import detect
import os
from PIL import Image
import io
import base64
import re
from gradio_client import Client
from fake_useragent import UserAgent
import random


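# flip_text(): generate an image for the given prompt. Russian prompts are
# translated to English first, then the request is routed either to one of the
# websocket-based Spaces ("Playground v2", "Artigen v3") or to the
# gradio_client fallback chain further below.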
def flip_text(prompt, negative_prompt, task, steps, sampler, cfg_scale, seed):
    result = {"prompt": prompt, "negative_prompt": negative_prompt, "task": task, "steps": steps, "sampler": sampler, "cfg_scale": cfg_scale, "seed": seed}
    print(result)

    # Translate Russian prompts to English; ignore detection/translation failures.
    try:
        language = detect(prompt)
        if language == 'ru':
            prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
            print(prompt)
    except Exception:
        pass

    # Keep only Latin/Cyrillic letters and whitespace.
    prompt = re.sub(r'[^a-zA-Zа-яА-Я\s]', '', prompt)

    cfg = int(cfg_scale)
    steps = int(steps)
    seed = int(seed)

    width = 1024
    height = 1024
    # The original assignments for url_sd3 / url_sd4 are missing here; reading
    # them from environment variables is an assumption based on the pattern used
    # elsewhere in this file.
    url_sd3 = os.getenv("url_sd3")
    url_sd4 = os.getenv("url_sd4")
    print("--3-->", url_sd3)
    print("--4-->", url_sd4)
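    # The two branches below call older Gradio Spaces over their websocket queue
    # endpoint (/queue/join): send the fn_index/data payload, skip the queue
    # status frames, then decode the base64 image from the final message.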
if task == "Playground v2": |
|
playground = str(os.getenv("playground")) |
|
with closing(create_connection("wss://ashrafb-arpr.hf.space/queue/join", timeout=60)) as conn: |
|
conn.send('{"fn_index":0,"session_hash":""}') |
|
conn.send(f'{{"fn_index":0,"data":["{prompt}"],"session_hash":""}}') |
|
conn.recv() |
|
conn.recv() |
|
conn.recv() |
|
conn.recv() |
|
a = conn.recv() |
|
print(">> A:", a) |
|
photo = json.loads(a)['output']['data'][0] |
|
photo = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '') |
|
photo = Image.open(io.BytesIO(base64.decodebytes(bytes(photo, "utf-8")))) |
|
return photo |
|
|
|
if task == "Artigen v3": |
|
artigen = str(os.getenv("artigen")) |
|
with closing(create_connection("wss://ashrafb-arv3s.hf.space/queue/join", timeout=60)) as conn: |
|
conn.send('{"fn_index":0,"session_hash":""}') |
|
conn.send(f'{{"fn_index":0,"data":["{prompt}", 0, "No style"],"session_hash":""}}') |
|
conn.recv() |
|
conn.recv() |
|
conn.recv() |
|
conn.recv() |
|
a = conn.recv() |
|
print(">> A:", a) |
|
photo = json.loads(a)['output']['data'][0] |
|
photo = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '') |
|
photo = Image.open(io.BytesIO(base64.decodebytes(bytes(photo, "utf-8")))) |
|
return photo |
|
|
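    # All remaining models go through gradio_client with a randomized browser
    # User-Agent: try ehristoforu/dalle-3-xl-lora-v2 first, fall back to
    # Nymbo/SD-XL, and finally to radames/Real-Time-Text-to-Image-SDXL-Lightning.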
    try:
        ua = UserAgent()
        headers = {
            'authority': 'ehristoforu-dalle-3-xl-lora-v2.hf.space',
            'accept': 'text/event-stream',
            'accept-language': 'ru,en;q=0.9,la;q=0.8,ja;q=0.7',
            'cache-control': 'no-cache',
            'referer': 'https://ehristoforu-dalle-3-xl-lora-v2.hf.space/?__theme=light',
            'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "YaBrowser";v="24.1", "Yowser";v="2.5"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': f'{ua.random}'
        }
        client = Client("ehristoforu/dalle-3-xl-lora-v2", headers=headers)
        result = client.predict(
            prompt,
            "(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
            True, 0, 1024, 1024, 6, True,
            api_name='/run'
        )
        return result[0][0]['image']
    except Exception:
        try:
            ua = UserAgent()
            headers = {
                'authority': 'nymbo-sd-xl.hf.space',
                'accept': 'text/event-stream',
                'accept-language': 'ru,en;q=0.9,la;q=0.8,ja;q=0.7',
                'cache-control': 'no-cache',
                'referer': 'https://nymbo-sd-xl.hf.space/?__theme=light',
                'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "YaBrowser";v="24.1", "Yowser";v="2.5"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"Windows"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'user-agent': f'{ua.random}'
            }
            client = Client("Nymbo/SD-XL", headers=headers)
            result = client.predict(
                prompt, negative_prompt, "", "",
                True, False, False, 0,
                1024, 1024, 7, 1, 25, 25, False,
                api_name="/run"
            )
            return result
        except Exception:
            ua = UserAgent()
            headers = {
                'authority': 'radames-real-time-text-to-image-sdxl-lightning.hf.space',
                'accept': 'text/event-stream',
                'accept-language': 'ru,en;q=0.9,la;q=0.8,ja;q=0.7',
                'cache-control': 'no-cache',
                'referer': 'https://radames-real-time-text-to-image-sdxl-lightning.hf.space/?__theme=light',
                'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "YaBrowser";v="24.1", "Yowser";v="2.5"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"Windows"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'user-agent': f'{ua.random}'
            }
            client = Client("radames/Real-Time-Text-to-Image-SDXL-Lightning", headers=headers)
            result = client.predict(prompt, [], 0, random.randint(1, 999999), fn_index=0)
            return result

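# mirror(): send the generated image to an external upscaling endpoint whose base
# URL comes from the url_up / url_up_f environment variables. The fn_index 81
# payload looks like a Stable Diffusion WebUI "Extras" request, but that is an
# assumption; scale_by, method, gfpgan and codeformer map into that payload.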
def mirror(image_output, scale_by, method, gfpgan, codeformer):
    url_up = os.getenv("url_up")
    url_up_f = os.getenv("url_up_f")

    print("~~ up", url_up)
    print("~~ f", url_up_f)

    scale_by = int(scale_by)
    # The GFPGAN/CodeFormer sliders are fractional (0-1, step 0.1), so keep them as floats.
    gfpgan = float(gfpgan)
    codeformer = float(codeformer)

    # Encode the generated image as a base64 data URL for the upscaling endpoint.
    with open(image_output, "rb") as image_file:
        encoded_string2 = base64.b64encode(image_file.read()).decode("utf-8")

    encoded_string2 = "data:image/png;base64," + encoded_string2
    data = {"fn_index": 81, "data": [0, 0, encoded_string2, None, "", "", True, gfpgan, codeformer, 0, scale_by, 512, 512, None, method, "None", 1, False, [], "", ""], "session_hash": ""}
    print(data)
    r = requests.post(url_up, json=data, timeout=100)
    print(r.text)
    ph = f"{url_up_f}" + str(r.json()['data'][0][0]['name'])
    return ph
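# Custom CSS: full-width pink "generate" button, hidden Gradio footer,
# full-height output image.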
css = """
#generate {
    width: 100%;
    background: #e253dd !important;
    border: none;
    border-radius: 50px;
    outline: none !important;
    color: white;
}
#generate:hover {
    background: #de6bda !important;
    outline: none !important;
    color: #fff;
}
footer {visibility: hidden !important;}
#image_output {
    height: 100% !important;
}
"""
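# UI layout: a basic tab (prompt + model choice), an advanced tab (negative
# prompt, sampler, steps, CFG scale, seed), and an upscaling tab; the generate
# button runs flip_text and the upscale button runs mirror.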
with gr.Blocks(css=css) as demo:

    with gr.Tab("Базовые настройки"):  # "Basic settings"
        with gr.Row():
            prompt = gr.Textbox(placeholder="Введите описание изображения...", show_label=True, label='Описание изображения:', lines=3)  # "Image description"
        with gr.Row():
            task = gr.Radio(interactive=True, value="Stable Diffusion XL 1.0", show_label=True, label="Модель нейросети:",  # "Model"
                            choices=['Stable Diffusion XL 1.0', 'Crystal Clear XL',
                                     'Juggernaut XL', 'DreamShaper XL',
                                     'SDXL Niji', 'Cinemax SDXL', 'NightVision XL',
                                     'Playground v2', 'Artigen v3'])

    with gr.Tab("Расширенные настройки"):  # "Advanced settings"
        with gr.Row():
            negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=True, label='Negative Prompt:', lines=3, value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry")
        with gr.Row():
            sampler = gr.Dropdown(value="DPM++ SDE Karras", show_label=True, label="Sampling Method:", choices=[
                "Euler", "Euler a", "Heun", "DPM++ 2M", "DPM++ SDE", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"])
        with gr.Row():
            steps = gr.Slider(show_label=True, label="Sampling Steps:", minimum=1, maximum=50, value=35, step=1)
        with gr.Row():
            cfg_scale = gr.Slider(show_label=True, label="CFG Scale:", minimum=1, maximum=20, value=7, step=1)
        with gr.Row():
            seed = gr.Number(show_label=True, label="Seed:", minimum=-1, maximum=1000000, value=-1, step=1)
with gr.Tab("Настройки апскейлинга"): |
|
with gr.Column(): |
|
with gr.Row(): |
|
scale_by = gr.Number(show_label=True, label="Во сколько раз увеличить:", minimum=1, maximum=2, value=2, step=1) |
|
with gr.Row(): |
|
method = gr.Dropdown(show_label=True, value="ESRGAN_4x", label="Алгоритм увеличения", choices=["ScuNET GAN", "SwinIR 4x", "ESRGAN_4x", "R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"]) |
|
with gr.Column(): |
|
with gr.Row(): |
|
gfpgan = gr.Slider(show_label=True, label="Эффект GFPGAN (для улучшения лица)", minimum=0, maximum=1, value=0, step=0.1) |
|
with gr.Row(): |
|
codeformer = gr.Slider(show_label=True, label="Эффект CodeFormer (для улучшения лица)", minimum=0, maximum=1, value=0, step=0.1) |
|
|
|
with gr.Column(): |
|
text_button = gr.Button("Сгенерировать изображение", variant='primary', elem_id="generate") |
|
with gr.Column(): |
|
image_output = gr.Image(show_download_button=True, interactive=False, label='Результат:', elem_id='image_output', type='filepath') |
|
text_button.click(flip_text, inputs=[prompt, negative_prompt, task, steps, sampler, cfg_scale, seed], outputs=image_output) |
|
|
|
img2img_b = gr.Button("Увеличить изображение", variant='secondary') |
|
image_i2i = gr.Image(show_label=True, label='Увеличенное изображение:') |
|
img2img_b.click(mirror, inputs=[image_output, scale_by, method, gfpgan, codeformer], outputs=image_i2i) |
|
|
|
demo.queue(concurrency_count=24) |
|
demo.launch() |