import gradio as gr
import requests
import io
import random
import os
import numpy as np
from PIL import Image
from deep_translator import GoogleTranslator
#----------Start of theme----------
theme = gr.themes.Soft(
    primary_hue="zinc",
    secondary_hue="stone",
    font=[gr.themes.GoogleFont('Kavivanar'), 'system-ui', 'sans-serif'],
    font_mono=[gr.themes.GoogleFont('Source Code Pro'), gr.themes.GoogleFont('Inconsolata'), 'monospace'],
).set(
    body_background_fill='*primary_100',
    body_text_color='*secondary_600',
    body_text_color_subdued='*primary_500',
    body_text_weight='500',
    background_fill_primary='*primary_100',
    background_fill_secondary='*secondary_200',
    color_accent='*primary_300',
    border_color_accent_subdued='*primary_400',
    border_color_primary='*primary_400',
    block_background_fill='*primary_300',
    block_border_width='*panel_border_width',
    block_info_text_color='*primary_700',
    block_info_text_size='*text_md',
    panel_background_fill='*primary_200',
    accordion_text_color='*primary_600',
    table_text_color='*primary_600',
    input_background_fill='*primary_50',
    input_background_fill_focus='*primary_100',
    button_primary_background_fill='*primary_500',
    button_primary_background_fill_hover='*primary_400',
    button_primary_text_color='*primary_50',
    button_primary_text_color_hover='*primary_100',
    button_cancel_background_fill='*primary_500',
    button_cancel_background_fill_hover='*primary_400'
)
#----------End of theme----------
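# Note: values prefixed with "*" (e.g. '*primary_100') reference variables from the theme's
# own palette and sizing scales rather than literal CSS values.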
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
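# HF_READ_TOKEN is assumed to be supplied as an environment variable (e.g. a Space secret).
# The token/header/timeout above are module-level defaults; query() below rebuilds its own
# Authorization header from the same variable for each request.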
def flip_image(x):
    """Mirror an image array horizontally."""
    return np.fliplr(x)

def clear():
    """Reset the image output component."""
    return None
def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=896, height=1152):
    """Generate an image through the Hugging Face Inference API and return it with the seed used."""
    if not prompt:
        return None, None
    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"
    key = random.randint(0, 999)
    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

    # Translate the prompt to English (the app assumes Russian input) before sending it to the model
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 1000000000)

    # Prepare the payload for the API call, including width and height
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
        "parameters": {
            "width": width,   # Pass the width to the API
            "height": height  # Pass the height to the API
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, None
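# Example (sketch): calling query() directly, outside the UI. This assumes HF_READ_TOKEN is set
# and that the target model is already loaded on the Inference API; the prompt is illustrative.
#   image, used_seed = query("black-forest-labs/FLUX.1-dev", "a quiet mountain lake at dawn")
#   image.save("sample.png")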
examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]
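# Clicking one of these examples fills the prompt textbox (they are wired to text_prompt below).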
css = """ | |
#app-container { | |
max-width: 930px; | |
margin-left: auto; | |
margin-right: auto; | |
} | |
".gradio-container {background: url('file=abstract.jpg')} | |
""" | |
with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
    gr.HTML("<center><h6>🎨 FLUX.1-Dev with LoRA 🇬🇧</h6></center>")
    with gr.Tab("Text to Image"):
        with gr.Column(elem_id="app-container"):
            with gr.Row():
                with gr.Column(elem_id="prompt-container"):
                    with gr.Row():
                        text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
                    with gr.Row():
                        with gr.Accordion("Lora trigger words", open=False):
                            gr.Markdown("""
                            ### 🎨 Lora trigger words:
                            - **sdxl-realistic**: szn style
                            - **sdxl-cyberpunk**: szn style
                            - **surreal-harmony**: Surreal Harmony
                            - **extremely-detailed**: extremely detailed
                            - **dark-fantasy**: Dark Fantasy
                            - **analogredmond**: AnalogRedmAF
                            - **jules-bastien-lepage-style**: Jules Bastien Lepage Style
                            - **john-singer-sargent-style**: John Singer Sargent Style
                            - **alphonse-mucha-style**: Alphonse Mucha Style
                            - **ultra-realistic-illustration**: ultra realistic illustration
                            - **eye-catching**: eye-catching
                            - **john-constable-style**: John Constable Style
                            - **film-noir**: in the style of FLMNR
                            - **director-sofia-coppola-style**: Director Sofia Coppola Style
                            """)
                    with gr.Row():
                        custom_lora = gr.Dropdown([" ", "jwu114/lora-sdxl-realistic", "issaccyj/lora-sdxl-cyberpunk", "hugovntr/flux-schnell-realism", "fofr/sdxl-deep-down", "KappaNeuro/surreal-harmony", "ntc-ai/SDXL-LoRA-slider.extremely-detailed", "prithivMLmods/Canopus-LoRA-Flux-FaceRealism", "KappaNeuro/dark-fantasy", "artificialguybr/analogredmond", "KappaNeuro/jules-bastien-lepage-style", "KappaNeuro/john-singer-sargent-style", "KappaNeuro/alphonse-mucha-style", "ntc-ai/SDXL-LoRA-slider.ultra-realistic-illustration", "ntc-ai/SDXL-LoRA-slider.eye-catching", "KappaNeuro/john-constable-style", "dvyio/flux-lora-film-noir", "KappaNeuro/director-sofia-coppola-style"], label="Custom LoRA")
                    with gr.Row():
                        with gr.Accordion("⚙️ Advanced Settings", open=False, elem_id="settings-container"):
                            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="bad anatomy, bad hands, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, worst face, three crus, extra crus, fused crus, worst feet, three feet, fused feet, fused thigh, three thigh, fused thigh, extra thigh, worst thigh, missing fingers, extra fingers, ugly fingers, long fingers, horn, extra eyes, huge eyes, 2girl, amputation, disconnected limbs, cartoon, cg, 3d, unreal, animate", lines=3, elem_id="negative-prompt-text-input")
                            with gr.Row():
                                width = gr.Slider(label="Width", value=896, minimum=64, maximum=1216, step=32)
                                height = gr.Slider(label="Height", value=1152, minimum=64, maximum=1216, step=32)
                                steps = gr.Slider(label="Sampling steps", value=50, minimum=1, maximum=100, step=1)
                                cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                                method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "DEIS", "LMS", "DPM Adaptive", "DPM++ 2M", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "Euler", "Euler CFG PP", "Euler a", "Euler+beta", "Heun", "Heun PP2", "DDIM", "PLMS", "UniPC", "UniPC BH2"])
                                strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
                                seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
                    with gr.Row():
                        with gr.Accordion("🫘Seed", open=False):
                            seed_output = gr.Textbox(label="Seed Used", show_copy_button=True, elem_id="seed-output")
            with gr.Row():
                text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
                clr_button = gr.Button("Clear Prompt", variant="primary", elem_id="clear_button")
                clr_button.click(lambda: gr.Textbox(value=""), None, text_prompt)
            with gr.Row():
                image_output = gr.Image(type="pil", label="Image Output", format="png", elem_id="gallery")
            with gr.Row():
                clear_btn = gr.Button(value="Clear Image", variant="primary", elem_id="clear_button")
                clear_btn.click(clear, inputs=[], outputs=[image_output])
            gr.Examples(
                examples=examples,
                inputs=[text_prompt],
            )
        text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])
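        # The inputs list is positional: it must follow query()'s parameter order
        # (lora_id, prompt, is_negative, steps, cfg_scale, sampler, seed, strength, width, height);
        # the two outputs receive the generated image and the seed that was actually used.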
with gr.Tab("Flip Image"): | |
with gr.Row(): | |
image_input = gr.Image() | |
image_output = gr.Image(format="png") | |
with gr.Row(): | |
image_button = gr.Button("Run", variant='primary') | |
image_button.click(flip_image, inputs=image_input, outputs=image_output) | |
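        # gr.Image hands the upload to flip_image as a NumPy array (the component's default type),
        # so np.fliplr() can mirror it directly before it is shown in the output component.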
if __name__ == "__main__":
    app.launch(show_api=False, share=False)