import io
import json
import os
import random
import time

import gradio as gr
import requests
from deep_translator import GoogleTranslator
from PIL import Image

# ---------- Start of theme ----------
theme = gr.themes.Soft(
    primary_hue="zinc",
    secondary_hue="stone",
    font=[gr.themes.GoogleFont('Kavivanar'), 'system-ui', 'sans-serif'],
    font_mono=[gr.themes.GoogleFont('Source Code Pro'), gr.themes.GoogleFont('Inconsolata'), 'monospace'],
).set(
    body_background_fill='*primary_100',
    body_text_color='*secondary_600',
    body_text_color_subdued='*primary_500',
    body_text_weight='500',
    background_fill_primary='*primary_100',
    background_fill_secondary='*secondary_200',
    color_accent='*primary_300',
    border_color_accent_subdued='*primary_400',
    border_color_primary='*primary_400',
    block_background_fill='*primary_300',
    block_border_width='*panel_border_width',
    block_info_text_color='*primary_700',
    block_info_text_size='*text_md',
    panel_background_fill='*primary_200',
    accordion_text_color='*primary_600',
    table_text_color='*primary_600',
    input_background_fill='*primary_50',
    input_background_fill_focus='*primary_100',
    button_primary_background_fill='*primary_500',
    button_primary_background_fill_hover='*primary_400',
    button_primary_text_color='*primary_50',
    button_primary_text_color_hover='*primary_100',
    button_cancel_background_fill='*primary_500',
    button_cancel_background_fill_hover='*primary_400',
)
# ---------- End of theme ----------

# Read the Hugging Face token once; each request reuses it below.
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100

article_text = """

Enjoying the tool? Buy me a coffee and get exclusive prompt guides!

Instantly unlock helpful tips for creating better prompts!

Buy Me a Coffee
"""


def query(lora_id, prompt, negative_prompt="", steps=28, cfg_scale=3.5,
          method="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    """Request a text-to-image generation from the Hugging Face Inference API."""
    if prompt is None or prompt.strip() == "":
        return None, seed
    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"
    key = random.randint(0, 999)

    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    API_TOKEN = os.getenv("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    # print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    # print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 4294967296)

    # negative_prompt, method and strength come from the UI controls;
    # the payload below does not forward them to the API.
    payload = {
        "inputs": prompt,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "parameters": {
            "width": width,    # Pass the width to the API
            "height": height   # Pass the height to the API
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, seed


examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
]

css = """
#app-container {
    max-width: 896px;
    margin-left: auto;
    margin-right: auto;
}
"""

with gr.Blocks(theme=theme, css=css) as app:
    gr.HTML("""

        <center><h1>FLUX.1-Dev with LoRA support</h1></center>
    """)
    with gr.Column(elem_id="app-container"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
                with gr.Row():
                    with gr.Accordion("Lora trigger words", open=False):
                        gr.Markdown("""
                            - **sdxl-realistic**: szn style
                            - **stylesdxl-cyberpunk**: szn style
                            - **maxfield-parrish-stylee**: Maxfield Parrish Style
                            - **surreal-harmony**: Surreal Harmony
                            - **extremely-detailed**: extremely detailed
                            - **dark-fantasy**: Dark Fantasy
                            - **analogredmond**: AnalogRedmAF
                            - **jules-bastien-lepage-style**: Jules Bastien Lepage Style
                            - **john-singer-sargent-style**: John Singer Sargent Style
                            - **alphonse-mucha-style**: Alphonse Mucha Style
                            - **ultra-realistic-illustration**: ultra realistic illustration
                            - **eye-catching**: eye-catching
                            - **john-constable-style**: John Constable Style
                            - **film-noir**: in the style of FLMNR
                            - **director-sofia-coppola-style**: Director Sofia Coppola Style
                        """, label="Trigger words")
                with gr.Row():
                    custom_lora = gr.Dropdown(
                        [
                            " ",
                            "jwu114/lora-sdxl-realistic",
                            "issaccyj/lora-sdxl-cyberpunk",
                            "KappaNeuro/maxfield-parrish-style",
                            "fofr/sdxl-deep-down",
                            "KappaNeuro/surreal-harmony",
                            "ntc-ai/SDXL-LoRA-slider.extremely-detailed",
                            "prithivMLmods/Canopus-LoRA-Flux-FaceRealism",
                            "KappaNeuro/dark-fantasy",
                            "artificialguybr/analogredmond",
                            "KappaNeuro/jules-bastien-lepage-style",
                            "KappaNeuro/john-singer-sargent-style",
                            "KappaNeuro/alphonse-mucha-style",
                            "ntc-ai/SDXL-LoRA-slider.ultra-realistic-illustration",
                            "ntc-ai/SDXL-LoRA-slider.eye-catching",
                            "KappaNeuro/john-constable-style",
                            "dvyio/flux-lora-film-noir",
                            "KappaNeuro/director-sofia-coppola-style",
                        ],
                        label="Custom LoRA (Please select)",
                    )
                with gr.Row():
                    with gr.Accordion("⚙️ Advanced Settings", open=False, elem_id="settings-container"):
                        negative_prompt = gr.Textbox(
                            label="Negative Prompt",
                            placeholder="What should not be in the image",
                            value="((((out of frame))), deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
                            lines=3,
                            elem_id="negative-prompt-text-input",
                        )
                        with gr.Row():
                            width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
                            height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
                        steps = gr.Slider(label="Sampling steps", value=28, minimum=1, maximum=100, step=1)
                        cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                        method = gr.Radio(
                            label="Sampling method",
                            value="DPM++ 2M Karras",
                            choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "DPM Fast", "Euler", "Euler a", "Euler+beta", "Heun", "DDIM", "PLMS", "UniPC"],
                        )
                        strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
                with gr.Row():
                    with gr.Accordion("🫘Seed", open=False):
                        seed_output = gr.Textbox(label="Seed Used", show_copy_button=True, elem_id="seed-output")
        with gr.Row():
            text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
        with gr.Row():
            clr_button = gr.Button("Clear", variant="primary", elem_id="clear_button")
            clr_button.click(lambda: gr.Textbox(value=""), None, text_prompt)
        with gr.Row():
            image_output = gr.Image(type="pil", label="Image Output", format="png", elem_id="gallery")
        gr.Examples(
            examples=examples,
            inputs=[text_prompt],
        )

    text_button.click(
        query,
        inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
        outputs=[image_output, seed_output],
    )

app.launch(show_api=False, share=False)
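
# A minimal sketch for calling query() without the UI (assumes HF_READ_TOKEN is set
# in the environment); useful as a quick smoke test of the Inference API wiring:
#
#   image, used_seed = query(
#       "black-forest-labs/FLUX.1-dev",
#       "a tiny astronaut hatching from an egg on the moon",
#   )
#   image.save("sample.png")  # illustrative output path
#   print("Seed used:", used_seed)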