import requests
import gradio as gr
import io
import random
import os
import time
import cv2
import numpy as np
from PIL import Image
from deep_translator import GoogleTranslator
import json
#----------Start of theme----------
theme = gr.themes.Soft(
    primary_hue="zinc",
    secondary_hue="stone",
    font=[gr.themes.GoogleFont('Kavivanar'), 'system-ui', 'sans-serif'],
    font_mono=[gr.themes.GoogleFont('Source Code Pro'), gr.themes.GoogleFont('Inconsolata'), 'monospace'],
).set(
    body_background_fill='*primary_100',
    body_text_color='*secondary_600',
    body_text_color_subdued='*primary_500',
    body_text_weight='500',
    background_fill_primary='*primary_100',
    background_fill_secondary='*secondary_200',
    color_accent='*primary_300',
    border_color_accent_subdued='*primary_400',
    border_color_primary='*primary_400',
    block_background_fill='*primary_300',
    block_border_width='*panel_border_width',
    block_info_text_color='*primary_700',
    block_info_text_size='*text_md',
    panel_background_fill='*primary_200',
    accordion_text_color='*primary_600',
    table_text_color='*primary_600',
    input_background_fill='*primary_50',
    input_background_fill_focus='*primary_100',
    button_primary_background_fill='*primary_500',
    button_primary_background_fill_hover='*primary_400',
    button_primary_text_color='*primary_50',
    button_primary_text_color_hover='*primary_100',
    button_cancel_background_fill='*primary_500',
    button_cancel_background_fill_hover='*primary_400'
)
#----------End of theme----------
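# Hugging Face Inference API access; the token is read from the HF_READ_TOKEN environment variable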
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
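# Flip an image horizontally (used by the "Flip Image" tab)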
def flip_image(x):
    return np.fliplr(x)
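# Send a text-to-image request to the Inference API for the selected LoRA model
# and return the generated image together with the seed that was used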
def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    if prompt == "" or prompt is None:
        return None, None
    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"
    key = random.randint(0, 999)
    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    API_TOKEN = os.getenv("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # Translate the prompt into English and append quality tags
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 1000000000)

    # Prepare the payload for the API call, including width and height
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
        "parameters": {
            "width": width,   # Pass the width to the API
            "height": height  # Pass the height to the API
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, None
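# Example prompts shown under the prompt box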
examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]
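# Page styling: center the app container and set a background image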
css = """
#app-container {
max-width: 896px;
margin-left: auto;
margin-right: auto;
#body{background-image:"DigiP-AI/FLUX.Dev-LORA/abstract(1).jpg";}
}
"""
with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
    gr.HTML("<center><h6>🎨 FLUX.1-Dev with LoRA 🇬🇧</h6></center>")
    with gr.Tab("Text to Image"):
        with gr.Column(elem_id="app-container"):
            with gr.Row():
                with gr.Column(elem_id="prompt-container"):
                    with gr.Row():
                        text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
            with gr.Row():
                with gr.Accordion(label="Lora trigger words", open=False):
                    gr.Markdown("""
                    - **sdxl-realistic**: szn style
                    - **sdxl-cyberpunk**: szn style
                    - **maxfield-parrish-style**: Maxfield Parrish Style
                    - **surreal-harmony**: Surreal Harmony
                    - **extremely-detailed**: extremely detailed
                    - **dark-fantasy**: Dark Fantasy
                    - **analogredmond**: AnalogRedmAF
                    - **jules-bastien-lepage-style**: Jules Bastien Lepage Style
                    - **john-singer-sargent-style**: John Singer Sargent Style
                    - **alphonse-mucha-style**: Alphonse Mucha Style
                    - **ultra-realistic-illustration**: ultra realistic illustration
                    - **eye-catching**: eye-catching
                    - **john-constable-style**: John Constable Style
                    - **film-noir**: in the style of FLMNR
                    - **director-sofia-coppola-style**: Director Sofia Coppola Style
                    """,
                    label="Trigger words")
            with gr.Row():
                custom_lora = gr.Dropdown([" ", "jwu114/lora-sdxl-realistic", "issaccyj/lora-sdxl-cyberpunk", "KappaNeuro/maxfield-parrish-style", "fofr/sdxl-deep-down", "KappaNeuro/surreal-harmony", "ntc-ai/SDXL-LoRA-slider.extremely-detailed", "prithivMLmods/Canopus-LoRA-Flux-FaceRealism", "KappaNeuro/dark-fantasy", "artificialguybr/analogredmond", "KappaNeuro/jules-bastien-lepage-style", "KappaNeuro/john-singer-sargent-style", "KappaNeuro/alphonse-mucha-style", "ntc-ai/SDXL-LoRA-slider.ultra-realistic-illustration", "ntc-ai/SDXL-LoRA-slider.eye-catching", "KappaNeuro/john-constable-style", "dvyio/flux-lora-film-noir", "KappaNeuro/director-sofia-coppola-style"], label="Custom LoRA", info="Please select from the list")
            with gr.Row():
                with gr.Accordion("⚙️ Advanced Settings", open=False, elem_id="settings-container"):
                    negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="((((out of frame))), deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
                    with gr.Row():
                        width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
                        height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
                        steps = gr.Slider(label="Sampling steps", value=28, minimum=1, maximum=100, step=1)
                        cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "DPM Fast", "Euler", "Euler a", "Euler+beta", "Heun", "DDIM", "PLMS", "UniPC"])
                        strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
            with gr.Row():
                with gr.Accordion("🫘Seed", open=False):
                    seed_output = gr.Textbox(label="Seed Used", show_copy_button=True, elem_id="seed-output")
            with gr.Row():
                text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
            with gr.Row():
                clr_button = gr.Button("Clear", variant="primary", elem_id="clear_button")
                clr_button.click(lambda: gr.Textbox(value=""), None, text_prompt)
            with gr.Row():
                image_output = gr.Image(type="pil", label="Image Output", format="png", elem_id="gallery")

            gr.Examples(
                examples=examples,
                inputs=[text_prompt],
            )

            text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])
with gr.Tab("Image Upscaler"):
with gr.Row():
with gr.Column():
def upscale_image(input_image, radio_input):
upscale_factor = radio_input
output_image = cv2.resize(input_image, None, fx = upscale_factor, fy = upscale_factor, interpolation = cv2.INTER_CUBIC)
return output_image
radio_input = gr.Radio(label="Upscale Levels", choices=[2, 4, 6, 8, 10], value=2)
iface = gr.Interface(fn=upscale_image, inputs = [gr.Image(label="Input Image", interactive=True), radio_input], outputs = gr.Image(label="Upscaled Image", format="png"), title="Image Upscaler")
with gr.Tab("Flip Image"):
with gr.Row():
image_input = gr.Image()
image_output = gr.Image(format="png")
with gr.Row():
image_button = gr.Button("Run", variant='primary')
image_button.click(flip_image, inputs=image_input, outputs=image_output)
app.launch(show_api=False, share=False) |