import os
import io
import random
import requests
import gradio as gr
import torch
import numpy as np
import cv2
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM
from diffusers import DiffusionPipeline
from datetime import datetime
from fastapi import FastAPI

app = FastAPI()

#----------Start of theme----------
theme = gr.themes.Soft(
    primary_hue="zinc",
    secondary_hue="stone",
    font=[gr.themes.GoogleFont('Kavivanar'), 'system-ui', 'sans-serif'],
    font_mono=[gr.themes.GoogleFont('Source Code Pro'), gr.themes.GoogleFont('Inconsolata'), 'monospace'],
).set(
    body_background_fill='*primary_100',
    body_text_color='*secondary_600',
    body_text_color_subdued='*primary_500',
    body_text_weight='500',
    background_fill_primary='*primary_100',
    background_fill_secondary='*secondary_200',
    color_accent='*primary_300',
    border_color_accent_subdued='*primary_400',
    border_color_primary='*primary_400',
    block_background_fill='*primary_300',
    block_border_width='*panel_border_width',
    block_info_text_color='*primary_700',
    block_info_text_size='*text_md',
    panel_background_fill='*primary_200',
    accordion_text_color='*primary_600',
    table_text_color='*primary_600',
    input_background_fill='*primary_50',
    input_background_fill_focus='*primary_100',
    button_primary_background_fill='*primary_500',
    button_primary_background_fill_hover='*primary_400',
    button_primary_text_color='*primary_50',
    button_primary_text_color_hover='*primary_100',
    button_cancel_background_fill='*primary_500',
    button_cancel_background_fill_hover='*primary_400'
)
#----------End of theme----------

API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100


def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5, sampler="DPM++ 2M Karras",
          seed=-1, strength=0.7, width=1024, height=1024):
    """Send a text-to-image request to the Hugging Face Inference API and return the generated image."""
    if prompt is None or prompt.strip() == "":
        return None

    # Fall back to the base FLUX.1-dev model when no LoRA repo id is provided.
    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"

    key = random.randint(0, 999)  # short id used only in log messages

    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    API_TOKEN = os.getenv("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    # print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    # print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 1000000000)

    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
        "parameters": {
            "width": width,    # Pass the width to the API
            "height": height   # Pass the height to the API
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None


examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]

css = """
#app-container {
    max-width: 896px;
    margin-left: auto;
    margin-right: auto;
}
#body {
    background-image: url("DigiP-AI/FLUX.Dev-LORA/abstract(1).jpg");
}
"""

with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
    gr.HTML("