## Created by ruslanmv.com
## Happy coding!
import gradio as gr
import torch
import subprocess
import threading
from diffusers import DiffusionPipeline
from transformers import pipeline


# FastAPI setup: serve the app defined in api.py with uvicorn.
# Note: Gradio's default port is also 7860; if that port is already taken,
# Gradio falls back to the next free one.
def run_fastapi():
    subprocess.run(["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"])


# Text-generation model used to elaborate short Stable Diffusion prompts.
prompt_pipe = pipeline("text-generation", model="daspartho/prompt-extend")


def extend_prompt(prompt):
    # Append a comma so the model continues the prompt rather than rewriting it.
    return prompt_pipe(prompt + ",", num_return_sequences=1)[0]["generated_text"]


def text_it(inputs):
    return extend_prompt(inputs)


def load_pipeline(use_cuda):
    """Load SDXL-Turbo on the GPU (fp16) when requested and available, otherwise on the CPU."""
    device = "cuda" if use_cuda and torch.cuda.is_available() else "cpu"
    if device == "cuda":
        torch.cuda.empty_cache()
        pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/sdxl-turbo",
            torch_dtype=torch.float16,
            variant="fp16",
            use_safetensors=True,
        )
        pipe.enable_xformers_memory_efficient_attention()
        pipe = pipe.to(device)
        torch.cuda.empty_cache()
    else:
        pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
        pipe = pipe.to(device)
    return pipe


def genie(prompt="sexy woman", use_details=False, steps=2, seed=398231747038484200, use_cuda=False):
    pipe = load_pipeline(use_cuda)
    # A seed of 0 means "random"; any other value makes the result reproducible.
    generator = None if seed == 0 else torch.manual_seed(seed)
    extended_prompt = extend_prompt(prompt) if use_details else prompt
    image = pipe(
        prompt=extended_prompt,
        generator=generator,
        num_inference_steps=steps,
        guidance_scale=0.0,
    ).images[0]
    return image, extended_prompt


with gr.Blocks() as myface:
    gr.HTML(
        "<h1>Stable Diffusion Turbo with GPT</h1>"
        "<p>Type your text and let's create an image. "
        "Check the Details box if you want a more creative picture.</p>"
    )
    with gr.Row():
        input_text = gr.Textbox(label="Prompt Text", lines=1)
        details_checkbox = gr.Checkbox(label="Details", info="Generate Details?")
        steps_slider = gr.Slider(minimum=1, maximum=5, value=2, step=1, label="Number of Iterations")
        seed_slider = gr.Slider(minimum=0, maximum=999999999999999999, step=1, randomize=True, label="Seed (0 = random)")
        cuda_checkbox = gr.Checkbox(label="Use CUDA", info="Use GPU for inference.")
    with gr.Row():
        generate_button = gr.Button("Generate")
    with gr.Row():
        output_image = gr.Image(value="./imagen.png", label="Generated Image")
        output_text = gr.Textbox(label="Generated Text", lines=2)

    generate_button.click(
        genie,
        inputs=[input_text, details_checkbox, steps_slider, seed_slider, cuda_checkbox],
        outputs=[output_image, output_text],
        concurrency_limit=10,
    )

    # Example inputs: [prompt, details, steps, seed, use_cuda].
    example = [
        ["sexy woman", True, 2, 398231747038484200, False],
        ["sexy woman, in a black bikini, white bra, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko", False, 2, 304332410412655740, False],
        ["sexy woman, D&D, fantasy, portrait, highly detailed, headshot, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and magali villeneuve and wlop, ilya kuvshinov, octane render, 8 ", False, 2, 747356768820251800, False],
        ["sexy woman, worksafe, light blonde long hair, fully clothed, brown eyes, sitting on a chair, sitting by a reflective pool, in the style of ilya kuvshinov, very dark, cinematic dramatic atmosphere, artstation, detailed facial", False, 2, 398231747038484200, False],
        ["sexy woman, medium shot, candid, red hair, 4 k, high definition, realistic, natural, highly detailed, photo realistic smooth, sharp, unreal engine 5, cinema4d, Blender, render photo-realistic, v-ray", False, 2, 398231747038484200, False],
    ]
    gr.Examples(
        examples=example,
        inputs=[input_text, details_checkbox, steps_slider, seed_slider, cuda_checkbox],
    )
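
# ---------------------------------------------------------------------------
# NOTE: run_fastapi() assumes a separate module `api.py` exposing a FastAPI
# instance named `app`; that file is not shown here. A minimal sketch of such
# a module (the /health route is just a hypothetical placeholder) might be:
#
#     # api.py
#     from fastapi import FastAPI
#
#     app = FastAPI()
#
#     @app.get("/health")
#     def health():
#         # Simple liveness check for the side server.
#         return {"status": "ok"}
# ---------------------------------------------------------------------------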

if __name__ == "__main__":
    # Run FastAPI in a separate thread.
    threading.Thread(target=run_fastapi).start()
    # Run Gradio (your main application).
    myface.launch(inline=True, show_api=False, max_threads=200)
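
# ---------------------------------------------------------------------------
# NOTE (optional, not wired in above): genie() calls load_pipeline() on every
# click, which re-initializes SDXL-Turbo each time. If that becomes a
# bottleneck, the pipeline could be memoized per device, for example:
#
#     from functools import lru_cache
#
#     @lru_cache(maxsize=2)
#     def load_pipeline_cached(use_cuda: bool):
#         return load_pipeline(use_cuda)
#
# and genie() would then call load_pipeline_cached(use_cuda) instead.
# ---------------------------------------------------------------------------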