from queue import Queue
import threading

import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image


# @spaces.GPU(duration=120)
def GenerateImage(prompt, steps, progress, model):
    # Queue used to pass intermediate images from the worker thread back to this generator.
    queue = Queue()

    def StartThread():
        # Load the text-to-image pipeline on the GPU in half precision.
        pipe_txt2img = AutoPipelineForText2Image.from_pretrained(
            model, torch_dtype=torch.float16, use_safetensors=True
        ).to("cuda")
        vae = pipe_txt2img.vae

        def latents_callback(i, t, latents):
            # Decode the current latents into an image and push it onto the queue.
            latents = 1 / 0.18215 * latents
            image = vae.decode(latents).sample[0]
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.cpu().permute(1, 2, 0).numpy()
            intermediate = pipe_txt2img.numpy_to_pil(image)
            queue.put({"type": "image", "image": intermediate[0], "step": i})

        generator = torch.Generator(device="cpu").manual_seed(37)
        FinalImage = pipe_txt2img(
            prompt,
            generator=generator,
            num_inference_steps=steps,
            callback=latents_callback,
            callback_steps=progress,
        ).images[0]
        queue.put({"type": "image", "image": FinalImage, "step": steps + 1})
        queue.put({"type": "end"})

    # Run the diffusion in a background thread so this generator can stream
    # intermediate results to Gradio as they arrive on the queue.
    t = threading.Thread(target=StartThread)
    t.start()

    while True:
        print("Waiting for next item")
        nextItem = queue.get()
        if nextItem["type"] == "end":
            break
        yield [nextItem["image"], nextItem["step"]]

    print("Waiting for thread to finish...")
    t.join()
    print("Finished!")


with gr.Blocks() as demo:
    gr.Markdown(
        """
        This lab demonstrates how to implement text-to-image generation with Gradio and Diffusers,
        showing the intermediate image produced at each step of the diffusion process.
        Type a prompt, choose the maximum number of steps and the frequency (in steps) at which
        progress is shown, and watch the diffusion process live!
        """
    )
    with gr.Row():
        prompt = gr.Text(label="Prompt")
        TotalSteps = gr.Slider(label="Steps", minimum=1, maximum=150, value=10)
        ProgressSteps = gr.Number(label="Progress steps", value=2)
        model = gr.Text(label="Model", value="dreamlike-art/dreamlike-photoreal-2.0")
    with gr.Row():
        with gr.Column():
            btnRun = gr.Button(value="Run!")
            btnStop = gr.Button(value="Stop!")
            status = gr.Text(label="Current Step")
            image = gr.Image()

    GenerateEvent = btnRun.click(
        GenerateImage,
        [prompt, TotalSteps, ProgressSteps, model],
        [image, status],
    )
    btnStop.click(None, None, None, cancels=[GenerateEvent])

if __name__ == "__main__":
    demo.launch(show_api=True)
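
# Note: recent diffusers releases deprecate the `callback` / `callback_steps` arguments used
# above in favor of `callback_on_step_end`. A minimal sketch of the same streaming callback
# under that newer API (assuming a diffusers version that provides it) could look like this;
# the new API has no built-in step frequency, so the filtering happens inside the callback:
#
#   def latents_callback(pipe, step, timestep, callback_kwargs):
#       latents = callback_kwargs["latents"]
#       if step % progress == 0:
#           image = pipe.vae.decode(1 / 0.18215 * latents).sample[0]
#           image = (image / 2 + 0.5).clamp(0, 1).cpu().permute(1, 2, 0).numpy()
#           queue.put({"type": "image", "image": pipe.numpy_to_pil(image)[0], "step": step})
#       return callback_kwargs  # the callback must return the kwargs dict
#
#   FinalImage = pipe_txt2img(
#       prompt,
#       generator=generator,
#       num_inference_steps=steps,
#       callback_on_step_end=latents_callback,
#   ).images[0]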