"""Gradio web demo for the ``lunarfish/furrydiffusion`` Stable Diffusion model.

Loads the pipeline once at import time, then serves a small Blocks UI where a
text prompt generates a gallery of images.
"""

import gradio as gr
import torch
from torch import autocast  # NOTE(review): unused here; kept in case other code imports it from this module
from diffusers import StableDiffusionPipeline

# model_id = "lunarfish/furrydiffusion"

# BUGFIX: the original passed ``torch_type=torch.float16`` — from_pretrained
# has no such keyword, so it was silently ignored and the weights loaded in
# full fp32. The correct keyword is ``torch_dtype``.
pipe = StableDiffusionPipeline.from_pretrained(
    "lunarfish/furrydiffusion",
    torch_dtype=torch.float16,
    revision="main",
)

# Number of images generated per prompt (batched into one pipeline call).
num_samples = 2


def infer(prompt):
    """Generate ``num_samples`` images for *prompt* and return them as a list.

    Args:
        prompt: Text prompt fed to the diffusion pipeline.

    Returns:
        The list of generated PIL images produced by the pipeline.
    """
    # NOTE(review): ``["sample"]`` is the legacy diffusers output key; newer
    # diffusers versions expose ``.images`` instead — confirm the pinned
    # diffusers version before migrating.
    images = pipe([prompt] * num_samples, guidance_scale=7.5)["sample"]
    return images


block = gr.Blocks()

# Example prompts shown beneath the input box.
examples = [
    ["fox"],
    ["rabbit"],
    ["wolf"],
]

with block as demo:
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                btn = gr.Button("Run").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
        gallery = gr.Gallery(
            label="Generated images",
            show_label=False,
            elem_id="generated_id",
        ).style(grid=[2], height="auto")

    ex = gr.Examples(
        examples=examples,
        fn=infer,
        inputs=[text],
        outputs=gallery,
        cache_examples=True,
    )
    ex.dataset.headers = [""]

    # Both pressing Enter in the textbox and clicking the button run inference.
    text.submit(infer, inputs=[text], outputs=gallery)
    btn.click(infer, inputs=[text], outputs=gallery)

demo.queue(max_size=25).launch()