from huggingface_hub import from_pretrained_keras
from keras_cv import models
import gradio as gr

# Load the base Stable Diffusion pipeline and swap in the fine-tuned
# DreamBooth diffusion model downloaded from the Hugging Face Hub.
sd_dreambooth_model = models.StableDiffusion(
    img_width=512, img_height=512
)
db_diffusion_model = from_pretrained_keras("danwaldie/dreambooth_teddy")
sd_dreambooth_model._diffusion_model = db_diffusion_model

# Generate images from a prompt.
def infer(prompt, negative_prompt, num_imgs_to_gen, num_steps, guidance_scale):
    generated_images = sd_dreambooth_model.text_to_image(
        prompt,
        negative_prompt=negative_prompt,
        batch_size=num_imgs_to_gen,
        num_steps=num_steps,
        unconditional_guidance_scale=guidance_scale,
    )
    return generated_images

# output = gr.Gallery(label="Outputs").style(grid=(2, 2))

# Pass the inference function, the inputs for the prompt and sampling
# settings, and a gallery output for multiple images.
gr.Interface(
    infer,
    [
        gr.Textbox(label="Positive Prompt", value="a teddy_holmes dog astronaut in space"),
        gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry"),
        gr.Slider(label="Number of generated images", minimum=1, maximum=4, value=2, step=1),
        gr.Slider(label="Inference Steps", value=50),
        gr.Number(label="Guidance scale", value=7.5),
    ],
    [
        gr.Gallery(show_label=False),
    ],
    title="Dreambooth Teddy",
    description="This is a DreamBooth model fine-tuned on images of my dog, Teddy. Teddy is a Mini Double Doodle: his mom was a Mini Goldendoodle and his dad was a Labradoodle. To try it, refer to the concept as {teddy_holmes dog} in your prompt.",
    examples=[["a pencil drawing of a teddy_holmes dog as a knight in armor", "", 2, 50, 7.5]],
).launch()