from diffusers import DiffusionPipeline
import gradio as gr
import torch
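
# Gradio Space: generates a sequence of images that interpolate between two text
# prompts using the "unclip_text_interpolation" community pipeline from diffusers.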
orig_start_prompt = "A photograph of an adult Lion"
orig_end_prompt = "A photograph of a Lion cub"
model_list = ["kakaobrain/karlo-v1-alpha"]
def unclip_text_interpolation(
    model_path,
    start_prompt,
    end_prompt,
    steps,
):
    # The pipeline is loaded per request because the model checkpoint is selectable in the UI.
    pipe = DiffusionPipeline.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        custom_pipeline="unclip_text_interpolation",
    )
    # num_inference_steps was never exposed in the UI, so the pipeline's own step defaults are used.
    output = pipe(start_prompt, end_prompt, steps)
    # The pipeline returns an output object; the Gallery expects the list of PIL images.
    return output.images
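
# UI components: model picker, the two prompts to interpolate between, and the
# number of interpolation steps (frames) to generate.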
inputs = [
    gr.Dropdown(model_list, value=model_list[0], label="Model"),
    gr.Textbox(lines=5, value=orig_start_prompt, label="Start Prompt"),
    gr.Textbox(lines=1, value=orig_end_prompt, label="End Prompt"),
    gr.Slider(minimum=2, maximum=12, value=5, step=1, label="Steps"),
]
output = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
).style(grid=[2], height="auto")
examples = [
["kakaobrain/karlo-v1-alpha", orig_start_prompt, orig_end_prompt, 6],
]
title = "UnClip Text Interpolation Pipeline"
description = """<p>For faster inference without waiting in the queue, you may duplicate this Space and upgrade to a GPU in the settings.
<br/>
<a href="https://huggingface.co/spaces/kadirnar/stable-diffusion-2-infinite-zoom-out?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>"""
demo_app = gr.Interface(
fn=unclip_text_interpolation,
description=description,
inputs=inputs,
outputs=output,
title=title,
theme='huggingface',
examples=examples,
cache_examples=True
)
demo_app.launch(debug=True, enable_queue=True)