import gradio as gr
from transformers import pipeline
import os

# Prompt-extension model plus the two Hugging Face Spaces used as remote backends.
pipe = pipeline('text-generation', model='daspartho/prompt-extend')
stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")

def get_images(prompt):
    # Generate a gallery with Stable Diffusion 1.5 and return the first image path.
    gallery_dir = stable_diffusion(prompt, fn_index=2)
    img_results = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
    return img_results[0]

def get_new_prompt(img, mode):
    # Caption the generated image with CLIP Interrogator 2.
    interrogate = clip_interrogator_2(img, mode, 12, api_name="clipi2")
    return interrogate

def infer(input):
    # Extend the initial idea, render it with SD 1.5, then interrogate the image
    # to obtain a prompt suited to Stable Diffusion 2.
    prompt = pipe(input + ',', num_return_sequences=1)[0]["generated_text"]
    img = get_images(prompt)
    result = get_new_prompt(img, 'fast')
    return result[0]

input_prompt = gr.Text(label="Enter the initial prompt")
sd2_output = gr.Text(label="Extended prompt suitable for Stable Diffusion 2")

# The heading and help text were standalone gr.Markdown / gr.HTML components that
# never attached to the UI; pass them to the Interface so they actually render.
demo = gr.Interface(
    fn=infer,
    inputs=input_prompt,
    outputs=sd2_output,
    title="Prompt Extender for SD 2",
    description="Enter a main initial idea for a prompt, and the model will generate a prompt suitable for Stable Diffusion 2",
)
demo.queue(max_size=10, concurrency_count=20)
demo.launch(enable_queue=True)
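
# --- Optional: querying the launched demo programmatically. A minimal sketch, not
# --- part of the original app; run it from a separate process. It assumes the app
# --- is reachable on Gradio's default local URL, and the example prompt is purely
# --- illustrative.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860")           # the running demo above
# result = client.predict("a castle on a hill",      # initial prompt idea
#                         api_name="/predict")       # Interface's default endpoint
# print(result)                                      # extended prompt for SD 2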