import gradio as gr
import requests
import io
from PIL import Image
import json
import os

# Load LoRAs from JSON
with open('loras.json', 'r') as f:
    loras = json.load(f)
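
# Each entry in loras.json is expected to provide the keys used below: "image" and
# "title" for the gallery, plus "repo" and "trigger_word" for inference, e.g.
# (illustrative values only, not part of the original snippet):
# {"image": "thumbnail.png", "title": "My LoRA", "repo": "user/my-lora", "trigger_word": "my-style"}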

# API call function
def query(payload, api_url, token):
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    return io.BytesIO(response.content)
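
# Note: query() returns the raw response bytes. If the Inference API answers with an
# error payload instead of an image (for example while the model is still loading),
# Image.open() below will fail, so add error handling here if you need it.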

# Define the function to run when the button is clicked or the prompt is submitted
def run_lora(prompt):
    selected_lora = loras[0]  # always the first LoRA until gallery selection is wired up
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    trigger_word = selected_lora["trigger_word"]
    token = os.getenv("API_TOKEN")
    payload = {"inputs": f"{prompt} {trigger_word}"}
    image_bytes = query(payload, api_url, token)
    return Image.open(image_bytes)
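
# The gallery's select event still needs a handler. The sketch below is only an
# illustration (its name, signature, and return values are assumptions, not part of
# the original snippet) of how a callback could remember the clicked LoRA and update
# the prompt placeholder once wired to gallery.select() further down.
def update_selection(evt: gr.SelectData):
    selected = loras[evt.index]
    return (
        gr.update(placeholder=f"Type a prompt for {selected['title']}"),
        evt.index,
    )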

# Gradio UI
with gr.Blocks(css="custom.css") as app:
    title = gr.HTML("<h1>LoRA the Explorer</h1>")
    selected_state = gr.State()
    with gr.Row():
        gallery = gr.Gallery(
            [(item["image"], item["title"]) for item in loras],
            label="LoRA Gallery",
            allow_preview=False,
            columns=3
        )
        with gr.Column():
            prompt_title = gr.Markdown("### Click on a LoRA in the gallery to select it")
            with gr.Row():
                prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
                button = gr.Button("Run")
            result = gr.Image(interactive=False, label="Generated Image")
            with gr.Accordion("Advanced options", open=False):
                # Add any advanced options you need here
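                # (the controls below are illustrative assumptions, not part of the
                # original snippet; swap in whatever options your LoRAs need)
                negative_prompt = gr.Textbox(label="Negative prompt", lines=1)
                lora_weight = gr.Slider(label="LoRA weight", minimum=0.0, maximum=1.0, value=0.8)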

    gallery.select(
        # Define your update_selection function here (e.g. the sketch above) and wire
        # selected_state and the prompt box as its outputs
    )
    prompt.submit(
        fn=run_lora,
        inputs=[prompt],
        outputs=[result]
    )
    button.click(
        fn=run_lora,
        inputs=[prompt],
        outputs=[result]
    )

app.queue(max_size=20)
app.launch()
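
# To run this locally, set a Hugging Face access token in the API_TOKEN environment
# variable (on Spaces, add it as a secret) so the Inference API requests above are
# authorized, and keep custom.css and loras.json next to this script.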