import io
import json
import os

import gradio as gr
import requests
from PIL import Image

# Load the available LoRAs from JSON
with open('loras.json', 'r') as f:
    loras = json.load(f)
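
# Illustrative loras.json structure (a sketch: field names are inferred from how
# the entries are used below; the actual file ships alongside this Space):
# [
#   {
#     "title": "Example LoRA",
#     "repo": "user/example-lora-repo",
#     "trigger_word": "example style",
#     "image": "thumbnails/example.png"
#   }
# ]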

# Call the Hugging Face Inference API and return the generated image bytes
def query(payload, api_url, token):
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    response.raise_for_status()  # surface API errors instead of passing error JSON to PIL
    return io.BytesIO(response.content)

# Run on button click: currently always uses the first LoRA in the list and
# appends its trigger word to the user's prompt
def run_lora(prompt):
    selected_lora = loras[0]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    trigger_word = selected_lora["trigger_word"]
    token = os.getenv("API_TOKEN")
    payload = {"inputs": f"{prompt} {trigger_word}"}
    image_bytes = query(payload, api_url, token)
    return Image.open(image_bytes)
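
# Quick local sanity check (a sketch, kept commented out; assumes API_TOKEN is set
# in the environment and the first entry in loras.json points to a text-to-image model):
# if __name__ == "__main__":
#     run_lora("a portrait of an astronaut").save("test.png")
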
# Gradio UI
print("Before Gradio Interface")
with gr.Blocks(css="custom.css") as app:
    # Build the components first (render=False), then place them in the layout below
    title = gr.HTML("<h1>LoRA the Explorer</h1>", render=False)
    gallery = gr.Gallery(
        [(item["image"], item["title"]) for item in loras],
        label="LoRA Gallery",
        allow_preview=False,
        columns=3,
        render=False,
    )
    prompt = gr.Textbox(
        label="Prompt",
        lines=1,
        max_lines=1,
        placeholder="Type a prompt after selecting a LoRA",
        render=False,
    )
    result = gr.Image(interactive=False, label="Generated Image", render=False)
    with gr.Row():
        with gr.Column():
            # Left column: title and LoRA gallery
            title.render()
            gallery.render()
        with gr.Column():
            # Right column: prompt, run button, and output image
            prompt.render()
            gr.Button("Run").click(
                fn=run_lora,
                inputs=[prompt],
                outputs=[result],
            )
            result.render()
print("After Gradio Interface")

# Queue requests and launch the Gradio interface
app.queue()
app.launch(debug=True)