import io
import json
import os

import requests
from PIL import Image
from dotenv import load_dotenv, find_dotenv

_ = load_dotenv(find_dotenv())  # read the local .env file
hf_api_key = os.environ['HF_API_KEY']

# Helper function for calling the Hugging Face Inference API
# API_URL = "https://api-inference.huggingface.co/models/sayakpaul/text-to-image-pokemons-gpt4"
# API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
# API_URL = "https://api-inference.huggingface.co/models/cloudqi/cqi_text_to_image_pt_v0"
# API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
# API_URL = "https://api-inference.huggingface.co/models/SimianLuo/LCM_Dreamshaper_v7"
API_URL = "https://api-inference.huggingface.co/models/tensor-diffusion/majicMIX-realistic-v7"
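# Note: the commented-out URLs above should work as drop-in alternatives, since they
# all expose the same text-to-image task; pass a different URL via ENDPOINT_URL below
# to swap models.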
# Text-to-image endpoint
def get_completion(inputs, parameters=None, ENDPOINT_URL=API_URL):
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.request("POST", ENDPOINT_URL, headers=headers, data=json.dumps(data))
    return response.content  # raw image bytes on success
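
# A quick sanity check (a sketch; it assumes HF_API_KEY grants Inference API access and
# the model is already warm -- a cold model may return a JSON error payload instead of
# image bytes, in which case Image.open would raise). Uncomment to try it directly:
# image_bytes = get_completion("a watercolor painting of a lighthouse at dusk")
# Image.open(io.BytesIO(image_bytes)).show()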
import gradio as gr
def generate(prompt, negative_prompt, steps, guidance, width, height):
    params = {
        "negative_prompt": negative_prompt,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "width": width,
        "height": height
    }
    output = get_completion(prompt, params)
    pil_image = Image.open(io.BytesIO(output))
    return pil_image
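
# Example invocation (a sketch; the parameter names mirror the diffusers-style
# text-to-image options forwarded by the Inference API, and the values here are
# purely illustrative):
# img = generate(
#     prompt="portrait photo of an astronaut, studio lighting",
#     negative_prompt="blurry, low quality",
#     steps=25,
#     guidance=7,
#     width=512,
#     height=512,
# )
# img.save("astronaut.png")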
def loadGUI():
    with gr.Blocks() as demo:
        gr.Markdown("# Image Generation with Stable Diffusion - Magic Mix RVSix")
        with gr.Row():
            with gr.Column(scale=4):
                prompt = gr.Textbox(label="Your prompt")  # give the prompt some real estate
            with gr.Column(scale=1, min_width=50):
                btn = gr.Button("Submit")  # submit button side by side!
        with gr.Accordion("Advanced options", open=False):  # hide the advanced options
            negative_prompt = gr.Textbox(label="Negative prompt")
            with gr.Row():
                with gr.Column():
                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=100, step=1, value=25,
                                      info="In how many steps will the denoiser denoise the image?")
                    guidance = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=1, value=7,
                                         info="Controls how much the text prompt influences the result")
                with gr.Column():
                    width = gr.Slider(label="Width", minimum=64, maximum=1024, step=32, value=512)
                    height = gr.Slider(label="Height", minimum=64, maximum=1024, step=32, value=512)
        output = gr.Image(label="Result")  # show the result below the options
        btn.click(fn=generate,
                  inputs=[prompt, negative_prompt, steps, guidance, width, height],
                  outputs=[output])

    gr.close_all()
    demo.launch(share=True)
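# gr.close_all() shuts down any Gradio apps already running in this process before
# launching; share=True should additionally create a temporary public gradio.live URL.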
def main():
    loadGUI()


if __name__ == "__main__":
    main()