import base64
from time import perf_counter

import gradio as gr
import numpy as np
from cv2 import imencode

from backend.device import get_device_name, is_openvino_device
from backend.lcm_text_to_image import LCMTextToImage
from backend.models.lcmdiffusion_setting import LCMDiffusionSetting, LCMLora
from constants import APP_VERSION, DEVICE, LCM_DEFAULT_MODEL_OPENVINO

lcm_text_to_image = LCMTextToImage()
lcm_lora = LCMLora(
    base_model_id="Lykon/dreamshaper-7",
    lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
)


# Replacement for Gradio's PIL-to-base64 encoder, based on:
# https://github.com/gradio-app/gradio/issues/2635#issuecomment-1423531319
def encode_pil_to_base64_new(pil_image):
    # Convert the PIL image (RGB) to a BGR array, PNG-encode it with OpenCV,
    # and return it as a base64 data URI.
    image_arr = np.asarray(pil_image)[:, :, ::-1]
    _, byte_data = imencode(".png", image_arr)
    base64_data = base64.b64encode(byte_data)
    base64_string_opencv = base64_data.decode("utf-8")
    return "data:image/png;base64," + base64_string_opencv
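

# Example usage (a minimal sketch, not part of the app; assumes Pillow is
# available and that a small RGB image is enough to show the output format):
#   from PIL import Image
#   data_uri = encode_pil_to_base64_new(Image.new("RGB", (64, 64)))
#   assert data_uri.startswith("data:image/png;base64,")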


# Monkey-patch Gradio to use the OpenCV-based encoder above (see linked issue)
gr.processing_utils.encode_pil_to_base64 = encode_pil_to_base64_new


def predict(
    prompt,
    steps,
    seed,
):
    # Initialize the LCM pipeline: LCM-LoRA on top of Dreamshaper-7,
    # with OpenVINO acceleration when an OpenVINO device is available
    lcm_text_to_image.init(
        model_id=LCM_DEFAULT_MODEL_OPENVINO,
        use_lora=True,
        lcm_lora=lcm_lora,
        use_openvino=is_openvino_device(),
    )
    lcm_diffusion_setting = LCMDiffusionSetting()
    lcm_diffusion_setting.prompt = prompt
    lcm_diffusion_setting.guidance_scale = 1.0
    lcm_diffusion_setting.inference_steps = steps
    lcm_diffusion_setting.seed = seed
    lcm_diffusion_setting.use_seed = True
    # Smaller output resolution when running on OpenVINO
    lcm_diffusion_setting.image_width = 384 if is_openvino_device() else 512
    lcm_diffusion_setting.image_height = 384 if is_openvino_device() else 512
    lcm_diffusion_setting.use_openvino = is_openvino_device()
    start = perf_counter()
    images = lcm_text_to_image.generate(lcm_diffusion_setting)
    latency = perf_counter() - start
    print(f"Latency: {latency:.2f} seconds")
    return images[0]
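

# Example usage (illustrative only; in the app, predict() is invoked through the
# Gradio event handlers defined below rather than called directly):
#   image = predict("a cyberpunk city at night", steps=3, seed=1231231)
#   image.save("sample.png")  # assumes the backend returns PIL images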


css = """
#container{
    margin: 0 auto;
    max-width: 40rem;
}
#intro{
    max-width: 100%;
    text-align: center;
    margin: 0 auto;
}
#generate_button {
    color: white;
    border-color: #007bff;
    background: #007bff;
    width: 200px;
    height: 50px;
}
footer {
    visibility: hidden
}
"""


def _get_footer_message() -> str:
    version = f"<center><p> {APP_VERSION} "
    footer_msg = version + (
        ' © 2023 <a href="https://github.com/rupeshs">'
        " Rupesh Sreeraman</a></p></center>"
    )
    return footer_msg


with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="container"):
        use_openvino = "- OpenVINO" if is_openvino_device() else ""
        gr.Markdown(
            f"""FastSD CPU demo {use_openvino}
            **Device : {DEVICE.upper()} , {get_device_name()}**
            """,
            elem_id="intro",
        )
        with gr.Row():
            with gr.Row():
                prompt = gr.Textbox(
                    placeholder="Describe the image you'd like to see",
                    scale=5,
                    container=False,
                )
                generate_btn = gr.Button(
                    "Generate",
                    scale=1,
                    elem_id="generate_button",
                )
        image = gr.Image(type="filepath")
        with gr.Accordion("Advanced options", open=False):
            steps = gr.Slider(
                label="Steps",
                value=2 if is_openvino_device() else 3,
                minimum=1,
                maximum=6,
                step=1,
            )
            seed = gr.Slider(
                randomize=True,
                minimum=0,
                maximum=999999999,
                label="Seed",
                step=1,
            )
        gr.HTML(_get_footer_message())

    # Wire every control to predict, routing the result to the image component
    # so the output refreshes on prompt edits, slider changes, and button clicks
    inputs = [prompt, steps, seed]
    prompt.input(fn=predict, inputs=inputs, outputs=image, show_progress=False)
    generate_btn.click(
        fn=predict, inputs=inputs, outputs=image, show_progress=False
    )
    steps.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
    seed.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)


def start_demo_text_to_image(share=False):
    # Queue requests and launch the Gradio app; share=True creates a public link
    demo.queue()
    demo.launch(share=share)
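

# Example entry point (a minimal sketch; the Space's actual launcher may differ):
# if __name__ == "__main__":
#     start_demo_text_to_image(share=False)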