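"""Gradio demo UI for FastSD CPU: fast text-to-image generation on CPU using
Latent Consistency Models (LCM) and LCM-LoRA."""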
import gradio as gr
from backend.lcm_text_to_image import LCMTextToImage
from backend.models.lcmdiffusion_setting import LCMLora, LCMDiffusionSetting
from constants import DEVICE, LCM_DEFAULT_MODEL_OPENVINO
from time import perf_counter
import numpy as np
from cv2 import imencode
import base64
from backend.device import get_device_name
from constants import APP_VERSION
from backend.device import is_openvino_device
import PIL
from backend.models.lcmdiffusion_setting import DiffusionTask
from pprint import pprint
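
# Shared text-to-image pipeline and LCM-LoRA configuration
# (Dreamshaper v7 base model with the SD v1.5 LCM-LoRA weights).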
lcm_text_to_image = LCMTextToImage()
lcm_lora = LCMLora(
    base_model_id="Lykon/dreamshaper-7",
    lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
)


# https://github.com/gradio-app/gradio/issues/2635#issuecomment-1423531319
def encode_pil_to_base64_new(pil_image):
    # Convert the PIL image (RGB) to a BGR numpy array for OpenCV encoding.
    image_arr = np.asarray(pil_image)[:, :, ::-1]
    _, byte_data = imencode(".png", image_arr)
    base64_data = base64.b64encode(byte_data)
    base64_string_opencv = base64_data.decode("utf-8")
    return "data:image/png;base64," + base64_string_opencv
# Monkey patch Gradio's built-in PIL-to-base64 encoder with the OpenCV-based
# implementation above (see the linked issue).
gr.processing_utils.encode_pil_to_base64 = encode_pil_to_base64_new


def predict(
    prompt,
    steps,
    seed,
    use_seed,
):
    print(f"prompt - {prompt}")
    # Build the generation settings for a text-to-image task.
    lcm_diffusion_setting = LCMDiffusionSetting()
    lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
    lcm_diffusion_setting.openvino_lcm_model_id = "rupeshs/LCM-dreamshaper-v7-openvino"
    lcm_diffusion_setting.use_lcm_lora = True
    lcm_diffusion_setting.prompt = prompt
    lcm_diffusion_setting.guidance_scale = 1.0
    lcm_diffusion_setting.inference_steps = steps
    lcm_diffusion_setting.seed = seed
    lcm_diffusion_setting.use_seed = use_seed
    lcm_diffusion_setting.use_safety_checker = True
    lcm_diffusion_setting.use_tiny_auto_encoder = True
    # lcm_diffusion_setting.image_width = 320 if is_openvino_device() else 512
    # lcm_diffusion_setting.image_height = 320 if is_openvino_device() else 512
    lcm_diffusion_setting.image_width = 512
    lcm_diffusion_setting.image_height = 512
    lcm_diffusion_setting.use_openvino = False
    pprint(lcm_diffusion_setting.model_dump())
    lcm_text_to_image.init(lcm_diffusion_setting=lcm_diffusion_setting)

    # Time a single generation and report the latency.
    start = perf_counter()
    images = lcm_text_to_image.generate(lcm_diffusion_setting)
    latency = perf_counter() - start
    print(f"Latency: {latency:.2f} seconds")
    return images[0]  # .resize([512, 512], PIL.Image.ANTIALIAS)


css = """
#container{
    margin: 0 auto;
    max-width: 40rem;
}
#intro{
    max-width: 100%;
    text-align: center;
    margin: 0 auto;
}
#generate_button {
    color: white;
    border-color: #007bff;
    background: #007bff;
    width: 200px;
    height: 50px;
}
footer {
    visibility: hidden
}
"""


def _get_footer_message() -> str:
    version = f"<center><p> {APP_VERSION} "
    footer_msg = version + (
        ' © 2023 <a href="https://github.com/rupeshs">'
        " Rupesh Sreeraman</a></p></center>"
    )
    warning_msg = "<p><b> Please note that this is a minimal demo app.</b> </p><br>"
    return warning_msg + footer_msg


with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="container"):
        use_openvino = "" if is_openvino_device() else ""
        gr.Markdown(
            f"""# FastSD CPU demo {use_openvino}
            **Device : {DEVICE.upper()} , {get_device_name()}**
            """,
            elem_id="intro",
        )
        gr.HTML(
            f"""
            <p id="project-links" align="center">
                <a href='https://github.com/rupeshs/fastsdcpu'><img src='https://img.shields.io/badge/Project-Page-Green'></a>
            </p>
            """
        )
        with gr.Row():
            with gr.Row():
                prompt = gr.Textbox(
                    placeholder="Describe the image you'd like to see",
                    scale=5,
                    container=False,
                )
                generate_btn = gr.Button(
                    "Generate",
                    scale=1,
                    elem_id="generate_button",
                )
        image = gr.Image(type="filepath")
        with gr.Accordion("Advanced options", open=False):
            steps = gr.Slider(
                label="Steps",
                value=3,
                minimum=1,
                maximum=4,
                step=1,
            )
            seed = gr.Slider(
                randomize=True,
                minimum=0,
                maximum=999999999,
                label="Seed",
                step=1,
            )
            seed_checkbox = gr.Checkbox(
                label="Use seed",
                value=False,
                interactive=True,
            )
        gr.HTML(_get_footer_message())

        inputs = [prompt, steps, seed, seed_checkbox]
        generate_btn.click(fn=predict, inputs=inputs, outputs=image)


def start_demo_text_to_image(share=False):
    demo.queue()
    demo.launch(share=share)
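
# A minimal sketch of how this module could be launched directly; in the actual
# app, the entry point that calls start_demo_text_to_image() may live elsewhere.
if __name__ == "__main__":
    start_demo_text_to_image(share=False)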