Update app.py
app.py CHANGED
@@ -9,7 +9,6 @@ import gradio as gr
 import numpy as np
 import PIL.Image
 
-from huggingface_hub import snapshot_download
 from diffusers import DiffusionPipeline
 
 from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
@@ -28,6 +27,8 @@ Converted from [SoteMix](https://huggingface.co/Disty0/SoteMix) to [LCM_SoteMix]
 
 This model is for Anime art style.
 
+Faster but lower quality version with TAESD VAE: [LCM_SoteMix_OpenVINO_CPU_Space_TAESD](https://huggingface.co/spaces/Disty0/LCM_SoteMix_OpenVINO_CPU_Space_TAESD)
+
 [LCM Project page](https://latent-consistency-models.github.io)
 
 <p>Running on CPU with OpenVINO Acceleration</p>
@@ -43,19 +44,7 @@ height = int(os.getenv("IMAGE_HEIGHT", "512"))
 num_images = int(os.getenv("NUM_IMAGES", "1"))
 guidance_scale = float(os.getenv("GUIDANCE_SCALE", "1.0"))
 
-class CustomOVModelVaeDecoder(OVModelVaeDecoder):
-    def __init__(
-        self, model: openvino.runtime.Model, parent_model: OVBaseModel, ov_config: Optional[Dict[str, str]] = None, model_dir: str = None,
-    ):
-        super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)
-
 pipe = OVStableDiffusionPipeline.from_pretrained(model_id, compile = False, ov_config = {"CACHE_DIR":""})
-
-# Inject TAESD
-
-taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
-pipe.vae_decoder = CustomOVModelVaeDecoder(model = OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"), parent_model = pipe, model_dir = taesd_dir)
-
 pipe.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
 pipe.compile()
 
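For reference, a minimal sketch of how a statically reshaped pipeline like the one in app.py is typically driven. Only the `from_pretrained` / `reshape` / `compile` sequence and the `IMAGE_HEIGHT`, `NUM_IMAGES`, and `GUIDANCE_SCALE` environment variables come from the file above; the `model_id` value, the `BATCH_SIZE` and `IMAGE_WIDTH` variable names, the prompt, the step count, and the output filename are assumptions for illustration, not taken from this commit.

```python
import os

from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline

# Placeholder model id; app.py defines its own model_id (assumption, not from this diff).
model_id = "Disty0/LCM_SoteMix"

# IMAGE_HEIGHT, NUM_IMAGES and GUIDANCE_SCALE appear in app.py;
# BATCH_SIZE and IMAGE_WIDTH are assumed to follow the same pattern.
batch_size = int(os.getenv("BATCH_SIZE", "1"))
width = int(os.getenv("IMAGE_WIDTH", "512"))
height = int(os.getenv("IMAGE_HEIGHT", "512"))
num_images = int(os.getenv("NUM_IMAGES", "1"))
guidance_scale = float(os.getenv("GUIDANCE_SCALE", "1.0"))

# Load without compiling so the static input shapes can be fixed first,
# then compile once for the CPU target.
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, compile=False, ov_config={"CACHE_DIR": ""})
pipe.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
pipe.compile()

# Diffusers-style call; LCM checkpoints typically need only a few steps
# and a low guidance scale. Prompt and step count are illustrative.
result = pipe(
    prompt="1girl, masterpiece, best quality",
    num_inference_steps=4,
    guidance_scale=guidance_scale,
    num_images_per_prompt=num_images,
)
result.images[0].save("output.png")
```

Because the pipeline is reshaped to fixed dimensions before compilation, generation requests must match those dimensions; changing resolution or batch size requires another reshape and compile.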