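# app.py -- Gradio text-to-image demo for the INT8 OpenVINO build of
# LCM Dreamshaper v7, served through optimum-intel.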
import gradio as gr
from optimum.intel import OVLatentConsistencyModelPipeline

# Alternative OpenVINO checkpoints that were tried:
#model_id = "echarlaix/sdxl-turbo-openvino-int8"
#model_id = "echarlaix/LCM_Dreamshaper_v7-openvino"
model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"

# A safety checker can be passed at load time, but is disabled here:
#from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
#safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)

# Load without compiling so the model can still be reshaped to static shapes below.
pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)

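# Fix the input shapes before compiling: OpenVINO runs faster with static
# dimensions, but the pipeline will then only accept exactly these sizes.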
batch_size, num_images, height, width = 1, 1, 2048, 2048
pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)

# LoRA loading is not usable with this pipeline:
#pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
#pipeline.set_adapters("pixel")


# Choosing a sampling method (scheduler): a scheduler can be assigned here,
# but inference then crashes, so this stays disabled:
#from diffusers.schedulers import EulerDiscreteScheduler
#scheduler = EulerDiscreteScheduler()
#pipeline.scheduler = scheduler

# Textual-inversion embeddings (badhandv4, hiten1), also disabled:
#pipeline.load_textual_inversion("./badhandv4.pt", "badhandv4")
#pipeline.load_textual_inversion("./hiten1.pt", "hiten1")
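# Compile the reshaped pipeline for the target device.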
pipeline.compile()

# negative_prompt is not supported by the LCM pipeline:
# TypeError: LatentConsistencyPipelineMixin.__call__() got an unexpected keyword argument 'negative_prompt'
#negative_prompt="easynegative,bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs, nsfw, nude, censored,  "

def infer(prompt, num_inference_steps):
    """Run one text-to-image generation and return the first image."""
    image = pipeline(
        prompt=prompt,
        #negative_prompt=negative_prompt,  # unsupported, see above
        guidance_scale=7.0,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        num_images_per_prompt=num_images,
    ).images[0]

    return image
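# Quick local smoke test (hypothetical, kept disabled; bypasses the Gradio UI):
#infer("Astronaut in a jungle, cold color palette, 8k", 4).save("smoke_test.png")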

examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]

css="""
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""


with gr.Blocks(css=css) as demo:
    
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Demo : [Fast LCM](https://huggingface.co/OpenVINO/LCM_Dreamshaper_v7-int8-ov) quantized with NNCF ⚡
        """)

        with gr.Row():
            
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            
            run_button = gr.Button("Run", scale=0)
        
        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            # A negative-prompt box would go here, but the pipeline rejects
            # negative_prompt (see the TypeError note above), so it stays disabled:
            # negative_prompt = gr.Text(
            #     label="Negative prompt",
            #     max_lines=1,
            #     placeholder="Enter a negative prompt",
            # )
            
            with gr.Row():
                
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=10,
                    step=1,
                    value=8,
                )
        
        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )

    run_button.click(
        fn=infer,
        inputs=[prompt, num_inference_steps],
        outputs=[result],
    )

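# share=True opens a public tunnel when the app runs locally; on Hugging Face
# Spaces it is ignored with a warning.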
demo.queue().launch(share=True)