import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import gradio as gr
import random
import tqdm

# Disable tqdm's monitor thread so it does not interfere with progress reporting
tqdm.tqdm.monitor_interval = 0

# Load the diffusion pipelines
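# custom_pipeline="lpw_stable_diffusion_xl" loads the community
# long-prompt-weighting pipeline, which lifts the 77-token CLIP prompt limit
# and supports (attention:weight) syntax.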
pipe1 = StableDiffusionXLPipeline.from_pretrained(
    "kayfahaarukku/UrangDiffusion-1.4", 
    torch_dtype=torch.float16, 
    custom_pipeline="lpw_stable_diffusion_xl",
)
pipe1.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe1.scheduler.config)

pipe2 = StableDiffusionXLPipeline.from_pretrained(
    "kayfahaarukku/UrangDiffusion-2.0", 
    torch_dtype=torch.float16, 
    custom_pipeline="lpw_stable_diffusion_xl",
)
pipe2.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe2.scheduler.config)

# Function to generate images from both models
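# The pipelines stay on CPU at import time; @spaces.GPU allocates a GPU per
# request (ZeroGPU-style), and the .to('cuda') calls below move them onto it
# for the duration of the call.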
@spaces.GPU
def generate_comparison(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    pipe1.to('cuda')
    pipe2.to('cuda')
    
    if randomize_seed:
        seed = random.randint(0, 999999999)  # match the seed slider's range
    if use_defaults:
        prompt = f"{prompt}, best quality, amazing quality, very aesthetic"
        negative_prompt = f"nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], {negative_prompt}"

    # Give each pipeline its own generator seeded identically, so both models
    # sample from the same noise; reusing one generator would advance its state
    # before the second call and make the comparison unfair.
    def make_generator():
        return torch.Generator().manual_seed(seed)

    # Combined progress across both generations: the first pass maps to 0-50%,
    # the second to 50-100%. (`callback`/`callback_steps` are deprecated in
    # newer diffusers in favor of `callback_on_step_end`.)
    def make_callback(offset):
        def callback(step, timestep, latents):
            progress((offset + step) / (2 * num_inference_steps))
        return callback
    
    width, height = map(int, resolution.split('x'))
    
    # Generate image with UrangDiffusion-1.4
    image1 = pipe1(
        prompt, 
        negative_prompt=negative_prompt,
        width=width,
        height=height, 
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=make_generator(),
        callback=make_callback(0),
        callback_steps=1
    ).images[0]

    # Generate image with UrangDiffusion-2.0
    image2 = pipe2(
        prompt, 
        negative_prompt=negative_prompt,
        width=width,
        height=height, 
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=make_generator(),
        callback=make_callback(num_inference_steps),
        callback_steps=1
    ).images[0]

    torch.cuda.empty_cache()

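    # A1111-style parameter string, compatible with Stable Diffusion WebUI's
    # parameter importer (as noted in the Parameters accordion below).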
    metadata_text = f"{prompt}\nNegative prompt: {negative_prompt}\nSteps: {num_inference_steps}, Sampler: Euler a, Size: {width}x{height}, Seed: {seed}, CFG scale: {guidance_scale}"

    return image1, image2, seed, metadata_text

# Define Gradio interface
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    image1, image2, seed, metadata_text = generate_comparison(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)
    return image1, image2, seed, gr.update(value=metadata_text)

def reset_inputs():
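    # One gr.update per component, in the same order as the outputs list wired
    # to reset_button.click below.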
    return (
        gr.update(value=''), gr.update(value=''), gr.update(value=True),
        gr.update(value='832x1216'), gr.update(value=7), gr.update(value=28),
        gr.update(value=0), gr.update(value=True), gr.update(value='')
    )

with gr.Blocks(title="UrangDiffusion Comparison Demo", theme="NoCrypt/miku@1.2.1") as demo:
    gr.HTML(
        "<h1>UrangDiffusion 1.4 vs 2.0 Comparison Demo</h1>"
        "This demo showcases a comparison between UrangDiffusion 1.4 and 2.0."
    )
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
            negative_prompt_input = gr.Textbox(lines=2, placeholder="Enter negative prompt here", label="Negative Prompt")
            use_defaults_input = gr.Checkbox(label="Use Default Quality Tags and Negative Prompt", value=True)
            resolution_input = gr.Radio(
                choices=[
                    "1024x1024", "1152x896", "896x1152", "1216x832", "832x1216",
                    "1344x768", "768x1344", "1536x640", "640x1536"
                ],
                label="Resolution",
                value="832x1216"
            )
            guidance_scale_input = gr.Slider(minimum=1, maximum=20, step=0.5, label="Guidance Scale", value=7)
            num_inference_steps_input = gr.Slider(minimum=1, maximum=100, step=1, label="Number of Inference Steps", value=28)
            seed_input = gr.Slider(minimum=0, maximum=999999999, step=1, label="Seed", value=0, interactive=True)
            randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
            generate_button = gr.Button("Generate Comparison")
            reset_button = gr.Button("Reset")

        with gr.Column():
            with gr.Row():
                output_image1 = gr.Image(type="pil", label="UrangDiffusion 1.4")
                output_image2 = gr.Image(type="pil", label="UrangDiffusion 2.0")
            with gr.Accordion("Parameters", open=False):
                gr.Markdown(
                    """
                    These parameters are compatible with Stable Diffusion WebUI's parameter importer.
                    """
                )
                metadata_textbox = gr.Textbox(lines=6, label="Image Parameters", interactive=False, max_lines=6)
            gr.Markdown(
                """
                ### Recommended prompt formatting:
                `1girl/1boy, character name, from what series, everything else in any order, best quality, amazing quality, very aesthetic`

                **PS:** `best quality, amazing quality, very aesthetic` is automatically added when "Use Default Quality Tags and Negative Prompt" is enabled

                ### Recommended settings:
                - Steps: 25-30
                - CFG: 5-7
                """
            )

    generate_button.click(
        interface_fn,
        inputs=[
            prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
        ],
        outputs=[output_image1, output_image2, seed_input, metadata_textbox]
    )
    
    reset_button.click(
        reset_inputs,
        inputs=[],
        outputs=[
            prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input, metadata_textbox
        ]
    )

demo.queue(max_size=20).launch(share=False)