import random

import gradio as gr
import numpy as np

from diffusion_lens import get_images

MAX_SEED = np.iinfo(np.int32).max
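# A seed of -1 is treated below as "pick a random seed"; MAX_SEED bounds both the
# seed slider and the random draw.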
# Description
title = r"""
<h1 align="center">Diffusion Lens: Interpreting Text Encoders in Text-to-Image Pipelines</h1>
"""
description = r"""
<b>A demo for the paper <a href='https://arxiv.org/abs/2403.05846' target='_blank'>Diffusion Lens: Interpreting Text Encoders in Text-to-Image Pipelines</a>.<br>
<b>Visit our <a href='https://tokeron.github.io/DiffusionLensWeb/' target='_blank'>project webpage</a> for more information.<br>
"""
article = r"""
---
📖 **Citation**
<br>
If our work is helpful for your research or applications, please cite us via:
```bibtex
@article{toker2024diffusion,
  title={Diffusion Lens: Interpreting Text Encoders in Text-to-Image Pipelines},
  author={Toker, Michael and Orgad, Hadas and Ventura, Mor and Arad, Dana and Belinkov, Yonatan},
  journal={arXiv preprint arXiv:2403.05846},
  year={2024}
}
```
🧠 **Abstract**
<br>
```
Text-to-image diffusion models (T2I) use a latent representation of a text prompt to guide the image generation process.
However, the process by which the encoder produces the text representation is unknown.
We propose the Diffusion Lens, a method for analyzing the text encoder of T2I models by generating images from its intermediate representations.
Using the Diffusion Lens, we perform an extensive analysis of two recent T2I models.
Exploring compound prompts, we find that complex scenes describing multiple objects are composed progressively and more slowly compared to simple scenes;
Exploring knowledge retrieval, we find that the representation of uncommon concepts requires further computation compared to common concepts,
and that knowledge retrieval is gradual across layers.
Overall, our findings provide valuable insights into the text encoder component in T2I pipelines.
```
<br>
📧 **Contact**
<br>
```
If you have any questions, please feel free to open an issue or reach out to us directly at tok@cs.technion.ac.il
```
"""
model_num_of_layers = {
    'Stable Diffusion 1.4': 13,
    'Stable Diffusion 2.1': 23,
}
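# The sweep in `generate_images` below walks from an early hidden layer up to this
# final layer; the counts above are assumed to match the depth of each model's CLIP
# text encoder (embedding output plus transformer blocks).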
# def run_for_examples(prompt, model, seed, skip):
# return generate_images(prompt, model, seed, skip);
def generate_images(prompt, model, seed, skip):
    seed = random.randint(0, MAX_SEED) if seed == -1 else seed
    print('calling diffusion lens with model:', model, 'and seed:', seed)
    # gr.Info('Generating images from intermediate layers..')
    all_images = []  # Accumulates (image, caption) pairs, one per visited layer
    max_num_of_layers = model_num_of_layers[model]
    start_layer = max_num_of_layers % skip
    # Decrease skip_layers from its maximum down to 0, i.e. visit layers from
    # early to final, `skip` layers apart.
    for skip_layers in range(max_num_of_layers - start_layer, -1, -1 * skip):
        # Pass the model and seed through to the get_images function
        images = get_images(prompt, skip_layers=skip_layers, model=model, seed=seed)
        all_images.append((images[0], f'layer_{max_num_of_layers - skip_layers}'))
        yield all_images  # Stream the partial gallery back to the UI
    gr.Info('Image generation complete')
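# Because `generate_images` is a generator, Gradio streams each yielded `all_images`
# list into the gallery as it arrives. A hypothetical direct call would look like:
#   for partial in generate_images("A photo of a cat", "Stable Diffusion 1.4", 42, 3):
#       print(len(partial), "layers rendered so far")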
with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    # text_input = gr.Textbox(label="Enter prompt")
    # model_select = gr.Dropdown(label="Select Model", choices=['sd1', 'sd2'])
    # seed_input = gr.Number(label="Enter Seed", value=0)  # Default seed set to 0
    # Update the submit function to include the new inputs
    # text_input.submit(fn=generate_images, inputs=[text_input, model_select, seed_input], outputs=gallery)
    with gr.Column():
        prompt = gr.Textbox(
            label="Prompt",
            value="A photo of Steve Jobs",
        )
        model = gr.Radio(
            [
                "Stable Diffusion 1.4",
                "Stable Diffusion 2.1",
            ],
            value="Stable Diffusion 1.4",
            label="Model",
        )
        seed = gr.Slider(
            minimum=0,
            maximum=MAX_SEED,
            value=42,
            step=1,
            label="Seed Value",
        )
        skip = gr.Slider(
            minimum=1,
            maximum=6,
            value=3,
            step=1,
            label="# Layers to Skip Between Generations",
        )
        inputs = [
            prompt,
            model,
            seed,
            skip,
        ]
        generate_button = gr.Button("Generate Image")
    with gr.Column():
        gallery = gr.Gallery(label="Generated Images", columns=6, rows=1, object_fit="contain", height="auto")
    outputs = [gallery]
    gr.on(
        triggers=[
            # prompt.submit,
            generate_button.click,
            # seed.input,
            # model.input
        ],
        fn=generate_images,
        inputs=inputs,
        outputs=outputs,
        show_progress="full",
        show_api=False,
        trigger_mode="always_last",
    )
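    # trigger_mode="always_last" drops clicks that arrive while a generation is in
    # flight, queuing only the most recent request once the current run finishes.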
    # examples = [
    #     ["A photo of an Aye-aye.", "Stable Diffusion 2.1", 42, 1],
    #     ["A photo of a Beagle.", "Stable Diffusion 2.1", 42, 1],
    #     ["A green cat and a blue dog.", "Stable Diffusion 2.1", 42, 1],
    # ]
    # gr.Examples(
    #     examples=examples,
    #     inputs=[prompt, model, seed, skip],
    #     fn=generate_images,
    #     outputs=[gallery],
    #     cache_examples=True,
    # )
    gr.Markdown(article)
demo.queue(api_open=False)
demo.launch(show_api=False)