Spaces: Running on Zero

Plat committed
Commit • 6ea233e
1 Parent(s): ff84eba

fix: settings
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-title: Random Illustrious
-emoji:
+title: Random Illustrious XL
+emoji: 🎨🖼️
 colorFrom: purple
 colorTo: red
 sdk: gradio
app.py CHANGED
@@ -36,13 +36,12 @@ IMAGE_MODEL_REPO_ID = os.environ.get(
 DART_V3_REPO_ID = os.environ.get("DART_V3_REPO_ID", None)
 assert DART_V3_REPO_ID is not None
 
-torch_dtype = torch.bfloat16
-
 dart = AutoModelForCausalLM.from_pretrained(
     DART_V3_REPO_ID,
-    torch_dtype=torch_dtype,
+    torch_dtype=torch.bfloat16,
     token=HF_TOKEN,
     use_cache=True,
+    device_map="cpu",
 )
 dart = dart.eval()
 dart = dart.requires_grad_(False)
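A note on the added `device_map="cpu"`: on a ZeroGPU Space the main process starts without CUDA, so weights have to load on CPU at import time and only touch the GPU inside a `@spaces.GPU`-decorated call. A minimal sketch of the same load pattern, assuming the usual transformers/accelerate stack and mirroring the env-var lookup above:

```python
import os

import torch
from transformers import AutoModelForCausalLM

repo_id = os.environ["DART_V3_REPO_ID"]  # same env var the Space reads

model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # load weights directly in bf16
    device_map="cpu",            # keep them on CPU until a GPU is attached
    use_cache=True,              # KV cache speeds up autoregressive decoding
)
model = model.eval().requires_grad_(False)  # inference only, no grad tracking
```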
@@ -50,14 +49,15 @@ dart = torch.compile(dart)
 tokenizer = AutoTokenizer.from_pretrained(DART_V3_REPO_ID)
 
 pipe = StableDiffusionXLPipeline.from_pretrained(
     IMAGE_MODEL_REPO_ID,
-    torch_dtype=torch_dtype,
+    torch_dtype=torch.bfloat16,
     add_watermarker=False,
-    custom_pipeline="lpw_stable_diffusion_xl"
+    custom_pipeline="lpw_stable_diffusion_xl",
 )
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 pipe.unet.set_attn_processor(AttnProcessor2_0())
-
+if device == "cuda":
+    pipe.enable_sequential_cpu_offload(gpu_id=0, device="cuda")
 
 
 MAX_SEED = np.iinfo(np.int32).max
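The new offload guard references a `device` variable defined earlier in app.py. A self-contained sketch of the same idea; the repo id here is a stand-in, not the Space's actual IMAGE_MODEL_REPO_ID:

```python
import torch
from diffusers import StableDiffusionXLPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Stand-in repo id; the Space reads its model from IMAGE_MODEL_REPO_ID.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.bfloat16,
    add_watermarker=False,
)
if device == "cuda":
    # Streams submodules to the GPU one at a time: slower inference,
    # but peak VRAM stays near the size of the largest single submodule.
    pipe.enable_sequential_cpu_offload(gpu_id=0, device="cuda")
```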
@@ -70,32 +70,35 @@ TEMPLATE = (
     "{aspect_ratio}"
     "<|length:medium|>"
     #
-    "<copyright
+    "<copyright></copyright>"
     #
     "<character></character>"
     #
-    "<general>"
+    "<general>{subject}"
 )
+QUALITY_TAGS = "masterpiece, best quality, very aesthetic, newest"
+NEGATIVE_PROMPT = "nsfw, (worst quality, bad quality:1.2), very displeasing, lowres, jaggy lines, 3d, watermark, signature, copyright, logo, blurry, ugly, poorly drawn, retro, scan, white outline"
+
 
 def get_aspect_ratio(width: int, height: int) -> str:
     ar = width / height
 
     if ar <= 1 / math.sqrt(3):
-        return "<|aspect_ratio:
+        return "<|aspect_ratio:ultra_tall|>"
     elif ar <= 8 / 9:
-        return "<|aspect_ratio:
+        return "<|aspect_ratio:tall|>"
     elif ar < 9 / 8:
         return "<|aspect_ratio:square|>"
     elif ar < math.sqrt(3):
-        return "<|aspect_ratio:
+        return "<|aspect_ratio:wide|>"
     else:
-        return "<|aspect_ratio:
+        return "<|aspect_ratio:ultra_wide|>"
 
 
 @torch.inference_mode
-def generate_prompt(aspect_ratio: str):
+def generate_prompt(subject: str, aspect_ratio: str):
     input_ids = tokenizer.encode_plus(
-        TEMPLATE.format(aspect_ratio=aspect_ratio),
+        TEMPLATE.format(aspect_ratio=aspect_ratio, subject=subject),
         return_tensors="pt",
     ).input_ids
     print("input_ids:", input_ids)
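The rewritten bucketing is symmetric around square in ratio space: the tall/square and square/wide boundaries are the reciprocal pair 8/9 and 9/8, and the ultra boundaries the reciprocal pair 1/√3 and √3. The new helper runs standalone; the asserts below are illustrative spot checks, not from the Space:

```python
import math

# Aspect-ratio bucketing helper, as on the new side of the diff.
def get_aspect_ratio(width: int, height: int) -> str:
    ar = width / height
    if ar <= 1 / math.sqrt(3):        # taller than 1:sqrt(3)
        return "<|aspect_ratio:ultra_tall|>"
    elif ar <= 8 / 9:
        return "<|aspect_ratio:tall|>"
    elif ar < 9 / 8:
        return "<|aspect_ratio:square|>"
    elif ar < math.sqrt(3):
        return "<|aspect_ratio:wide|>"
    else:                             # wider than sqrt(3):1
        return "<|aspect_ratio:ultra_wide|>"

assert get_aspect_ratio(832, 1216) == "<|aspect_ratio:tall|>"
assert get_aspect_ratio(1024, 1024) == "<|aspect_ratio:square|>"
assert get_aspect_ratio(1216, 832) == "<|aspect_ratio:wide|>"
```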
@@ -111,14 +114,22 @@ def generate_prompt(aspect_ratio: str):
     )[0]
 
     generated = output_ids[len(input_ids) :]
-    decoded = ", ".join(
+    decoded = ", ".join(
+        [
+            token
+            for token in tokenizer.batch_decode(generated, skip_special_tokens=True)
+            if token.strip() != ""
+        ]
+    )
     print("decoded:", decoded)
 
     return decoded
 
+
 def format_prompt(prompt: str, prompt_suffix: str):
     return f"{prompt}, {prompt_suffix}"
 
+
 @spaces.GPU
 def generate_image(
     prompt: str,
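The new decode step relies on the Dart tokenizer being tag-level, roughly one Danbooru tag per id, so `batch_decode` yields one tag per element and skipped special tokens come back as empty strings. Assuming that behavior, the filter-and-join pattern looks like this on stand-in data:

```python
# Filter-and-join from the new decode step, on stand-in decoded tokens.
# With a tag-level tokenizer, batch_decode(..., skip_special_tokens=True)
# returns one string per id, with specials decoded to "".
tags = ["1girl", "", "solo", " ", "blue sky"]
decoded = ", ".join(token for token in tags if token.strip() != "")
assert decoded == "1girl, solo, blue sky"
```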
@@ -141,7 +152,9 @@ def generate_image(
 
     return image
 
+
 def on_generate(
+    subject: str,
     suffix: str,
     negative_prompt: str,
     seed,
@@ -157,7 +170,8 @@ def on_generate(
     generator = torch.Generator().manual_seed(seed)
 
     ar = get_aspect_ratio(width, height)
-
+    print("ar:", ar)
+    prompt = generate_prompt(subject, ar)
     prompt = format_prompt(prompt, suffix)
     print(prompt)
 
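Context for the unchanged `generator` line above: a manually seeded `torch.Generator` pins the sampling noise, which is what makes a given seed reproducible across runs. A quick check:

```python
import torch

# Same seed, same noise: the generator fixes the sampling trajectory.
g1 = torch.Generator().manual_seed(42)
g2 = torch.Generator().manual_seed(42)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))
```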
@@ -188,6 +202,11 @@ with gr.Blocks(css=css) as demo:
     """)
 
     with gr.Row():
+        subject_radio = gr.Dropdown(
+            label="Subject",
+            choices=["1girl", "2girls", "1boy", "no humans"],
+            value="1girl",
+        )
         run_button = gr.Button("Generate random", scale=0)
 
     result = gr.Image(label="Result", show_label=False)
@@ -199,13 +218,13 @@ with gr.Blocks(css=css) as demo:
         prompt_suffix = gr.Text(
             label="Prompt suffix",
             visible=True,
-            value=
+            value=QUALITY_TAGS,
         )
         negative_prompt = gr.Text(
             label="Negative prompt",
             placeholder="Enter a negative prompt",
             visible=True,
-            value=
+            value=NEGATIVE_PROMPT,
         )
 
         seed = gr.Slider(
@@ -241,7 +260,7 @@ with gr.Blocks(css=css) as demo:
             minimum=1.0,
             maximum=10.0,
             step=0.5,
-            value=
+            value=6.5,
         )
 
         num_inference_steps = gr.Slider(
@@ -256,6 +275,7 @@ with gr.Blocks(css=css) as demo:
         triggers=[run_button.click],
         fn=on_generate,
         inputs=[
+            subject_radio,
             prompt_suffix,
             negative_prompt,
             seed,
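The Dropdown added earlier is prepended to `inputs`, matching the new leading `subject` parameter of `on_generate`: Gradio passes component values to the handler positionally. A minimal, self-contained sketch of that wiring, simplified to `run_button.click` instead of the `gr.on(triggers=...)` form the Space uses, with stand-in handler and values:

```python
import gradio as gr

def on_generate(subject: str, suffix: str) -> str:
    # Stand-in handler: component values arrive in the order of `inputs`.
    return f"{subject}, {suffix}"

with gr.Blocks() as demo:
    subject_radio = gr.Dropdown(
        label="Subject",
        choices=["1girl", "2girls", "1boy", "no humans"],
        value="1girl",
    )
    prompt_suffix = gr.Text(label="Prompt suffix", value="masterpiece, best quality")
    run_button = gr.Button("Generate random")
    result = gr.Text(label="Result")
    run_button.click(fn=on_generate, inputs=[subject_radio, prompt_suffix], outputs=result)

demo.launch()
```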