poipiii committed
Commit 02ceb21
Parent(s): a85ef96

first commit

Files changed (1)
  1. lpw_hires_fix.py +1205 -0
lpw_hires_fix.py ADDED
@@ -0,0 +1,1205 @@
import inspect
import re
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import SchedulerMixin, StableDiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
from diffusers.utils import logging


try:
    from diffusers.utils import PIL_INTERPOLATION
except ImportError:
    if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
        PIL_INTERPOLATION = {
            "linear": PIL.Image.Resampling.BILINEAR,
            "bilinear": PIL.Image.Resampling.BILINEAR,
            "bicubic": PIL.Image.Resampling.BICUBIC,
            "lanczos": PIL.Image.Resampling.LANCZOS,
            "nearest": PIL.Image.Resampling.NEAREST,
        }
    else:
        PIL_INTERPOLATION = {
            "linear": PIL.Image.LINEAR,
            "bilinear": PIL.Image.BILINEAR,
            "bicubic": PIL.Image.BICUBIC,
            "lanczos": PIL.Image.LANCZOS,
            "nearest": PIL.Image.NEAREST,
        }
# ------------------------------------------------------------------------------

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

re_attention = re.compile(
    r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
    re.X,
)


def parse_prompt_attention(text):
    r"""
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res

def get_prompts_with_weights(pipe: StableDiffusionPipeline, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return their tokens together with the weight of each token.
    No padding, starting, or ending token is included.
    """
    tokens = []
    weights = []
    truncated = False
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
            # tokenize and discard the starting and the ending token
            token = pipe.tokenizer(word).input_ids[1:-1]
            text_token += token
            # copy the weight by length of token
            text_weight += [weight] * len(token)
            # stop if the text is too long (longer than truncation limit)
            if len(text_token) > max_length:
                truncated = True
                break
        # truncate
        if len(text_token) > max_length:
            truncated = True
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]
        tokens.append(text_token)
        weights.append(text_weight)
    if truncated:
        logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
    return tokens, weights

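# Illustration (token ids below are made up, not real CLIP ids): for the prompt list
# ["a (cat:1.5) dog"], get_prompts_with_weights returns one id per word piece with the
# weight repeated for every piece of the weighted span, e.g. tokens=[[320, 2368, 1929]]
# and weights=[[1.0, 1.5, 1.0]]; no BOS/EOS or padding is added at this stage.

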
def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range(max_embeddings_multiples):
                    w.append(1.0)  # weight for starting token in this chunk
                    w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
                    w.append(1.0)  # weight for ending token in this chunk
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights

def get_unweighted_text_embeddings(
    pipe: StableDiffusionPipeline,
    text_input: torch.Tensor,
    chunk_length: int,
    no_boseos_middle: Optional[bool] = True,
):
    """
    When the length of tokens exceeds the capacity of the text encoder, it is split
    into chunks that are sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
            # extract the i-th chunk
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()

            # cover the head and the tail by the starting and the ending tokens
            text_input_chunk[:, 0] = text_input[0, 0]
            text_input_chunk[:, -1] = text_input[0, -1]
            text_embedding = pipe.text_encoder(text_input_chunk)[0]

            if no_boseos_middle:
                if i == 0:
                    # discard the ending token
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
                    # discard the starting token
                    text_embedding = text_embedding[:, 1:]
                else:
                    # discard both starting and ending tokens
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = torch.cat(text_embeddings, dim=1)
    else:
        text_embeddings = pipe.text_encoder(text_input)[0]
    return text_embeddings

def get_weighted_text_embeddings(
    pipe: StableDiffusionPipeline,
    prompt: Union[str, List[str]],
    uncond_prompt: Optional[Union[str, List[str]]] = None,
    max_embeddings_multiples: Optional[int] = 3,
    no_boseos_middle: Optional[bool] = False,
    skip_parsing: Optional[bool] = False,
    skip_weighting: Optional[bool] = False,
):
    r"""
    Prompts can be assigned local weights using brackets. For example,
    the prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to those words get multiplied by a constant, 1.1.
    Also, to regularize the embedding, the weighted embedding is rescaled to preserve the original mean.
    Args:
        pipe (`StableDiffusionPipeline`):
            Pipe to provide access to the tokenizer and the text encoder.
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        uncond_prompt (`str` or `List[str]`):
            The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
            is provided, the embeddings of prompt and uncond_prompt are concatenated.
        max_embeddings_multiples (`int`, *optional*, defaults to `3`):
            The max multiple length of prompt embeddings compared to the max output length of text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            If the length of text tokens is a multiple of the capacity of the text encoder, whether to keep
            the starting and ending tokens of each chunk in the middle.
        skip_parsing (`bool`, *optional*, defaults to `False`):
            Skip the parsing of brackets.
        skip_weighting (`bool`, *optional*, defaults to `False`):
            Skip the weighting. When the parsing is skipped, it is forced to True.
    """
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    if not skip_parsing:
        prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
    else:
        prompt_tokens = [
            token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
        ]
        prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens = [
                token[1:-1]
                for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
            ]
            uncond_weights = [[1.0] * len(token) for token in uncond_tokens]

    # round up the longest length of tokens to a multiple of (model_max_length - 2)
    max_length = max([len(token) for token in prompt_tokens])
    if uncond_prompt is not None:
        max_length = max(max_length, max([len(token) for token in uncond_tokens]))

    max_embeddings_multiples = min(
        max_embeddings_multiples,
        (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2

    # pad the length of tokens and weights
    bos = pipe.tokenizer.bos_token_id
    eos = pipe.tokenizer.eos_token_id
    pad = getattr(pipe.tokenizer, "pad_token_id", eos)
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        pad,
        no_boseos_middle=no_boseos_middle,
        chunk_length=pipe.tokenizer.model_max_length,
    )
    prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
    if uncond_prompt is not None:
        uncond_tokens, uncond_weights = pad_tokens_and_weights(
            uncond_tokens,
            uncond_weights,
            max_length,
            bos,
            eos,
            pad,
            no_boseos_middle=no_boseos_middle,
            chunk_length=pipe.tokenizer.model_max_length,
        )
        uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)

    # get the embeddings
    text_embeddings = get_unweighted_text_embeddings(
        pipe,
        prompt_tokens,
        pipe.tokenizer.model_max_length,
        no_boseos_middle=no_boseos_middle,
    )
    prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=pipe.device)
    if uncond_prompt is not None:
        uncond_embeddings = get_unweighted_text_embeddings(
            pipe,
            uncond_tokens,
            pipe.tokenizer.model_max_length,
            no_boseos_middle=no_boseos_middle,
        )
        uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=pipe.device)

    # assign weights to the prompts and normalize in the sense of mean
    # TODO: should we normalize by chunk or as a whole (current implementation)?
    if (not skip_parsing) and (not skip_weighting):
        previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
        text_embeddings *= prompt_weights.unsqueeze(-1)
        current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
        text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
        if uncond_prompt is not None:
            previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
            uncond_embeddings *= uncond_weights.unsqueeze(-1)
            current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
            uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)

    if uncond_prompt is not None:
        return text_embeddings, uncond_embeddings
    return text_embeddings, None

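# Example (a minimal sketch, not part of the original commit): how the helpers above
# compose. `pipe` can be any loaded StableDiffusionPipeline; the shapes assume the
# standard CLIP text encoder (chunk length 77, hidden size 768).
def _example_weighted_embeddings(pipe: StableDiffusionPipeline):
    text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
        pipe,
        prompt="a (very beautiful:1.2) landscape, [blurry]",
        uncond_prompt="low quality",
    )
    # for short prompts both tensors have shape (1, 77, 768)
    return text_embeddings.shape, uncond_embeddings.shape

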
def preprocess_image(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


def preprocess_mask(mask, scale_factor=8):
    mask = mask.convert("L")
    w, h = mask.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))
    mask = mask[None]  # add a batch dimension (the original transpose(0, 1, 2, 3) was a no-op)
    mask = 1 - mask  # repaint white, keep black
    mask = torch.from_numpy(mask)
    return mask

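# Shape sketch (illustrative, not part of the original commit): what the preprocessing
# helpers produce for a 512x512 input.
def _example_preprocess_shapes():
    image = PIL.Image.new("RGB", (512, 512))
    mask = PIL.Image.new("L", (512, 512), color=255)  # all white -> fully repainted
    image_tensor = preprocess_image(image)  # (1, 3, 512, 512), values in [-1, 1]
    mask_tensor = preprocess_mask(mask, scale_factor=8)  # (1, 4, 64, 64), all zeros here
    return image_tensor.shape, mask_tensor.shape

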
class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion without a token-length limit, with support
    for parsing weights in the prompt.
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):

        def __init__(
            self,
            vae: AutoencoderKL,
            text_encoder: CLIPTextModel,
            tokenizer: CLIPTokenizer,
            unet: UNet2DConditionModel,
            scheduler: SchedulerMixin,
            safety_checker: StableDiffusionSafetyChecker,
            feature_extractor: CLIPImageProcessor,
            requires_safety_checker: bool = True,
        ):
            super().__init__(
                vae=vae,
                text_encoder=text_encoder,
                tokenizer=tokenizer,
                unet=unet,
                scheduler=scheduler,
                safety_checker=safety_checker,
                feature_extractor=feature_extractor,
                requires_safety_checker=requires_safety_checker,
            )
            self.__init__additional__()

    else:

        def __init__(
            self,
            vae: AutoencoderKL,
            text_encoder: CLIPTextModel,
            tokenizer: CLIPTokenizer,
            unet: UNet2DConditionModel,
            scheduler: SchedulerMixin,
            safety_checker: StableDiffusionSafetyChecker,
            feature_extractor: CLIPImageProcessor,
        ):
            super().__init__(
                vae=vae,
                text_encoder=text_encoder,
                tokenizer=tokenizer,
                unet=unet,
                scheduler=scheduler,
                safety_checker=safety_checker,
                feature_extractor=feature_extractor,
            )
            self.__init__additional__()

    def __init__additional__(self):
        if not hasattr(self, "vae_scale_factor"):
            setattr(self, "vae_scale_factor", 2 ** (len(self.vae.config.block_out_channels) - 1))

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's
        module hooks.
        """
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt,
        max_embeddings_multiples,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier-free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        if negative_prompt is None:
            negative_prompt = [""] * batch_size
        elif isinstance(negative_prompt, str):
            negative_prompt = [negative_prompt] * batch_size
        if batch_size != len(negative_prompt):
            raise ValueError(
                f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                " the batch size of `prompt`."
            )

        text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
            pipe=self,
            prompt=prompt,
            uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
            max_embeddings_multiples=max_embeddings_multiples,
        )
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            bs_embed, seq_len, _ = uncond_embeddings.shape
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        return text_embeddings

    def check_inputs(self, prompt, height, width, strength, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of `strength` should be in [0.0, 1.0] but is {strength}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
        if is_text2img:
            return self.scheduler.timesteps.to(device), num_inference_steps
        else:
            # get the original timestep using init_timestep
            offset = self.scheduler.config.get("steps_offset", 0)
            init_timestep = int(num_inference_steps * strength) + offset
            init_timestep = min(init_timestep, num_inference_steps)

            t_start = max(num_inference_steps - init_timestep + offset, 0)
            timesteps = self.scheduler.timesteps[t_start:].to(device)
            return timesteps, num_inference_steps - t_start

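    # Worked example (illustrative, not part of the original commit): with
    # num_inference_steps=50, strength=0.6 and a scheduler steps_offset of 1,
    # init_timestep = int(50 * 0.6) + 1 = 31 and t_start = 50 - 31 + 1 = 20, so
    # img2img/inpainting denoise over the final 30 scheduler timesteps.
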
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def prepare_latents(self, image, timestep, batch_size, height, width, dtype, device, generator, latents=None):
        if image is None:
            shape = (
                batch_size,
                self.unet.config.in_channels,
                height // self.vae_scale_factor,
                width // self.vae_scale_factor,
            )

            if latents is None:
                if device.type == "mps":
                    # randn does not work reproducibly on mps
                    latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
                else:
                    latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
            else:
                if latents.shape != shape:
                    raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
                latents = latents.to(device)

            # scale the initial noise by the standard deviation required by the scheduler
            latents = latents * self.scheduler.init_noise_sigma
            return latents, None, None
        else:
            init_latent_dist = self.vae.encode(image).latent_dist
            init_latents = init_latent_dist.sample(generator=generator)
            init_latents = 0.18215 * init_latents
            init_latents = torch.cat([init_latents] * batch_size, dim=0)
            init_latents_orig = init_latents
            shape = init_latents.shape

            # add noise to the latents using the timesteps
            if device.type == "mps":
                noise = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
            else:
                noise = torch.randn(shape, generator=generator, device=device, dtype=dtype)
            latents = self.scheduler.add_noise(init_latents, noise, timestep)
            return latents, init_latents_orig, noise

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        strength: float = 0.8,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
    ):
        r"""
        Function invoked when calling the pipeline for generation.
        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is
                a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it
                should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, the
                added noise will be maximum and the denoising process will run for the full number of iterations
                specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies
                to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of
                a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function
                returns `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will
                be called at every step.
        Returns:
            `None` if cancelled by `is_cancelled_callback`,
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, strength, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            max_embeddings_multiples,
        )
        dtype = text_embeddings.dtype

        # 4. Preprocess image and mask
        if isinstance(image, PIL.Image.Image):
            image = preprocess_image(image)
        if image is not None:
            image = image.to(device=device, dtype=dtype)
        if isinstance(mask_image, PIL.Image.Image):
            mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
        if mask_image is not None:
            mask = mask_image.to(device=device, dtype=dtype)
            mask = torch.cat([mask] * batch_size * num_images_per_prompt)
        else:
            mask = None

        # 5. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        # 6. Prepare latent variables
        latents, init_latents_orig, noise = self.prepare_latents(
            image,
            latent_timestep,
            batch_size * num_images_per_prompt,
            height,
            width,
            dtype,
            device,
            generator,
            latents,
        )

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            if mask is not None:
                # masking
                init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
                latents = (init_latents_proper * mask) + (latents * (1 - mask))

            # call the callback, if provided
            if i % callback_steps == 0:
                if callback is not None:
                    callback(i, t, latents)
                if is_cancelled_callback is not None and is_cancelled_callback():
                    return None

        # 9. Post-processing
        image = self.decode_latents(latents)

        # (the upscale pass of the hires fix is driven by `txt2img_highres` below)

        # 10. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)

        # 11. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image, has_nsfw_concept

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

    def text2img(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
    ):
        r"""
        Function for text-to-image generation.
        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies
                to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of
                a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function
                returns `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will
                be called at every step.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
        )

    def img2img(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
    ):
        r"""
        Function for image-to-image generation.
        Args:
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, the
                added noise will be maximum and the denoising process will run for the full number of iterations
                specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter will be modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies
                to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of
                a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function
                returns `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will
                be called at every step.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
        )

    def inpaint(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image],
        mask_image: Union[torch.FloatTensor, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
    ):
        r"""
        Function for inpainting.
        Args:
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. This is the image whose masked region will be inpainted.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is
                a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it
                should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When
                `strength` is 1, the denoising process will be run on the masked area for the full number of
                iterations specified in `num_inference_steps`. `image` will be used as a reference for the masked
                area, adding more noise to that region the larger the `strength`. If `strength` is 0, no inpainting
                will occur.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The reference number of denoising steps. More denoising steps usually lead to a higher quality
                image at the expense of slower inference. This parameter will be modulated by `strength`, as
                explained above.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies
                to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of
                a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function
                returns `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will
                be called at every step.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
        )

    def txt2img_highres(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        num_inference_steps_upscale: int = 20,
        guidance_scale: float = 7.5,
        strength: float = 0.6,
        resize_scale: float = 2.0,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
    ):
        r"""
        Two-pass "hires fix": generate at the base resolution, upscale by `resize_scale`,
        then refine the upscaled image with img2img at the given `strength`.
        """
        # 1. generate the low-resolution base image(s); force PIL output so they can be resized
        base = self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type="pil",
            return_dict=True,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
        )
        if base is None:  # cancelled via is_cancelled_callback
            return None

        # 2. upscale by resize_scale (nearest-neighbor, matching the original interpolate mode)
        target_size = (int(width * resize_scale), int(height * resize_scale))
        resized_image = base.images[0].resize(target_size, resample=PIL_INTERPOLATION["nearest"])

        # 3. refine the upscaled image with img2img, using the dedicated upscale step count
        highres_image = self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=resized_image,
            num_inference_steps=num_inference_steps_upscale,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            is_cancelled_callback=is_cancelled_callback,
            callback_steps=callback_steps,
        )
        return highres_image
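

# Usage sketch (not part of the original commit; the checkpoint id and file name are
# illustrative, and any Stable Diffusion 1.x weights should work):
if __name__ == "__main__":
    pipe = StableDiffusionLongPromptWeightingPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float16,
    ).to("cuda")
    result = pipe.txt2img_highres(
        prompt="a (very detailed:1.2) castle on a hill at sunset, [blurry]",
        negative_prompt="low quality",
        height=512,
        width=512,
        num_inference_steps=50,
        num_inference_steps_upscale=20,
        resize_scale=2.0,
        strength=0.6,
    )
    result.images[0].save("castle_highres.png")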