Update app.py
app.py CHANGED
@@ -3,9 +3,89 @@ import gradio as gr
 import numpy as np
 import spaces
 import torch
-from diffusers import AutoPipelineForText2Image, AutoencoderKL
+from diffusers import AutoPipelineForText2Image, AutoencoderKL
 from compel import Compel, ReturnedEmbeddingsType
 
+import re
+
+def tokenize_line(text, tokenizer):
+    tokens = tokenizer.tokenize(text)
+    return tokens
+
+def parse_prompt_attention(text):
+    res = []
+    pattern = re.compile(r"\(([^)]+):([\d\.]+)\)")
+    matches = pattern.findall(text)
+    for match in matches:
+        res.append((match[0], float(match[1])))
+    return res
+
+def prompt_attention_to_invoke_prompt(attention_list):
+    prompt = ""
+    for item in attention_list:
+        prompt += f"({item[0]}:{item[1]}) "
+    return prompt.strip()
+
+def merge_embeds(prompts, compel):
+    embeds = []
+    pooled_embeds = []
+    for prompt in prompts:
+        conditioning, pooled = compel(prompt)
+        embeds.append(conditioning)
+        pooled_embeds.append(pooled)
+    # Merge the embeddings; a simple mean is used here and can be adjusted as needed
+    merged_embed = torch.mean(torch.stack(embeds), dim=0)
+    merged_pooled = torch.mean(torch.stack(pooled_embeds), dim=0)
+    return merged_embed, merged_pooled
+
+def get_embed_new(prompt, pipeline, compel, only_convert_string=False, compel_process_sd=False):
+    if compel_process_sd:
+        return merge_embeds(tokenize_line(prompt, pipeline.tokenizer), compel)
+    else:
+        # Fix a bug where the weight conversion applied excessive emphasis
+        prompt = prompt.replace("((", "(").replace("))", ")")
+
+        # Convert to Compel
+        attention = parse_prompt_attention(prompt)
+        global_attention_chunks = []
+
+        for att in attention:
+            for chunk in att[0].split(','):
+                temp_prompt_chunks = tokenize_line(chunk, pipeline.tokenizer)
+                for small_chunk in temp_prompt_chunks:
+                    temp_dict = {
+                        "weight": round(att[1], 2),
+                        "length": len(pipeline.tokenizer.tokenize(f'{small_chunk},')),
+                        "prompt": f'{small_chunk},'
+                    }
+                    global_attention_chunks.append(temp_dict)
+
+        max_tokens = pipeline.tokenizer.model_max_length - 2
+        global_prompt_chunks = []
+        current_list = []
+        current_length = 0
+        for item in global_attention_chunks:
+            if current_length + item['length'] > max_tokens:
+                global_prompt_chunks.append(current_list)
+                current_list = [[item['prompt'], item['weight']]]
+                current_length = item['length']
+            else:
+                if not current_list:
+                    current_list.append([item['prompt'], item['weight']])
+                else:
+                    if item['weight'] != current_list[-1][1]:
+                        current_list.append([item['prompt'], item['weight']])
+                    else:
+                        current_list[-1][0] += f" {item['prompt']}"
+                current_length += item['length']
+        if current_list:
+            global_prompt_chunks.append(current_list)
+
+        if only_convert_string:
+            return ' '.join([prompt_attention_to_invoke_prompt(i) for i in global_prompt_chunks])
+
+        return merge_embeds([prompt_attention_to_invoke_prompt(i) for i in global_prompt_chunks], compel)
+
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>你现在运行在CPU上 但是此项目只支持GPU.</p>"
 
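As a quick standalone check of the prompt-weight parsing helpers added above (a minimal sketch, not part of the committed file; the example prompt string is made up), the two pure functions can be exercised without a pipeline or GPU:

text = "(masterpiece:1.2) (best quality:0.8)"
weights = parse_prompt_attention(text)
print(weights)                                      # [('masterpiece', 1.2), ('best quality', 0.8)]
print(prompt_attention_to_invoke_prompt(weights))   # (masterpiece:1.2) (best quality:0.8)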
@@ -21,7 +101,6 @@ if torch.cuda.is_available():
         use_safetensors=True,
         add_watermarker=False
     )
-    #pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
     pipe.to("cuda")
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
@@ -45,14 +124,28 @@ def infer(
 ):
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
-
-
-
+    # Initialize the Compel instance
+    compel_instance = Compel(
+        tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
+        text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
+        returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+        requires_pooled=[False, True]
+    )
+    # Get the prompt embeddings via get_embed_new
+    conditioning, pooled = get_embed_new(prompt, pipe, compel_instance)
+
+    # Handle the negative prompt
+    if use_negative_prompt and negative_prompt:
+        negative_conditioning, negative_pooled = get_embed_new(negative_prompt, pipe, compel_instance)
+    else:
+        negative_conditioning = None
+        negative_pooled = None
+
     image = pipe(
-        #prompt=prompt,
         prompt_embeds=conditioning,
         pooled_prompt_embeds=pooled,
-
+        negative_prompt_embeds=negative_conditioning,
+        negative_pooled_prompt_embeds=negative_pooled,
         width=width,
         height=height,
         guidance_scale=guidance_scale,
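One design note on the infer() changes above: the Compel instance is re-created on every request. A possible variant, sketched here under the assumption that it sits inside the existing `if torch.cuda.is_available():` block right after pipe.to("cuda") (not part of the commit), builds it once and lets infer() reuse it:

    # Built once next to the pipeline; infer() would then reference this shared instance
    # instead of constructing its own compel_instance on each call.
    compel_instance = Compel(
        tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
        text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
        returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
        requires_pooled=[False, True],
    )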
@@ -60,6 +153,7 @@ def infer(
         generator=generator,
         use_resolution_binning=use_resolution_binning,
     ).images[0]
+    image.save("output_image.png")
     return image, seed
 
 examples = [
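The image.save() call added above writes every result to the same fixed path, so successive or concurrent requests overwrite each other's output. A minimal sketch of one alternative (the uuid-based filename is illustrative only, not part of the commit):

import uuid

out_path = f"output_{uuid.uuid4().hex}.png"   # unique file name per generated image
image.save(out_path)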
@@ -74,7 +168,7 @@ footer {
 visibility: hidden
 }
 '''
-
+
 with gr.Blocks(css=css) as demo:
     gr.Markdown("""# 梦羽的模型生成器
 ### 快速生成NoobAIXL v0.5的模型图片 V1.0模型在另一个项目上""")
@@ -145,7 +239,7 @@ with gr.Blocks(css=css) as demo:
         outputs=[result, seed],
         fn=infer
     )
-
+
     use_negative_prompt.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_negative_prompt,
@@ -153,7 +247,7 @@ with gr.Blocks(css=css) as demo:
     )
 
     gr.on(
-        triggers=[prompt.submit,run_button.click],
+        triggers=[prompt.submit, run_button.click],
         fn=infer,
         inputs=[
            prompt,