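"""Gradio app for building Stable Diffusion prompts.

It captions images (GIT/BLIP, CLIP Interrogator, WD14 taggers), rewrites text
into SD-friendly prompts with microsoft/Promptist, free-generates prompts with
succinctly/text2image-prompt-generator, and translates between Chinese and
English via the helpers in utils.translate.
"""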
import random
import re
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline, set_seed
from utils.image2text import git_image2text, w14_image2text, clip_image2text
from utils.singleton import Singleton
from utils.translate import en2zh as translate_en2zh
from utils.translate import zh2en as translate_zh2en
from utils.exif import get_image_info

device = "cuda" if torch.cuda.is_available() else "cpu"


@Singleton
class Models:
    """Lazily loads each model on first attribute access and caches it."""

    def __getattr__(self, item):
        # __getattr__ is called only when normal attribute lookup fails,
        # i.e. the model has not been loaded yet: load it, cache it, return it.
        if item in ('big_model', 'big_processor'):
            self.big_model, self.big_processor = self.load_image2text_model()
        elif item in ('prompter_model', 'prompter_tokenizer'):
            self.prompter_model, self.prompter_tokenizer = self.load_prompter_model()
        elif item in ('text_pipe',):
            self.text_pipe = self.load_text_generation_pipeline()
        else:
            # Raise instead of recursing into __getattr__ forever.
            raise AttributeError(item)
        return getattr(self, item)

    @classmethod
    def load_text_generation_pipeline(cls):
        # GPT-2 fine-tuned to generate text-to-image prompts.
        return pipeline('text-generation', model='succinctly/text2image-prompt-generator')

    @classmethod
    def load_prompter_model(cls):
        # Promptist is GPT-2 based, so the stock gpt2 tokenizer is reused;
        # left-padding with EOS matches its generation setup.
        prompter_model = AutoModelForCausalLM.from_pretrained("microsoft/Promptist")
        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.padding_side = "left"
        return prompter_model, tokenizer
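
    @classmethod
    def load_image2text_model(cls):
        # NOTE: __getattr__ references this loader, but no definition appears
        # in the file. A minimal sketch, assuming a BLIP captioning checkpoint
        # (hypothetical choice); substitute whatever big_model/big_processor
        # are actually meant to be.
        from transformers import BlipForConditionalGeneration, BlipProcessor
        processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
        return model, processor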


models = Models.instance()
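# Note: attribute access on `models` (e.g. models.prompter_model) triggers the
# matching loader on first use; later accesses return the cached object.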


def generate_prompter(plain_text, max_new_tokens=75, num_beams=8, num_return_sequences=8, length_penalty=-1.0):
    # Promptist expects the input formatted as "<text> Rephrase:" and
    # completes it with an optimized Stable Diffusion prompt.
    prompt = plain_text.strip() + " Rephrase:"
    input_ids = models.prompter_tokenizer(prompt, return_tensors="pt").input_ids
    eos_id = models.prompter_tokenizer.eos_token_id
    outputs = models.prompter_model.generate(
        input_ids,
        do_sample=False,
        max_new_tokens=max_new_tokens,
        num_beams=num_beams,
        num_return_sequences=num_return_sequences,
        eos_token_id=eos_id,
        pad_token_id=eos_id,
        length_penalty=length_penalty
    )
    output_texts = models.prompter_tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # Strip the echoed prompt (including the stripped text, so the prefix
    # always matches) and keep only the rewritten candidates.
    result = [output_text.replace(prompt, "").strip() for output_text in output_texts]
    return "\n".join(result)
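
# Usage sketch (hypothetical input), e.g. from a Python shell:
#   print(generate_prompter("a cat sitting on a table"))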


def image_generate_prompter(
        bclip_text,
        w14_text,
        max_new_tokens=75,
        num_beams=8,
        num_return_sequences=8,
        length_penalty=-1.0
):
    # Optimize the BLIP/CLIP caption, then append the WD14 tags to each line.
    result = generate_prompter(
        bclip_text,
        max_new_tokens,
        num_beams,
        num_return_sequences,
        length_penalty
    )
    return "\n".join(
        "{},{}".format(line.strip(), w14_text.strip())
        for line in result.split("\n") if len(line) > 0
    )


def text_generate(text_in_english):
    # Re-seed each call so repeated clicks produce different prompts.
    seed = random.randint(100, 1000000)
    set_seed(seed)
    result = ""
    for _ in range(6):
        sequences = models.text_pipe(text_in_english, max_length=random.randint(60, 90), num_return_sequences=8)
        lines = []
        for sequence in sequences:
            line = sequence['generated_text'].strip()
            # Keep only sequences that actually extend the input and do not
            # end mid-thought.
            if (line != text_in_english
                    and len(line) > (len(text_in_english) + 4)
                    and not line.endswith((':', '-', '—'))):
                lines.append(line)
        result = "\n".join(lines)
        # Strip tokens containing a dot (URLs, filenames) and angle brackets.
        result = re.sub(r'[^ ]+\.[^ ]+', '', result)
        result = result.replace('<', '').replace('>', '')
        # Retry (up to 6 times) until at least one usable line survives.
        if result != '':
            break
    return result, "\n".join(translate_en2zh(line) for line in result.split("\n") if len(line) > 0)
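
# Usage sketch (hypothetical input):
#   english_prompts, chinese_prompts = text_generate("a castle in the clouds")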


with gr.Blocks(title="Prompt Generator") as block:
    with gr.Column():
        with gr.Tab('Generate from Image'):
with gr.Row():
input_image = gr.Image(type='pil')
exif_info = gr.HTML()
            output_blip_or_clip = gr.Textbox(label='Generated Prompt')
            output_w14 = gr.Textbox(label='W14 Prompt')
with gr.Accordion('W14', open=False):
w14_raw_output = gr.Textbox(label="Output (raw string)")
w14_booru_output = gr.Textbox(label="Output (booru string)")
w14_rating_output = gr.Label(label="Rating")
w14_characters_output = gr.Label(label="Output (characters)")
w14_tags_output = gr.Label(label="Output (tags)")
            images_generate_prompter_output = gr.Textbox(lines=6, label='SD-optimized Prompt')
with gr.Row():
img_exif_btn = gr.Button('EXIF')
                img_blip_btn = gr.Button('BLIP Image-to-Text')
                img_w14_btn = gr.Button('W14 Image-to-Text')
                img_clip_btn = gr.Button('CLIP Image-to-Text')
                img_prompter_btn = gr.Button('SD Optimize')
        with gr.Tab('Generate from Text'):
            with gr.Row():
                input_text = gr.Textbox(lines=6, label='Your Idea', placeholder='Type your text here...')
                translate_output = gr.Textbox(lines=6, label='Translation (Prompt Input)')
            generate_prompter_output = gr.Textbox(lines=6, label='SD-optimized Prompt')
            output = gr.Textbox(lines=6, label='Improvised Prompt')
            output_zh = gr.Textbox(lines=6, label='Improvised Prompt (zh)')
            with gr.Row():
                translate_btn = gr.Button('Translate')
                generate_prompter_btn = gr.Button('SD Optimize')
                gpt_btn = gr.Button('Improvise')
        with gr.Tab('Settings'):
            with gr.Accordion('SD Optimization Parameters', open=True):
                max_new_tokens = gr.Slider(1, 512, 100, label='max_new_tokens', step=1)
                num_beams = gr.Slider(1, 30, 6, label='num_beams', step=1)
                num_return_sequences = gr.Slider(1, 30, 6, label='num_return_sequences', step=1)
                length_penalty = gr.Slider(-1.0, 1.0, -1.0, label='length_penalty')
            with gr.Accordion('BLIP Parameters', open=True):
                blip_max_length = gr.Slider(1, 512, 100, label='max_length', step=1)
            with gr.Accordion('CLIP Parameters', open=True):
                clip_mode_type = gr.Radio(['best', 'classic', 'fast', 'negative'], value='best', label='mode_type')
                clip_model_name = gr.Radio(['vit_h_14', 'vit_l_14'], value='vit_h_14', label='model_name')
            with gr.Accordion('WD14 Parameters', open=True):
image2text_model = gr.Radio(
[
"SwinV2",
"ConvNext",
"ConvNextV2",
"ViT",
],
value="ConvNextV2",
label="Model"
)
general_threshold = gr.Slider(
0,
1,
step=0.05,
value=0.35,
label="General Tags Threshold",
)
character_threshold = gr.Slider(
0,
1,
step=0.05,
value=0.85,
label="Character Tags Threshold",
)
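
    # Wire each button to its handler.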
    img_prompter_btn.click(
        fn=image_generate_prompter,
        inputs=[output_blip_or_clip, output_w14, max_new_tokens, num_beams, num_return_sequences, length_penalty],
        outputs=images_generate_prompter_output,
    )
translate_btn.click(
fn=translate_zh2en,
inputs=input_text,
outputs=translate_output
)
    generate_prompter_btn.click(
        fn=generate_prompter,
        inputs=[translate_output, max_new_tokens, num_beams, num_return_sequences, length_penalty],
        outputs=generate_prompter_output
    )
gpt_btn.click(
fn=text_generate,
inputs=translate_output,
outputs=[output, output_zh]
)
img_w14_btn.click(
fn=w14_image2text,
inputs=[input_image, image2text_model, general_threshold, character_threshold],
outputs=[
output_w14,
w14_raw_output,
w14_booru_output,
w14_rating_output,
w14_characters_output,
w14_tags_output
]
)
img_blip_btn.click(
fn=git_image2text,
inputs=[input_image, blip_max_length],
outputs=output_blip_or_clip
)
img_clip_btn.click(
fn=clip_image2text,
inputs=[input_image, clip_mode_type, clip_model_name],
outputs=output_blip_or_clip
)
img_exif_btn.click(
fn=get_image_info,
inputs=input_image,
outputs=exif_info
)

block.queue(max_size=64).launch(show_api=False, debug=True, share=False, server_name='0.0.0.0')
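# Gradio serves on port 7860 by default; run with `python app.py`
# (hypothetical filename).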