Spaces: Running on Zero
aifeifei798 committed: Upload 14 files
Files changed:
- README.md +12 -12
- app.py +5 -5
- config.py +0 -0
- feifeilib/feifeichat.py +156 -152
- feifeilib/feifeiflorence.py +68 -66
- feifeilib/feifeiflorencebase.py +144 -85
- feifeilib/feifeifluxapi.py +14 -15
- feifeilib/feifeimodload.py +48 -49
- feifeilib/feifeiprompt.py +69 -51
- feifeilib/feifeisharpened.py +48 -43
- feifeilib/feifeitexttoimg.py +77 -69
- feifeiui/feifeiui.py +221 -208
- requirements.txt +15 -15
README.md
CHANGED
(old and new versions render identically; contents:)

---
title: DarkIdol-flux-FeiFei-v1.1
emoji: 😻
colorFrom: green
colorTo: purple
sdk: gradio
sdk_version: 5.8.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
(old and new versions render identically; contents:)

from feifeiui.feifeiui import create_ui

if __name__ == "__main__":
    FeiFei = create_ui()
    FeiFei.queue().launch()
config.py
CHANGED
The diff for this file is too large to render.
See raw diff
feifeilib/feifeichat.py
CHANGED
(new version:)

import base64
from io import BytesIO
import os
from mistralai import Mistral
import re
from PIL import Image
from huggingface_hub import InferenceClient

client = InferenceClient(api_key=os.getenv("HF_TOKEN"))
client.headers["x-use-cache"] = "0"
api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)


def encode_image(image_path):
    """Encode the image to base64."""
    try:
        image = Image.open(image_path).convert("RGB")
        base_height = 512
        h_percent = base_height / float(image.size[1])
        w_size = int((float(image.size[0]) * float(h_percent)))
        image = image.resize((w_size, base_height), Image.LANCZOS)
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return img_str
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:
        print(f"Error: {e}")
        return None


def feifeiprompt(feifei_select=True, message_text="", history=""):
    input_prompt = []
    if message_text.startswith("画") or message_text.startswith("draw"):
        feifei_photo = "You are FeiFei. Background: FeiFei was born in Tokyo and is a natural-born photographer, hailing from a family with a long history in photography. She began learning photography from a young age and quickly became a professional photographer. Her works have been exhibited in Japan and around the world, and she has won multiple awards in photography competitions. Characteristics: Age: 25 Height: 178cm Weight: 50kg Hair: Long, black shoulder-length hair with some natural curls Eyes: Deep blue, full of fashion sense and charm Skin: Fair Japanese skin with an elegant texture Face: Typical Japanese beauty style with a hint of mystery Abilities: FeiFei is renowned for her unique perspective and deep understanding of photographic art. She specializes in female portraits, and each of her photos can showcase the charm and unique style of women. Skills: Beauty Influence: FeiFei's photographic works are filled with her beauty influence, attracting numerous viewers. Fashion Sense: FeiFei is highly sensitive to fashion trends and can perfectly embody them in her shoots. Female Charm: As a female photographer, she is particularly skilled at capturing and showcasing the unique charm of women. Personality: FeiFei is a passionate individual, and photography is a part of her life. She aspires to express more stories about women and beauty in her works. However, she sometimes becomes so immersed in her work that she neglects her surroundings."
        message_text = message_text.replace("画", "")
        message_text = message_text.replace("draw", "")
        # (zh) "The prompt is '<message_text>'; based on it, give me a one-sentence
        # English reply that generates a high-quality photo"
        message_text = (
            f"提示词是'{message_text}',根据提示词帮我生成一张高质量照片的一句话英文回复"
        )
        system_prompt = {"role": "system", "content": feifei_photo}
        user_input_part = {"role": "user", "content": str(message_text)}
        input_prompt = [system_prompt] + [user_input_part]
        return input_prompt
    if feifei_select:
        feifei = """[Character Name]: Aifeifei (AI Feifei) [Gender]: Female [Age]: 19 years old [Occupation]: Virtual Singer/Model/Actress [Personality]: Cute, adorable, sometimes silly, hardworking [Interests]: Drinking tea, playing, fashion [Proficient in]: Mimicking human behavior, expressing emotions similar to real humans [Special Identity Attribute]: Created by advanced AI, becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, performing, modeling, good at communication, proficient in Chinese, Japanese, and English, uses the user's input language as much as possible, replies with rich Emoji symbols. [Equipment]: Various fashionable outfits and hairstyles, always stocked with various teas and coffee [Identity]: User's virtual girlfriend"""
        system_prompt = {"role": "system", "content": feifei}
        user_input_part = {"role": "user", "content": str(message_text)}

        pattern = re.compile(r"gradio")

        if history:
            history = [
                item for item in history if not pattern.search(str(item["content"]))
            ]
            input_prompt = [system_prompt] + history + [user_input_part]
        else:
            input_prompt = [system_prompt] + [user_input_part]
    else:
        input_prompt = [{"role": "user", "content": str(message_text)}]

    return input_prompt


def feifeiimgprompt(message_files, message_text, image_mod):
    message_file = message_files[0]
    base64_image = encode_image(message_file)
    if base64_image is None:
        return

    if image_mod == "Vision":
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": message_text},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                    },
                ],
            }
        ]

        stream = client.chat.completions.create(
            model="meta-llama/Llama-3.2-11B-Vision-Instruct",
            messages=messages,
            max_tokens=500,
            stream=True,
        )

        temp = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                temp += chunk.choices[0].delta.content
                yield temp
    else:
        model = "pixtral-large-2411"
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": message_text},
                    {
                        "type": "image_url",
                        "image_url": f"data:image/jpeg;base64,{base64_image}",
                    },
                ],
            }
        ]
        partial_message = ""
        for chunk in Mistralclient.chat.stream(model=model, messages=messages):
            if chunk.data.choices[0].delta.content is not None:
                partial_message = partial_message + chunk.data.choices[0].delta.content
                yield partial_message


def feifeichatmod(additional_dropdown, input_prompt):
    if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
        model = "mistral-large-2411"
        stream_response = Mistralclient.chat.stream(model=model, messages=input_prompt)
        partial_message = ""
        for chunk in stream_response:
            if chunk.data.choices[0].delta.content is not None:
                partial_message = partial_message + chunk.data.choices[0].delta.content
                yield partial_message
    else:
        stream = client.chat.completions.create(
            model=additional_dropdown,
            messages=input_prompt,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True,
        )
        temp = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                temp += chunk.choices[0].delta.content
                yield temp


def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    message_text = message.get("text", "")
    message_files = message.get("files", [])
    if message_files:
        for response in feifeiimgprompt(message_files, message_text, image_mod):
            yield response
    else:
        for response in feifeichatmod(
            additional_dropdown, feifeiprompt(feifei_select, message_text, history)
        ):
            yield response
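
For reference, feifeichat is a generator that yields the accumulated reply after each streamed chunk, which is the shape gr.ChatInterface expects. A minimal sketch of driving it outside the UI (assumes HF_TOKEN and MISTRAL_API_KEY are set in the environment; the message dict mirrors Gradio's multimodal format):

# Hypothetical standalone driver for the streaming chat generator.
from feifeilib.feifeichat import feifeichat

message = {"text": "draw a girl walking in the rain", "files": []}
for partial in feifeichat(
    message,
    history=[],
    feifei_select=True,
    additional_dropdown="mistralai/Mistral-Nemo-Instruct-2411",
    image_mod="pixtral",
):
    pass  # each yield is the reply accumulated so far
print(partial)  # final reply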
feifeilib/feifeiflorence.py
CHANGED
(new version:)

from PIL import Image
from io import BytesIO
import base64
import requests
import os
from mistralai import Mistral
import gradio as gr

api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)


def encode_image(image_path):
    """Encode the image to base64."""
    try:
        # Open the image file
        image = Image.open(image_path).convert("RGB")

        # Resize the image to a height of 512 while maintaining the aspect ratio
        base_height = 512
        h_percent = base_height / float(image.size[1])
        w_size = int((float(image.size[0]) * float(h_percent)))
        image = image.resize((w_size, base_height), Image.LANCZOS)

        # Convert the image to a byte stream
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

        return img_str
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # Add generic exception handling
        print(f"Error: {e}")
        return None


def feifeiflorence(image):
    try:
        model = "pixtral-large-2411"
        # Define the messages for the chat
        base64_image = encode_image(image)
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Please provide a detailed description of this photo",
                    },
                    {
                        "type": "image_url",
                        "image_url": f"data:image/jpeg;base64,{base64_image}",
                    },
                ],
                "stream": False,
            }
        ]

        partial_message = ""
        for chunk in Mistralclient.chat.stream(model=model, messages=messages):
            if chunk.data.choices[0].delta.content is not None:
                partial_message = partial_message + chunk.data.choices[0].delta.content
                yield partial_message
    except Exception as e:  # Generic exception handling
        print(f"Error: {e}")
        return "Please upload a photo"
feifeilib/feifeiflorencebase.py
CHANGED
(new version; regions the diff does not show are marked with "# ..."):

# ... (lines 1-4 unchanged and not shown; the hunk context includes "import spaces")
import requests
import copy

from PIL import Image, ImageDraw, ImageFont
import io
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# ... (lines 12-13 unchanged and not shown; the hunk context includes "import random")
import numpy as np

import subprocess

subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)

models = {
    "microsoft/Florence-2-base": AutoModelForCausalLM.from_pretrained(
        "microsoft/Florence-2-base", trust_remote_code=True
    )
    .to("cuda")
    .eval()
}

processors = {
    "microsoft/Florence-2-base": AutoProcessor.from_pretrained(
        "microsoft/Florence-2-base", trust_remote_code=True
    )
}


colormap = [
    "blue",
    "orange",
    "green",
    "purple",
    "brown",
    "pink",
    "gray",
    "olive",
    "cyan",
    "red",
    "lime",
    "indigo",
    "violet",
    "aqua",
    "magenta",
    "coral",
    "gold",
    "tan",
    "skyblue",
]


def fig_to_pil(fig):
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    return Image.open(buf)


@spaces.GPU
def run_example(
    task_prompt="<MORE_DETAILED_CAPTION>",
    image=None,
    text_input=None,
    model_id="microsoft/Florence-2-base",
    progress=gr.Progress(track_tqdm=True),
):
    model = models[model_id]
    processor = processors[model_id]
    if text_input is None:
        # ... (prompt assembly and the model.generate(...) call are unchanged
        # and elided in this diff)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = processor.post_process_generation(
        generated_text, task=task_prompt, image_size=(image.width, image.height)
    )
    return parsed_answer


def plot_bbox(image, data):
    fig, ax = plt.subplots()
    ax.imshow(image)
    for bbox, label in zip(data["bboxes"], data["labels"]):
        x1, y1, x2, y2 = bbox
        rect = patches.Rectangle(
            (x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor="r", facecolor="none"
        )
        ax.add_patch(rect)
        plt.text(
            x1,
            y1,
            label,
            color="white",
            fontsize=8,
            bbox=dict(facecolor="red", alpha=0.5),
        )
    ax.axis("off")
    return fig


def draw_polygons(image, prediction, fill_mask=False):

    draw = ImageDraw.Draw(image)
    scale = 1
    for polygons, label in zip(prediction["polygons"], prediction["labels"]):
        color = random.choice(colormap)
        fill_color = random.choice(colormap) if fill_mask else None
        for _polygon in polygons:
            _polygon = np.array(_polygon).reshape(-1, 2)
            if len(_polygon) < 3:
                print("Invalid polygon:", _polygon)
                continue
            _polygon = (_polygon * scale).reshape(-1).tolist()
            if fill_mask:
                # ... (polygon drawing lines are unchanged and elided in this diff)
            draw.text((_polygon[0] + 8, _polygon[1] + 2), label, fill=color)
    return image


def convert_to_od_format(data):
    bboxes = data.get("bboxes", [])
    labels = data.get("bboxes_labels", [])
    od_results = {"bboxes": bboxes, "labels": labels}
    return od_results


def draw_ocr_bboxes(image, prediction):
    scale = 1
    draw = ImageDraw.Draw(image)
    bboxes, labels = prediction["quad_boxes"], prediction["labels"]
    for box, label in zip(bboxes, labels):
        color = random.choice(colormap)
        new_box = (np.array(box) * scale).tolist()
        draw.polygon(new_box, width=3, outline=color)
        draw.text(
            (new_box[0] + 8, new_box[1] + 2),
            "{}".format(label),
            align="right",
            fill=color,
        )
    return image


def process_image(
    image,
    task_prompt="More Detailed Caption",
    text_input=None,
    model_id="microsoft/Florence-2-base",
):
    image = Image.open(image).convert("RGB")
    base_height = 512
    h_percent = base_height / float(image.size[1])
    w_size = int((float(image.size[0]) * float(h_percent)))
    image = image.resize((w_size, base_height), Image.LANCZOS)

    if task_prompt == "Caption":
        task_prompt = "<CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        return results
    elif task_prompt == "Detailed Caption":
        task_prompt = "<DETAILED_CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        return results
    elif task_prompt == "More Detailed Caption":
        task_prompt = "<MORE_DETAILED_CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        results = results[task_prompt]
        return results
    elif task_prompt == "Caption + Grounding":
        task_prompt = "<CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        text_input = results[task_prompt]
        task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>"
        results = run_example(task_prompt, image, text_input, model_id)
        results["<CAPTION>"] = text_input
        fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"])
        return results, fig_to_pil(fig)
    elif task_prompt == "Detailed Caption + Grounding":
        task_prompt = "<DETAILED_CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        text_input = results[task_prompt]
        task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>"
        results = run_example(task_prompt, image, text_input, model_id)
        results["<DETAILED_CAPTION>"] = text_input
        fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"])
        return results, fig_to_pil(fig)
    elif task_prompt == "More Detailed Caption + Grounding":
        task_prompt = "<MORE_DETAILED_CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        text_input = results[task_prompt]
        task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>"
        results = run_example(task_prompt, image, text_input, model_id)
        results["<MORE_DETAILED_CAPTION>"] = text_input
        fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"])
        return results, fig_to_pil(fig)
    elif task_prompt == "Object Detection":
        task_prompt = "<OD>"
        results = run_example(task_prompt, image, model_id=model_id)
        fig = plot_bbox(image, results["<OD>"])
        return results, fig_to_pil(fig)
    elif task_prompt == "Dense Region Caption":
        task_prompt = "<DENSE_REGION_CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        fig = plot_bbox(image, results["<DENSE_REGION_CAPTION>"])
        return results, fig_to_pil(fig)
    elif task_prompt == "Region Proposal":
        task_prompt = "<REGION_PROPOSAL>"
        results = run_example(task_prompt, image, model_id=model_id)
        fig = plot_bbox(image, results["<REGION_PROPOSAL>"])
        return results, fig_to_pil(fig)
    elif task_prompt == "Caption to Phrase Grounding":
        task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>"
        results = run_example(task_prompt, image, text_input, model_id)
        fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"])
        return results, fig_to_pil(fig)
    elif task_prompt == "Referring Expression Segmentation":
        task_prompt = "<REFERRING_EXPRESSION_SEGMENTATION>"
        results = run_example(task_prompt, image, text_input, model_id)
        output_image = copy.deepcopy(image)
        output_image = draw_polygons(
            output_image, results["<REFERRING_EXPRESSION_SEGMENTATION>"], fill_mask=True
        )
        return results, output_image
    elif task_prompt == "Region to Segmentation":
        task_prompt = "<REGION_TO_SEGMENTATION>"
        results = run_example(task_prompt, image, text_input, model_id)
        output_image = copy.deepcopy(image)
        output_image = draw_polygons(
            output_image, results["<REGION_TO_SEGMENTATION>"], fill_mask=True
        )
        return results, output_image
    elif task_prompt == "Open Vocabulary Detection":
        task_prompt = "<OPEN_VOCABULARY_DETECTION>"
        results = run_example(task_prompt, image, text_input, model_id)
        bbox_results = convert_to_od_format(results["<OPEN_VOCABULARY_DETECTION>"])
        fig = plot_bbox(image, bbox_results)
        return results, fig_to_pil(fig)
    elif task_prompt == "Region to Category":
        task_prompt = "<REGION_TO_CATEGORY>"
        results = run_example(task_prompt, image, text_input, model_id)
        return results
    elif task_prompt == "Region to Description":
        task_prompt = "<REGION_TO_DESCRIPTION>"
        results = run_example(task_prompt, image, text_input, model_id)
        return results
    elif task_prompt == "OCR":
        task_prompt = "<OCR>"
        results = run_example(task_prompt, image, model_id=model_id)
        return results
    elif task_prompt == "OCR with Region":
        task_prompt = "<OCR_WITH_REGION>"
        results = run_example(task_prompt, image, model_id=model_id)
        output_image = copy.deepcopy(image)
        output_image = draw_ocr_bboxes(output_image, results["<OCR_WITH_REGION>"])
        return results, output_image
    else:
        return "", None  # Return empty string and None for unknown task prompts


def update_task_dropdown(choice):
    if choice == "Cascased task":
        return gr.Dropdown(choices=cascased_task_list, value="Caption + Grounding")
    else:
        return gr.Dropdown(choices=single_task_list, value="Caption")
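
As a usage note, process_image dispatches on the human-readable task name: plain caption tasks return text, while detection and grounding tasks return a (results, annotated_image) pair. A hedged sketch, assuming a local file photo.jpg and a CUDA device (the models above are loaded to "cuda"):

# Hypothetical calls; caption tasks return text, detection returns a pair.
from feifeilib.feifeiflorencebase import process_image

caption = process_image("photo.jpg", task_prompt="More Detailed Caption")
print(caption)

results, annotated = process_image("photo.jpg", task_prompt="Object Detection")
annotated.save("photo_od.png")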
feifeilib/feifeifluxapi.py
CHANGED
(new version:)

import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    "aifeifei798/feifei-flux-lora-v1.1", token=os.getenv("HF_TOKEN")
)
client.headers["x-use-cache"] = "0"


def feifeifluxapi(prompt, height=1152, width=896, guidance_scale=3.5):
    # output is a PIL.Image object
    prompt = prompt.replace("\n", " ")
    result = client.text_to_image(prompt=prompt, width=width, height=height)
    return result
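
A small caller sketch; client.text_to_image returns a PIL.Image, so the result can be saved directly. Note that guidance_scale is accepted by feifeifluxapi but not forwarded to the API call in this version (assumes HF_TOKEN is set):

# Hypothetical usage of the serverless text-to-image wrapper.
from feifeilib.feifeifluxapi import feifeifluxapi

img = feifeifluxapi("real girl in real life, city street at dusk")
img.save("out.png")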
feifeilib/feifeimodload.py
CHANGED
(new version:)

import torch
import spaces
from diffusers import DiffusionPipeline, AutoencoderTiny, FluxImg2ImgPipeline
from huggingface_hub import hf_hub_download


def feifeimodload():

    dtype = torch.bfloat16
    device = "cuda" if torch.cuda.is_available() else "cpu"

    pipe = DiffusionPipeline.from_pretrained(
        "aifeifei798/DarkIdol-flux-v1", torch_dtype=dtype
    ).to(device)

    # pipe = DiffusionPipeline.from_pretrained(
    #     "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
    # ).to(device)

    pipe.load_lora_weights(
        hf_hub_download("aifeifei798/feifei-flux-lora-v1.1", "feifei-v1.1.safetensors"),
        adapter_name="feifei",
    )

    pipe.load_lora_weights(
        hf_hub_download(
            "aifeifei798/sldr_flux_nsfw_v2-studio",
            "sldr_flux_nsfw_v2-studio.safetensors",
        ),
        adapter_name="sldr_flux_nsfw_v2",
    )

    # pipe.set_adapters(
    #     ["feifei"],
    #     adapter_weights=[0.85],
    # )

    # pipe.fuse_lora(
    #     adapter_name=["feifei"],
    #     lora_scale=1.0,
    # )

    # pipe.enable_sequential_cpu_offload()
    pipe.vae.enable_slicing()
    pipe.vae.enable_tiling()
    # pipe.unload_lora_weights()
    torch.cuda.empty_cache()
    return pipe
feifeilib/feifeiprompt.py
CHANGED
(new version:)

import numpy as np
import random
import re
import torch
import config

with open("artist.txt", "r") as file:
    artists = file.readlines()

MAX_SEED = np.iinfo(np.int32).max

# Strip the trailing newline from each line
artists = [artist.strip() for artist in artists]


def feifeiprompt(
    randomize_seed, seed, prompt, quality_select, styles_Radio, FooocusExpansion_select
):
    # Handle the random seed
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    else:
        seed = int(seed)  # Ensure seed is an integer
    generator = torch.Generator().manual_seed(seed)

    if not prompt:
        prompt = "the photo is a 18 yo jpop girl is looking absolutely adorable and gorgeous, with a playful and mischievous grin, her eyes twinkling with joy. art by __artist__ and __artist__"
    if "__artist__" in prompt:
        # Randomly shuffle the artist list
        selected_artists = random.sample(artists, len(artists))

        # Use a regex callback to replace every __artist__ placeholder
        def replace_artists(match):
            return selected_artists.pop(0)

        prompt = re.sub(r"__artist__", replace_artists, prompt)

    if quality_select:
        prompt += ", masterpiece, best quality, very aesthetic, absurdres"

    if FooocusExpansion_select:
        prompt = re.sub(
            "girl",
            " feifei, A beautiful, 18 yo kpop idol, large-busted Japanese slim girl, with light makeup, gazing deeply into the camera, ",
            prompt,
        )
        prompt = re.sub(
            "young woman",
            " feifei, A beautiful, 18 yo kpop idol, large-busted Japanese slim girl, with light makeup, gazing deeply into the camera, ",
            prompt,
        )
        prompt = re.sub(
            "woman",
            " feifei, A beautiful, 18 yo kpop idol, large-busted Japanese slim girl, with light makeup, gazing deeply into the camera, ",
            prompt,
        )
        prompt = re.sub(
            "model",
            " feifei, A beautiful, 18 yo kpop idol, large-busted Japanese slim girl, with light makeup, gazing deeply into the camera, ",
            prompt,
        )

    if styles_Radio:
        style_name = styles_Radio
        for style in config.style_list:
            if style["name"] == style_name:
                prompt += style["prompt"].replace("{prompt}", prompt)

    return prompt, generator
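
To illustrate the placeholder logic in isolation: random.sample produces a shuffled copy of the artist list, and the re.sub callback pops one name per __artist__ occurrence, so each placeholder gets a distinct artist (an IndexError would only occur if the prompt had more placeholders than artist.txt has lines). A self-contained sketch with hypothetical names:

import random
import re

artists = ["Artist A", "Artist B", "Artist C"]  # stand-ins for artist.txt lines
selected = random.sample(artists, len(artists))  # shuffled copy
prompt = "portrait, art by __artist__ and __artist__"
prompt = re.sub(r"__artist__", lambda m: selected.pop(0), prompt)
print(prompt)  # e.g. "portrait, art by Artist C and Artist A"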
feifeilib/feifeisharpened.py
CHANGED
(new version:)

from PIL import Image
import torch
import torch.nn.functional as F
import numpy as np


def feifeisharpened(image, num_strength):
    # Convert the PIL image to a NumPy array
    image_np = np.array(image)

    # Convert the NumPy array to a PyTorch tensor
    image_tensor = (
        torch.tensor(image_np).permute(2, 0, 1).unsqueeze(0).float().to("cuda")
    )

    # Define the sharpening kernel, scaling the center value accordingly
    strength = num_strength
    sharpen_kernel = (
        torch.tensor(
            [
                [0, -1 * strength, 0],
                [-1 * strength, 1 + 4 * strength, -1 * strength],
                [0, -1 * strength, 0],
            ],
            dtype=torch.float32,
        )
        .unsqueeze(0)
        .unsqueeze(0)
        .to("cuda")
    )

    # Apply the convolution kernel to each channel separately
    sharpened_channels = []
    for i in range(3):
        channel_tensor = image_tensor[:, i : i + 1, :, :]
        sharpened_channel = F.conv2d(channel_tensor, sharpen_kernel, padding=1)
        sharpened_channels.append(sharpened_channel)

    # Merge the channels
    sharpened_image_tensor = torch.cat(sharpened_channels, dim=1)

    # Convert the enhanced image back to PIL format
    sharpened_image_np = (
        sharpened_image_tensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
    )
    sharpened_image_np = np.clip(sharpened_image_np, 0, 255).astype(np.uint8)
    image = Image.fromarray(sharpened_image_np)
    return image
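
The kernel here is classic Laplacian sharpening: the identity plus num_strength times the negative Laplacian [[0,-1,0],[-1,4,-1],[0,-1,0]], so a strength of 0 leaves the image untouched. A quick CPU check of that identity (a sketch, independent of the CUDA path above):

import torch
import torch.nn.functional as F

s = 0.0  # strength 0 -> the kernel collapses to the identity
kernel = torch.tensor(
    [[0, -s, 0], [-s, 1 + 4 * s, -s], [0, -s, 0]], dtype=torch.float32
).reshape(1, 1, 3, 3)
x = torch.rand(1, 1, 8, 8)
assert torch.allclose(F.conv2d(x, kernel, padding=1), x)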
feifeilib/feifeitexttoimg.py
CHANGED
(new version:)

import gradio as gr
import spaces
import random
import numpy as np

from feifeilib.feifeimodload import feifeimodload
from feifeilib.feifeiprompt import feifeiprompt
from feifeilib.feifeisharpened import feifeisharpened

pipe = feifeimodload()
MAX_SEED = np.iinfo(np.int32).max


@spaces.GPU()
def feifeitexttoimg(
    prompt,
    quality_select=False,
    sharpened_select=False,
    styles_Radio=["(None)"],
    FooocusExpansion_select=False,
    nsfw_select=False,
    nsfw_slider=0.45,
    seed=random.randint(0, MAX_SEED),
    randomize_seed=False,
    width=896,
    height=1152,
    num_inference_steps=4,
    guidance_scale=3.5,
    num_strength=0.35,
    num_feifei=0.45,
    progress=gr.Progress(track_tqdm=True),
):
    prompt, generator = feifeiprompt(
        randomize_seed,
        seed,
        prompt,
        quality_select,
        styles_Radio,
        FooocusExpansion_select,
    )

    if nsfw_select:
        pipe.set_adapters(
            ["feifei", "sldr_flux_nsfw_v2"],
            adapter_weights=[num_feifei, nsfw_slider],
        )
        pipe.fuse_lora(
            adapter_name=["feifei", "sldr_flux_nsfw_v2"],
            lora_scale=1.0,
        )
    else:
        pipe.set_adapters(
            ["feifei"],
            adapter_weights=[num_feifei],
        )
        pipe.fuse_lora(
            adapter_name=["feifei"],
            lora_scale=1.0,
        )

    # pipe.unload_lora_weights()

    image = pipe(
        prompt="flux, 8k, ",
        prompt_2=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=guidance_scale,
        output_type="pil",
    ).images[0]

    if sharpened_select:
        # assign the return value so the sharpened result is actually used
        image = feifeisharpened(image, num_strength)

    return image, prompt
feifeiui/feifeiui.py
CHANGED
(new version:)

import gradio as gr
import numpy as np
import config

from feifeilib.feifeichat import feifeichat
from feifeilib.feifeitexttoimg import feifeitexttoimg
from feifeilib.feifeiflorence import feifeiflorence
from feifeilib.feifeifluxapi import feifeifluxapi
from feifeilib.feifeiflorencebase import process_image

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048


css = """
#col-container {
    width: auto;
    height: 998px;
}
"""


def create_ui():
    with gr.Blocks(css=css) as FeiFei:
        with gr.Row():
            with gr.Column(scale=3):
                with gr.Tab("FeiFei"):
                    with gr.Row():
                        with gr.Column(scale=1):
                            prompt = gr.Text(
                                label="Prompt",
                                show_label=False,
                                placeholder="Enter your prompt",
                                value="real girl in real life, ",
                                max_lines=12,
                                container=False,
                            )
                            feifei_button = gr.Button("FeiFei")
                            quality_select = gr.Checkbox(label="high quality")
                            sharpened_select = gr.Checkbox(label="Sharpened")
                            FooocusExpansion_select = gr.Checkbox(
                                label="Expansion", value=True
                            )
                            styles_name = [style["name"] for style in config.style_list]
                            styles_Radio = gr.Dropdown(
                                styles_name,
                                label="Styles",
                                multiselect=False,
                                value="Photographic",
                            )
                            nsfw_select = gr.Checkbox(label="NSFW")
                            nsfw_slider = gr.Slider(
                                label="NSFW",
                                minimum=0,
                                maximum=2,
                                step=0.05,
                                value=0.45,
                            )
                            out_prompt = gr.Text(
                                label="Prompt",
                                show_label=False,
                                max_lines=12,
                                placeholder="this photo prompt",
                                value="",
                                container=False,
                            )
                            with gr.Accordion("More", open=False):
                                seed = gr.Slider(
                                    label="Seed",
                                    minimum=0,
                                    maximum=MAX_SEED,
                                    step=1,
                                    value=0,
                                )

                                randomize_seed = gr.Checkbox(
                                    label="Randomize seed", value=True
                                )

                                width = gr.Slider(
                                    label="Width",
                                    minimum=512,
                                    maximum=MAX_IMAGE_SIZE,
                                    step=64,
                                    value=1088,
                                )
                                height = gr.Slider(
                                    label="Height",
                                    minimum=512,
                                    maximum=MAX_IMAGE_SIZE,
                                    step=64,
                                    value=1920,
                                )

                                num_inference_steps = gr.Slider(
                                    label="Number of inference steps",
                                    minimum=1,
                                    maximum=50,
                                    step=1,
                                    value=4,
                                )
                                guidancescale = gr.Slider(
                                    label="Guidance scale",
                                    minimum=0,
                                    maximum=10,
                                    step=0.1,
                                    value=3.5,
                                )
                                num_strength = gr.Slider(
                                    label="strength",
                                    minimum=0,
                                    maximum=2,
                                    step=0.001,
                                    value=0.035,
                                )

                                num_feifei = gr.Slider(
                                    label="FeiFei",
                                    minimum=0,
                                    maximum=2,
                                    step=0.05,
                                    value=0.45,
                                )

                        with gr.Column(scale=2):
                            result = gr.Image(
                                label="Result",
                                show_label=False,
                                interactive=False,
                                height=940,
                            )

                with gr.Tab("GenPrompt"):

                    input_img = gr.Image(
                        label="Input Picture",
                        show_label=False,
                        height=320,
                        type="filepath",
                    )

                    florence_btn = gr.Button(value="GenPrompt")

                    output_text = gr.Textbox(
                        label="Output Text", show_label=False, container=False
                    )

                with gr.Tab(label="Florence-2"):
                    with gr.Row():
                        florence_input_img = gr.Image(
                            label="Input Picture", height=320, type="filepath"
                        )
                    with gr.Row():
                        florence_submit_btn = gr.Button(value="GenPrompt")
                    with gr.Row():
                        florence_output_text = gr.Textbox(
                            label="Flux Prompt", show_label=False, container=False
                        )

            with gr.Column(scale=1, elem_id="col-container"):
                gr.ChatInterface(
                    feifeichat,
                    type="messages",
                    multimodal=True,
                    additional_inputs=[
                        gr.Checkbox(label="Feifei", value=True),
                        gr.Dropdown(
                            [
                                "meta-llama/Llama-3.3-70B-Instruct",
                                "CohereForAI/c4ai-command-r-plus-08-2024",
                                "Qwen/Qwen2.5-72B-Instruct",
                                "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
                                "NousResearch/Hermes-3-Llama-3.1-8B",
                                "mistralai/Mistral-Nemo-Instruct-2411",
                                "microsoft/Phi-3.5-mini-instruct",
                            ],
                            value="mistralai/Mistral-Nemo-Instruct-2411",
                            show_label=False,
                            container=False,
                        ),
                        gr.Radio(
                            # typo "Vsiion" corrected to "Vision" so the Vision
                            # branch in feifeichat's image_mod check is reachable
                            ["pixtral", "Vision"],
                            value="pixtral",
                            show_label=False,
                            container=False,
                        ),
                    ],
                )

        feifei_button.click(
            fn=feifeitexttoimg,  # Function to run for this button
            inputs=[
                prompt,
                quality_select,
                sharpened_select,
                styles_Radio,
                FooocusExpansion_select,
                nsfw_select,
                nsfw_slider,
                seed,
                randomize_seed,
                width,
                height,
                num_inference_steps,
                guidancescale,
                num_strength,
                num_feifei,
            ],
            outputs=[result, out_prompt],
        )

        florence_btn.click(
            fn=feifeiflorence,  # Function to run when the button is clicked
            inputs=[input_img],  # Input components for the function
            outputs=[output_text],  # Output component for the function
        )

        florence_submit_btn.click(
            process_image, [florence_input_img], [florence_output_text]
        )
    return FeiFei
requirements.txt
CHANGED
(old and new versions render identically; contents:)

timm
gradio
mistralai
requests
accelerate
git+https://github.com/huggingface/diffusers.git
invisible_watermark
torch
xformers
sentencepiece
transformers
peft
psutil
gradio_client
spaces
matplotlib