unknown committed on
Commit
55deede
1 Parent(s): 63fa9c2

Update space

Files changed (2)
  1. app.py +161 -141
  2. requirements.txt +0 -0
app.py CHANGED
@@ -1,154 +1,174 @@
 import gradio as gr
-import numpy as np
-import random

-# import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
-import torch

-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use

-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32

-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)

-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024


-# @spaces.GPU #[uncomment to use ZeroGPU]
-def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
-):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
-
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 640px;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Gradio Template")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024, # Replace with defaults that work for your model
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024, # Replace with defaults that work for your model
-                )
-
            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0, # Replace with defaults that work for your model
-                )

-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=2, # Replace with defaults that work for your model
                )
-
-        gr.Examples(examples=examples, inputs=[prompt])
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
-    )
-
-if __name__ == "__main__":
-    demo.launch()
 import gradio as gr
+from sugg_gene import suggest_gene
+from clothGen import cloth_gen
+from user_dress import user_cloths
+import requests
+import os
+from io import BytesIO
+from PIL import Image
+from cal_compatibility import cal_compatibility


+gen_pic_num = 6
+save_directory = "downloads"


+def get_select_index(evt: gr.SelectData, gallery):
+    print(gallery[evt.index][0])
+    with open(os.path.join(save_directory, f"cloth_intro_{evt.index+1}.txt"), "r") as f:
+        introduction = f.read()
+    return gallery[evt.index][0], introduction


+def update_choices(dropout1, dropout2,):
+    if dropout1 == "男":
+        option = ['倒三角形', '矩形', '苹果形', '沙漏型', '胖型']
+    else:
+        option = ["梨形", "草莓形", "沙漏形", "标准", "苹果形"]
+    dropout2 = gr.Dropdown(choices=option)
+    return dropout2


+with gr.Blocks(theme=gr.themes.Base()) as demo:
+    with gr.Row():
+        # Left panel
+        with gr.Column(scale=1):
            with gr.Row():
+                gr.Markdown(""
+                            "# 用户信息"
+                            "")
            with gr.Row():
+                text_input1 = gr.Textbox(label="用户姓名", min_width=100)
+                text_input2 = gr.Textbox(label="身高/cm", min_width=100)
+                text_input3 = gr.Textbox(label="体重/kg", min_width=100)
+                text_input4 = gr.Textbox(label="腰围/cm", min_width=100)
+                text_input5 = gr.Textbox(label="胸围/cm", min_width=100)
+                text_input6 = gr.Textbox(label="臀围/cm", min_width=100)
+                text_input7 = gr.Textbox(label="肩宽/cm", min_width=100)
+                text_input8 = gr.Textbox(label="腿长/cm", min_width=100)
+                text_input9 = gr.Textbox(label="臂长/cm", min_width=100)
+                dropdown_options1 = ["女", "男"]
+                dropdown_input1 = gr.Dropdown(choices=dropdown_options1, label="性别", min_width=100)
+                dropdown_input2 = gr.Dropdown(choices=["梨形", "草莓形", "沙漏形", "标准", "苹果形"], label="体型分类", min_width=100)
+                dropdown_input1.change(fn=update_choices, inputs=[dropdown_input1, dropdown_input2], outputs=dropdown_input2)
+                dropdown_options3 = ["浅色", "中等偏黄色", "中等偏褐色", "深色"]
+                dropdown_input3 = gr.Dropdown(choices=dropdown_options3, label="肤色", min_width=100)
+                text_input10 = gr.Textbox(label="穿衣风格偏好", min_width=1000)
+                text_input11 = gr.Textbox(label="生活方式和场景需求", min_width=1000)
+                text_input12 = gr.Textbox(label="其他特殊需求", min_width=1000)
+            with gr.Row():
+                user_pic = gr.Image(label="用户照片", value="model.jpg", height=550, width=300)

+        # Right panel
+        with gr.Column(scale=2):
+            with gr.Row():
+                gr.Markdown(""
+                            "# 穿搭建议"
+                            "")
+            with gr.Row():
+                text_output1 = gr.Textbox(label="穿搭建议", lines=12, max_lines=12, interactive=False, show_label=False,
+                                          min_width=1000)
+            submit_button_1 = gr.Button("AI智能分析,生成穿搭建议", min_width=1000)
+            image_output_1 = gr.Image(label="显示图像", value="image 209.png")
+
+            gallery_1 = gr.Gallery(
+                label="服装", elem_id="gallery", interactive=False,
+                value=[
+                    # os.path.join(example_path, '上衣/_WEB_2016_09_26__2016092617451357e8ee2957aa1_TD.jpg'),
+                    # os.path.join(example_path, '上衣/_WEB_2016_09_27__2016092717211057ea3a069c749_TD.jpg'),
+                    # os.path.join(example_path, '上衣/_WEB_2016_09_27__2016092717391657ea3e446ce3f_TD.jpg'),
+                    # os.path.join(example_path, '上衣/_WEB_2016_09_27__2016092717573057ea428a305bc_TD.jpg'),
+                    # os.path.join(example_path, '上衣/_WEB_2016_09_28__2016092810150157eb27a56a631_TD.jpg'),
+                    # os.path.join(example_path, '上衣/_WEB_2016_09_28__2016092810464557eb2f15e1df3_TD.jpg'),
+                ],
+                columns=[4], rows=[2], object_fit="contain", height=250, min_width=450)
+
+            gallery_3 = gr.Gallery(
+                label="配饰", elem_id="gallery",
+                value=[
+                    'downloads/access_1.jpg',
+                    'downloads/access_2.jpg',
+                    'downloads/access_3.jpg',
+                    'downloads/access_4.jpg',
+                    'downloads/access_5.jpg',
+                    'downloads/access_6.jpg',
+                    'downloads/access_7.jpg',
+                    'downloads/access_8.jpg',
+                ],
+                columns=[4], rows=[2], object_fit="contain", height=250, min_width=450)
+            submit_button_2 = gr.Button("AI智能分析,生成民族服饰")
+
+        with gr.Column(scale=2):
+            with gr.Row():
+                gr.Markdown(""
+                            "# 搭配生成"
+                            "")
+            with gr.Row():
+                gallery_4 = gr.Gallery(
+                    label="套装", elem_id="gallery",
+                    value=[],
+                    columns=[3], rows=[1], object_fit="contain", height=180, min_width=450)
+            with gr.Row():
+                submit_button_3 = gr.Button("服饰及搭配兼容性排序")
+            with gr.Row():
+                image_output_5 = gr.Image(label="显示图像", show_label=False, min_width=200, height=350, interactive=False)
+                intro = gr.Textbox(label="服饰介绍", lines=14, max_lines=14)
+            with gr.Row():
+                gallery_user = gr.Gallery(
+                    label="试穿结果",
+                    elem_id="gallery",
+                    value=[],
+                    columns=[3], rows=[2],
+                    object_fit="contain",
+                    min_width=200,
+                    height=350,
                )
+            with gr.Row():
+                submit_button_4 = gr.Button("虚拟试穿")
+            with gr.Row():
+                feedback = gr.Textbox(label="反馈", placeholder="可以从款式、颜色、图案、风格倾向、文化偏好角度进行反馈", lines=1,
+                                      max_lines=1, elem_id="feedback")
+            with gr.Row():
+                submit_button_5 = gr.Button("反馈")
+            with gr.Row():
+                gr.Markdown("""
+                女性体型备注:
+                1. **梨形身材**:臀围比胸围**至少大5.08厘米**
+                2. **草莓形身材**:臀围比胸围**至少小5.08厘米**
+                3. **沙漏形身材**:胸围比腰围**至少大3.81厘米**且腰部线条明显
+                4. **标准身材**:胸围比腰围**至少大3.81厘米**且腰部线条不明显
+                5. **苹果形身材**:胸围比腰围**至少小3.81厘米**
+
+                男性体型备注:
+                1. **倒三角形身材**:肩宽比腰围**至少大10厘米**
+                2. **矩形身材**:肩宽与腰围的差异**小于5厘米**
+                3. **苹果形身材**:腰围比肩宽**至少大7.5厘米**
+                4. **沙漏型身材**:腰围比肩宽或臀围**至少小10厘米**,且肩宽和臀围的差异**小于5厘米**
+                5. **胖型身材**:腰围比胸围或肩宽**至少大10厘米**
+                """)
+
+    submit_button_1.click(fn=suggest_gene,
+                          inputs=[text_input1, text_input2, text_input3, text_input4, text_input5,
+                                  text_input6, text_input7, text_input8, text_input9, dropdown_input1,
+                                  dropdown_input2, dropdown_input3, text_input10, text_input11, text_input12,
+                                  feedback, user_pic],
+                          outputs=[text_output1])
+    submit_button_2.click(fn=cloth_gen,
+                          inputs=dropdown_input1,
+                          outputs=[gallery_1, image_output_5, intro])
+
+    gallery_1.select(fn=get_select_index, inputs=gallery_1, outputs=[image_output_5, intro])
+    submit_button_3.click(fn=cal_compatibility,
+                          inputs=[],
+                          outputs=[gallery_4])
+    submit_button_4.click(fn=user_cloths,
+                          inputs=[user_pic, image_output_5],
+                          outputs=gallery_user)
+    submit_button_5.click(fn=suggest_gene,
+                          inputs=[text_input1, text_input2, text_input3, text_input4, text_input5,
+                                  text_input6, text_input7, text_input8, text_input9, dropdown_input1,
+                                  dropdown_input2, dropdown_input3, text_input10, text_input11, text_input12,
+                                  feedback, user_pic],
+                          outputs=[text_output1])
+
+demo.launch(server_port=7860, share=True)
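
The body-type notes in the final gr.Markdown block spell out concrete thresholds, but the commit leaves their application to the imported helpers (suggest_gene, cloth_gen, cal_compatibility), whose sources are not part of this diff. As a reading aid only, here is a minimal sketch of how those thresholds could be expressed as a classifier; classify_female_shape, classify_male_shape, the defined_waist flag, and the rule ordering are assumptions, not code from this Space.

# Illustrative only: maps the thresholds from the gr.Markdown notes (all in cm)
# onto the 体型分类 dropdown labels used above. Not part of the committed app.py.

def classify_female_shape(bust, waist, hip, defined_waist=True):
    if hip - bust >= 5.08:
        return "梨形"      # hip at least 5.08 cm larger than bust
    if bust - hip >= 5.08:
        return "草莓形"    # hip at least 5.08 cm smaller than bust
    if waist - bust >= 3.81:
        return "苹果形"    # bust at least 3.81 cm smaller than waist
    if bust - waist >= 3.81:
        # the notes split this case by how defined the waistline is
        return "沙漏形" if defined_waist else "标准"
    return "标准"          # borderline cases are not specified in the notes


def classify_male_shape(shoulder, chest, waist, hip):
    # the notes give no precedence for overlapping rules; this order is assumed
    if waist - chest >= 10 or waist - shoulder >= 10:
        return "胖型"      # waist at least 10 cm larger than chest or shoulders
    if (shoulder - waist >= 10 or hip - waist >= 10) and abs(shoulder - hip) < 5:
        return "沙漏型"
    if shoulder - waist >= 10:
        return "倒三角形"  # shoulders at least 10 cm wider than waist
    if waist - shoulder >= 7.5:
        return "苹果形"
    return "矩形"          # shoulders and waist differ by less than 5 cm
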
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ