Yardenfren committed on
Commit
6625c7b
1 Parent(s): 5a1bf4f

Update app_inference.py

Browse files
Files changed (1) hide show
  1. app_inference.py +102 -108
app_inference.py CHANGED
@@ -43,6 +43,9 @@ body {
43
  }
44
  """
45
 
 
 
 
46
 
47
  def get_choices(hf_token):
48
  api = HfApi(token=hf_token)
@@ -129,112 +132,103 @@ class InferenceUtil:
129
  raise type(e)(f'failed to update_model_info, due to: {e}')
130
 
131
 
132
- def create_inference_demo(pipe, #: InferencePipeline,
133
- hf_token: str | None = None) -> gr.Blocks:
134
- with gr.Blocks(css=css) as demo:
135
- with gr.Row(elem_classes="gr-row"):
136
- with gr.Column():
137
- with gr.Group(elem_classes="lora-column"):
138
- gr.Markdown('## Content B-LoRA')
139
- content_checkbox = gr.Checkbox(label='Use Content Only', value=False)
140
- content_lora_model_id = gr.Dropdown(label='Model ID', choices=[])
141
- content_prompt = gr.Text(label='Content instance prompt', interactive=False, max_lines=1)
142
- content_image = gr.Image(label='Content Image', elem_classes="gr-image")
143
- with gr.Column():
144
- with gr.Group(elem_classes="lora-column"):
145
- gr.Markdown('## Style B-LoRA')
146
- style_checkbox = gr.Checkbox(label='Use Style Only', value=False)
147
- style_lora_model_id = gr.Dropdown(label='Model ID', choices=[])
148
- style_prompt = gr.Text(label='Style instance prompt', interactive=False, max_lines=1)
149
- style_image = gr.Image(label='Style Image', elem_classes="gr-image")
150
- with gr.Row(elem_classes="gr-row"):
151
- with gr.Column():
152
- with gr.Group():
153
- prompt = gr.Textbox(
154
- label='Prompt',
155
- max_lines=1,
156
- placeholder='Example: "A [c] in [s] style"'
157
- )
158
- result = gr.Image(label='Result')
159
- with gr.Accordion('Other Parameters', open=False, elem_classes="gr-accordion"):
160
- content_alpha = gr.Slider(label='Content B-LoRA alpha',
161
- minimum=0,
162
- maximum=2,
163
- step=0.05,
164
- value=1)
165
- style_alpha = gr.Slider(label='Style B-LoRA alpha',
166
- minimum=0,
167
- maximum=2,
168
- step=0.05,
169
- value=1)
170
- seed = gr.Slider(label='Seed',
171
- minimum=0,
172
- maximum=100000,
173
- step=1,
174
- value=8888)
175
- num_steps = gr.Slider(label='Number of Steps',
176
  minimum=0,
177
- maximum=100,
178
- step=1,
179
- value=50)
180
- guidance_scale = gr.Slider(label='CFG Scale',
181
- minimum=0,
182
- maximum=50,
183
- step=0.1,
184
- value=7.5)
185
-
186
- run_button = gr.Button('Generate')
187
- demo.load(demo_init, inputs=[],
188
- outputs=[content_lora_model_id, content_prompt, content_image, style_lora_model_id, style_prompt,
189
- style_image, prompt], queue=False, show_progress="hidden")
190
- content_lora_model_id.change(
191
- fn=app.update_model_info,
192
- inputs=content_lora_model_id,
193
- outputs=[
194
- content_prompt,
195
- content_image,
196
- ])
197
- style_lora_model_id.change(
198
- fn=app.update_model_info,
199
- inputs=style_lora_model_id,
200
- outputs=[
201
- style_prompt,
202
- style_image,
203
- ])
204
- style_prompt.change(
205
- fn=lambda content_blora_prompt,
206
- style_blora_prompt: f'{content_blora_prompt} in {style_blora_prompt[0].lower() + style_blora_prompt[1:]} style' if style_blora_prompt else content_blora_prompt,
207
- inputs=[content_prompt, style_prompt],
208
- outputs=prompt,
209
- )
210
- content_prompt.change(
211
- fn=lambda content_blora_prompt,
212
- style_blora_prompt: f'{content_blora_prompt} in {style_blora_prompt[0].lower() + style_blora_prompt[1:]} style' if content_blora_prompt else style_blora_prompt,
213
- inputs=[content_prompt, style_prompt],
214
- outputs=prompt,
215
- )
216
- content_checkbox.change(toggle_column, inputs=[content_checkbox],
217
- outputs=[style_lora_model_id])
218
- style_checkbox.change(toggle_column, inputs=[style_checkbox],
219
- outputs=[content_lora_model_id])
220
- inputs = [
221
- content_lora_model_id,
222
- style_lora_model_id,
223
- prompt,
224
- content_alpha,
225
- style_alpha,
226
- seed,
227
- num_steps,
228
- guidance_scale,
229
- ]
230
- prompt.submit(fn=pipe.run, inputs=inputs, outputs=result)
231
- run_button.click(fn=pipe.run, inputs=inputs, outputs=result)
232
- return demo
233
-
234
-
235
if __name__ == '__main__':
    # Script entry point: build the shared singletons, construct the UI,
    # and serve it with a bounded request queue.
    hf_token = os.getenv('HF_TOKEN')  # None when the env var is unset
    pipe = InferencePipeline(hf_token)
    # Must exist before the UI is built: create_inference_demo reads the
    # module-level `app` for its .change() callbacks.
    app = InferenceUtil(hf_token)
    demo = create_inference_demo(pipe, hf_token)
    demo.queue(max_size=10).launch(share=False)
 
 
 
 
 
 
 
 
43
  }
44
  """
45
 
46
# Module-level singletons shared by the UI definition below.
hf_token = os.getenv('HF_TOKEN')  # None when the env var is unset
pipe = InferencePipeline(hf_token)
app = InferenceUtil(hf_token)
49
 
50
  def get_choices(hf_token):
51
  api = HfApi(token=hf_token)
 
132
  raise type(e)(f'failed to update_model_info, due to: {e}')
133
 
134
 
135
+
136
# Build the B-LoRA inference UI at module scope (Hugging Face Spaces style:
# the Blocks app is defined and launched directly rather than via a factory).
# Uses the module-level `css`, `app`, `pipe`, `demo_init` and `toggle_column`.
with gr.Blocks(css=css) as demo:
    # Row 1: side-by-side pickers for the content and the style B-LoRA.
    with gr.Row(elem_classes="gr-row"):
        with gr.Column():
            with gr.Group(elem_classes="lora-column"):
                gr.Markdown('## Content B-LoRA')
                content_checkbox = gr.Checkbox(label='Use Content Only', value=False)
                content_lora_model_id = gr.Dropdown(label='Model ID', choices=[])
                # Read-only previews, filled when a model is selected below.
                content_prompt = gr.Text(label='Content instance prompt', interactive=False, max_lines=1)
                content_image = gr.Image(label='Content Image', elem_classes="gr-image")
        with gr.Column():
            with gr.Group(elem_classes="lora-column"):
                gr.Markdown('## Style B-LoRA')
                style_checkbox = gr.Checkbox(label='Use Style Only', value=False)
                style_lora_model_id = gr.Dropdown(label='Model ID', choices=[])
                style_prompt = gr.Text(label='Style instance prompt', interactive=False, max_lines=1)
                style_image = gr.Image(label='Style Image', elem_classes="gr-image")
    # Row 2: free-text prompt, generated result, and tuning parameters.
    with gr.Row(elem_classes="gr-row"):
        with gr.Column():
            with gr.Group():
                prompt = gr.Textbox(
                    label='Prompt',
                    max_lines=1,
                    placeholder='Example: "A [c] in [s] style"'
                )
                result = gr.Image(label='Result')
                # NOTE(review): nesting reconstructed from a diff — the
                # accordion and button appear after result; confirm exact
                # layout against the original file (layout only, wiring
                # below is unambiguous).
                with gr.Accordion('Other Parameters', open=False, elem_classes="gr-accordion"):
                    content_alpha = gr.Slider(label='Content B-LoRA alpha',
                                              minimum=0,
                                              maximum=2,
                                              step=0.05,
                                              value=1)
                    style_alpha = gr.Slider(label='Style B-LoRA alpha',
                                            minimum=0,
                                            maximum=2,
                                            step=0.05,
                                            value=1)
                    seed = gr.Slider(label='Seed',
                                     minimum=0,
                                     maximum=100000,
                                     step=1,
                                     value=8888)
                    num_steps = gr.Slider(label='Number of Steps',
                                          minimum=0,
                                          maximum=100,
                                          step=1,
                                          value=50)
                    guidance_scale = gr.Slider(label='CFG Scale',
                                               minimum=0,
                                               maximum=50,
                                               step=0.1,
                                               value=7.5)
                run_button = gr.Button('Generate')
    # Populate dropdowns/previews and the initial prompt on page load.
    demo.load(demo_init, inputs=[],
              outputs=[content_lora_model_id, content_prompt, content_image, style_lora_model_id, style_prompt,
                       style_image, prompt], queue=False, show_progress="hidden")
    # Selecting a model refreshes its instance prompt and preview image.
    content_lora_model_id.change(
        fn=app.update_model_info,
        inputs=content_lora_model_id,
        outputs=[
            content_prompt,
            content_image,
        ])
    style_lora_model_id.change(
        fn=app.update_model_info,
        inputs=style_lora_model_id,
        outputs=[
            style_prompt,
            style_image,
        ])
    # Keep the combined prompt in sync with either side:
    # "<content> in <style, first char lowered> style"; falls back to the
    # non-empty side when the other is empty.
    style_prompt.change(
        fn=lambda content_blora_prompt,
                  style_blora_prompt: f'{content_blora_prompt} in {style_blora_prompt[0].lower() + style_blora_prompt[1:]} style' if style_blora_prompt else content_blora_prompt,
        inputs=[content_prompt, style_prompt],
        outputs=prompt,
    )
    content_prompt.change(
        fn=lambda content_blora_prompt,
                  style_blora_prompt: f'{content_blora_prompt} in {style_blora_prompt[0].lower() + style_blora_prompt[1:]} style' if content_blora_prompt else style_blora_prompt,
        inputs=[content_prompt, style_prompt],
        outputs=prompt,
    )
    # "Use X only" checkboxes act on the *opposite* model picker —
    # presumably toggling its interactivity; verify toggle_column.
    content_checkbox.change(toggle_column, inputs=[content_checkbox],
                            outputs=[style_lora_model_id])
    style_checkbox.change(toggle_column, inputs=[style_checkbox],
                          outputs=[content_lora_model_id])
    # Shared argument list for both inference triggers below.
    inputs = [
        content_lora_model_id,
        style_lora_model_id,
        prompt,
        content_alpha,
        style_alpha,
        seed,
        num_steps,
        guidance_scale,
    ]
    # Enter in the prompt box and the Generate button both run inference.
    prompt.submit(fn=pipe.run, inputs=inputs, outputs=result)
    run_button.click(fn=pipe.run, inputs=inputs, outputs=result)

# Serve with a bounded queue; share=False keeps the app local to the host.
demo.queue(max_size=10).launch(share=False)