mokady committed
Commit 7565e99
1 Parent(s): e2160a6

Update app.py

Files changed (1)
  1. app.py +35 -28
app.py CHANGED
@@ -6,7 +6,26 @@ import diffusers
 from share_btn import community_icon_html, loading_icon_html, share_js
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to(device)
+unet = UNet2DConditionModel.from_pretrained(
+    "briaai/BRIA-2.3-Inpainting",
+    subfolder="unet",
+    torch_dtype=torch.float16,
+)
+
+scheduler = DDIMScheduler.from_pretrained("briaai/BRIA-2.3", subfolder="scheduler",clip_sample=False)
+
+pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
+    "briaai/BRIA-2.3",
+    unet=unet,
+    scheduler=scheduler,
+    torch_dtype=torch.float16,
+    force_zeros_for_empty_prompt=False
+)
+pipe = pipe.to(device)
+pipe.force_zeros_for_empty_prompt = False
+
+default_negative_prompt= "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
+
 
 def read_content(file_path: str) -> str:
     """read the content of target file
@@ -16,20 +35,11 @@ def read_content(file_path: str) -> str:
 
     return content
 
+@spaces.GPU()
 def predict(dict, prompt="", negative_prompt="", guidance_scale=7.5, steps=20, strength=1.0, scheduler="EulerDiscreteScheduler"):
     if negative_prompt == "":
         negative_prompt = None
-    scheduler_class_name = scheduler.split("-")[0]
 
-    add_kwargs = {}
-    if len(scheduler.split("-")) > 1:
-        add_kwargs["use_karras"] = True
-    if len(scheduler.split("-")) > 2:
-        add_kwargs["algorithm_type"] = "sde-dpmsolver++"
-
-    scheduler = getattr(diffusers, scheduler_class_name)
-    pipe.scheduler = scheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler", **add_kwargs)
-
     init_image = dict["image"].convert("RGB").resize((1024, 1024))
     mask = dict["mask"].convert("RGB").resize((1024, 1024))
 
@@ -77,7 +87,15 @@ div#share-btn-container > div {flex-direction: row;background: black;align-items
 
 image_blocks = gr.Blocks(css=css, elem_id="total-container")
 with image_blocks as demo:
-    gr.HTML(read_content("header.html"))
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown("## BRIA 2.3")
+        gr.HTML('''
+        <p style="margin-bottom: 10px; font-size: 94%">
+        This is a demo for
+        <a href="https://huggingface.co/briaai/BRIA-2.3" target="_blank">BRIA 2.3 text-to-image </a>.
+        BRIA 2.3 improve the generation of humans and illustrations compared to BRIA 2.2 while still trained on licensed data, and so provide full legal liability coverage for copyright and privacy infringement.
+        </p>
+        ''')
     with gr.Row():
         with gr.Column():
            image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Upload",height=400)
@@ -88,13 +106,11 @@ with image_blocks as demo:
 
            with gr.Accordion(label="Advanced Settings", open=False):
                with gr.Row(mobile_collapse=False, equal_height=True):
-                   guidance_scale = gr.Number(value=7.5, minimum=1.0, maximum=20.0, step=0.1, label="guidance_scale")
-                   steps = gr.Number(value=20, minimum=10, maximum=30, step=1, label="steps")
+                   guidance_scale = gr.Number(value=7.5, minimum=1.0, maximum=10.0, step=0.5, label="guidance_scale")
+                   steps = gr.Number(value=30, minimum=20, maximum=50, step=1, label="steps")
                    strength = gr.Number(value=0.99, minimum=0.01, maximum=1.0, step=0.01, label="strength")
-                   negative_prompt = gr.Textbox(label="negative_prompt", placeholder="Your negative prompt", info="what you don't want to see in the image")
-               with gr.Row(mobile_collapse=False, equal_height=True):
-                   schedulers = ["DEISMultistepScheduler", "HeunDiscreteScheduler", "EulerDiscreteScheduler", "DPMSolverMultistepScheduler", "DPMSolverMultistepScheduler-Karras", "DPMSolverMultistepScheduler-Karras-SDE"]
-                   scheduler = gr.Dropdown(label="Schedulers", choices=schedulers, value="EulerDiscreteScheduler")
+                   negative_prompt = gr.Textbox(label="negative_prompt", value=default_negative_prompt, placeholder=default_negative_prompt, info="what you don't want to see in the image")
+
 
        with gr.Column():
            image_out = gr.Image(label="Output", elem_id="output-img", height=400)
@@ -110,16 +126,7 @@ with image_blocks as demo:
 
    gr.Examples(
        examples=[
-           ["./imgs/aaa (8).png"],
-           ["./imgs/download (1).jpeg"],
-           ["./imgs/0_oE0mLhfhtS_3Nfm2.png"],
-           ["./imgs/02_HubertyBlog-1-1024x1024.jpg"],
-           ["./imgs/jdn_jacques_de_nuce-1024x1024.jpg"],
-           ["./imgs/c4ca473acde04280d44128ad8ee09e8a.jpg"],
-           ["./imgs/canam-electric-motorcycles-scaled.jpg"],
-           ["./imgs/e8717ce80b394d1b9a610d04a1decd3a.jpeg"],
-           ["./imgs/Nature___Mountains_Big_Mountain_018453_31.jpg"],
-           ["./imgs/Multible-sharing-room_ccexpress-2-1024x1024.jpeg"],
+           ["./imgs/example.png"],
        ],
        fn=predict,
        inputs=[image],
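
For readers who want to try the updated pipeline outside the Gradio app, the sketch below mirrors the setup this commit introduces. It is not part of the commit: the model IDs, the DDIM scheduler override, and the force_zeros_for_empty_prompt flag are taken from the diff above, while the prompt, the mask path ./imgs/example_mask.png, and the generation parameters are illustrative assumptions only.

import torch
from PIL import Image
from diffusers import UNet2DConditionModel, DDIMScheduler, StableDiffusionXLInpaintPipeline

# Assumes a CUDA device, matching the fp16 weights used in the Space.
device = "cuda"

# Inpainting UNet swapped into the base BRIA 2.3 pipeline, as in the commit.
unet = UNet2DConditionModel.from_pretrained(
    "briaai/BRIA-2.3-Inpainting", subfolder="unet", torch_dtype=torch.float16
)
scheduler = DDIMScheduler.from_pretrained("briaai/BRIA-2.3", subfolder="scheduler", clip_sample=False)
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "briaai/BRIA-2.3",
    unet=unet,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    force_zeros_for_empty_prompt=False,
).to(device)

# Hypothetical inputs: an RGB image plus a mask in which white marks the area to repaint.
init_image = Image.open("./imgs/example.png").convert("RGB").resize((1024, 1024))
mask = Image.open("./imgs/example_mask.png").convert("RGB").resize((1024, 1024))

result = pipe(
    prompt="a vase of flowers on the table",  # illustrative prompt
    image=init_image,
    mask_image=mask,
    guidance_scale=7.5,       # defaults shown in the updated UI
    num_inference_steps=30,
    strength=0.99,
).images[0]
result.save("inpainted.png")

In the SDXL inpainting API, image and mask_image must share the same size; the commit resizes both to 1024x1024 before calling the pipeline, and the sketch reproduces that.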