Eyalgut committed
Commit bff1c38
1 Parent(s): c19b32e

Update app.py

Files changed (1):
  app.py: +45 -3
app.py CHANGED
@@ -1,11 +1,41 @@
 import gradio as gr
 import torch
-
+import numpy as np
 import diffusers
 import os
 hf_token = os.environ.get("HF_TOKEN")
 from diffusers import StableDiffusionXLInpaintPipeline, DDIMScheduler, UNet2DConditionModel
 
+ratios_map = {
+    0.5: {"width": 704, "height": 1408},
+    0.57: {"width": 768, "height": 1344},
+    0.68: {"width": 832, "height": 1216},
+    0.72: {"width": 832, "height": 1152},
+    0.78: {"width": 896, "height": 1152},
+    0.82: {"width": 896, "height": 1088},
+    0.88: {"width": 960, "height": 1088},
+    0.94: {"width": 960, "height": 1024},
+    1.00: {"width": 1024, "height": 1024},
+    1.13: {"width": 1088, "height": 960},
+    1.21: {"width": 1088, "height": 896},
+    1.29: {"width": 1152, "height": 896},
+    1.38: {"width": 1152, "height": 832},
+    1.46: {"width": 1216, "height": 832},
+    1.67: {"width": 1280, "height": 768},
+    1.75: {"width": 1344, "height": 768},
+    2.00: {"width": 1408, "height": 704}
+}
+ratios = np.array(list(ratios_map.keys()))
+
+def get_size(init_image):
+    w, h = init_image.size
+    curr_ratio = w / h
+    ind = np.argmin(np.abs(curr_ratio - ratios))
+    ratio = ratios[ind]
+    chosen_ratio = ratios_map[ratio]
+    w, h = chosen_ratio["width"], chosen_ratio["height"]
+
+    return w, h
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 unet = UNet2DConditionModel.from_pretrained(
@@ -42,8 +72,20 @@ def predict(dict, prompt="", negative_prompt="", guidance_scale=5, steps=30, str
     if negative_prompt == "":
         negative_prompt = None
 
-    init_image = dict["image"].convert("RGB").resize((1024, 1024))
-    mask = dict["mask"].convert("RGB").resize((1024, 1024))
+
+    init_image = dict["image"].convert("RGB")  # .resize((1024, 1024))
+    mask = dict["mask"].convert("RGB")  # .resize((1024, 1024))
+
+    w, h = get_size(init_image)
+
+    init_image = init_image.resize((w, h))
+    mask = mask.resize((w, h))
+
+    # Resize to nearest ratio ?
+
+    mask = np.array(mask)
+    mask[mask > 0] = 255
+    mask = Image.fromarray(mask)
 
     output = pipe(prompt = prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask, guidance_scale=guidance_scale, num_inference_steps=int(steps), strength=strength)
 
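
For reference, the sketch below recreates the preprocessing this commit adds to predict(): pick the SDXL resolution bucket whose aspect ratio is closest to the input, resize the image and mask to that bucket, and binarize the mask. It is an illustration, not app.py itself: the names RATIOS_MAP and prepare_inputs, the tuple-valued bucket table, and the dummy images are assumptions made for this sketch, and it imports PIL's Image explicitly (the new Image.fromarray call in app.py relies on that import existing elsewhere in the file, outside the hunks shown above).

import numpy as np
from PIL import Image

# Illustrative sketch only: RATIOS_MAP, prepare_inputs and the dummy images below
# are not part of app.py; the bucket table and selection logic mirror the
# ratios_map / get_size code added by this commit.
RATIOS_MAP = {
    0.5: (704, 1408),   0.57: (768, 1344), 0.68: (832, 1216),
    0.72: (832, 1152),  0.78: (896, 1152), 0.82: (896, 1088),
    0.88: (960, 1088),  0.94: (960, 1024), 1.00: (1024, 1024),
    1.13: (1088, 960),  1.21: (1088, 896), 1.29: (1152, 896),
    1.38: (1152, 832),  1.46: (1216, 832), 1.67: (1280, 768),
    1.75: (1344, 768),  2.00: (1408, 704),
}
RATIOS = np.array(list(RATIOS_MAP.keys()))  # 1-D array of the supported aspect ratios

def get_size(image):
    # Pick the (width, height) bucket whose aspect ratio is closest to the image's w/h.
    w, h = image.size
    idx = np.argmin(np.abs(w / h - RATIOS))
    return RATIOS_MAP[float(RATIOS[idx])]

def prepare_inputs(init_image, mask):
    # Resize both inputs to the chosen bucket and binarize the mask to {0, 255}.
    w, h = get_size(init_image)
    init_image = init_image.convert("RGB").resize((w, h))
    mask = np.array(mask.convert("RGB").resize((w, h)))
    mask[mask > 0] = 255  # any painted pixel counts as fully masked
    return init_image, Image.fromarray(mask)

if __name__ == "__main__":
    # Dummy 1200x800 input (ratio 1.5): the nearest bucket is 1.46 -> 1216x832.
    image = Image.new("RGB", (1200, 800), "gray")
    mask = Image.new("L", (1200, 800), 0)
    image, mask = prepare_inputs(image, mask)
    print(image.size, mask.size)  # (1216, 832) (1216, 832)

Selecting the nearest bucket keeps inputs close to their native aspect ratio instead of forcing everything to a square, which is what the removed .resize((1024, 1024)) calls did.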