cm107 committed on
Commit
b84be05
1 Parent(s): 61ef756
Files changed (1) hide show
  1. app.py +12 -0
app.py CHANGED
@@ -11,10 +11,22 @@ from diffusers.utils import PIL_INTERPOLATION, load_image, torch_device
11
 
12
  from pkg.util import img_binary_data_to_pil, resizePilToMaxSide, pil_to_base64
13
 
 
 
14
 
 
15
  torch.backends.cuda.matmul.allow_tf32 = False
16
  ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution", device_map="auto")
17
  ldm.to(torch_device)
 
 
 
 
 
 
 
 
 
18
  ldm.set_progress_bar_config(disable=None)
19
  generator = torch.Generator(device=torch_device).manual_seed(0)
20
 
 
11
 
12
  from pkg.util import img_binary_data_to_pil, resizePilToMaxSide, pil_to_base64
13
 
14
+ if False:
15
+ torch_device = 'cpu'
16
 
17
+ print(f'Running inference on {torch_device}')
18
  torch.backends.cuda.matmul.allow_tf32 = False
19
  ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution", device_map="auto")
20
  ldm.to(torch_device)
21
+
22
+ if False:
23
+ print(f"{ldm.device=}")
24
+ print(f"{type(ldm.components)=}")
25
+ print(f"{ldm.components.keys()=}")
26
+ print(f"{ldm.components['vqvae'].device=}")
27
+ print(f"{ldm.components['unet'].device=}")
28
+ print(f"{ldm.components['scheduler'].config=}")
29
+
30
  ldm.set_progress_bar_config(disable=None)
31
  generator = torch.Generator(device=torch_device).manual_seed(0)
32