Ashoka74 committed on
Commit
a8d538e
1 Parent(s): 428533c

Update app_2.py

Browse files
Files changed (1) hide show
  1. app_2.py +13 -11
app_2.py CHANGED
@@ -161,17 +161,7 @@ if torch.cuda.is_available():
161
  else:
162
  device = torch.device('cpu')
163
 
164
- pipe = prepare_pipeline(
165
- base_model="stabilityai/stable-diffusion-xl-base-1.0",
166
- vae_model="madebyollin/sdxl-vae-fp16-fix",
167
- unet_model=None,
168
- lora_model=None,
169
- adapter_path="huanngzh/mv-adapter",
170
- scheduler=None,
171
- num_views=NUM_VIEWS,
172
- device=device,
173
- dtype=dtype,
174
- )
175
 
176
  # 'stablediffusionapi/realistic-vision-v51'
177
  # 'runwayml/stable-diffusion-v1-5'
@@ -270,6 +260,18 @@ del sd_offset, sd_origin, sd_merged, keys
270
  device = torch.device('cuda')
271
  dtype = torch.float16 # RTX 2070 works well with float16
272
 
 
 
 
 
 
 
 
 
 
 
 
 
273
  # Memory optimizations for RTX 2070
274
  torch.backends.cudnn.benchmark = True
275
  if torch.cuda.is_available():
 
161
  else:
162
  device = torch.device('cpu')
163
 
164
+
 
 
 
 
 
 
 
 
 
 
165
 
166
  # 'stablediffusionapi/realistic-vision-v51'
167
  # 'runwayml/stable-diffusion-v1-5'
 
260
  device = torch.device('cuda')
261
  dtype = torch.float16 # RTX 2070 works well with float16
262
 
263
+ pipe = prepare_pipeline(
264
+ base_model="stabilityai/stable-diffusion-xl-base-1.0",
265
+ vae_model="madebyollin/sdxl-vae-fp16-fix",
266
+ unet_model=None,
267
+ lora_model=None,
268
+ adapter_path="huanngzh/mv-adapter",
269
+ scheduler=None,
270
+ num_views=NUM_VIEWS,
271
+ device=device,
272
+ dtype=dtype,
273
+ )
274
+
275
  # Memory optimizations for RTX 2070
276
  torch.backends.cudnn.benchmark = True
277
  if torch.cuda.is_available():