Ashoka74 committed
Commit 23129bc · verified · 1 Parent(s): 8f819f8

Update merged_app2.py

Files changed (1): merged_app2.py +4 -4
merged_app2.py CHANGED
@@ -18,7 +18,7 @@ import numpy as np
 import torch
 import safetensors.torch as sf
 import datetime
-from pathlib import Path
+from pathlib import Paths
 from io import BytesIO
 
 from PIL import Image
@@ -493,7 +493,7 @@ pipe = prepare_pipeline(
 
 pipe.enable_model_cpu_offload()
 pipe.enable_vae_slicing()
-pipe.enable_xformers_memory_efficient_attention()
+#pipe.enable_xformers_memory_efficient_attention()
 
 # Move models to device with consistent dtype
 text_encoder = text_encoder.to(device=device, dtype=dtype)
@@ -545,7 +545,7 @@ t2i_pipe = StableDiffusionPipeline(
 
 t2i_pipe.enable_model_cpu_offload()
 t2i_pipe.enable_vae_slicing()
-t2i_pipe.enable_xformers_memory_efficient_attention()
+#t2i_pipe.enable_xformers_memory_efficient_attention()
 
 i2i_pipe = StableDiffusionImg2ImgPipeline(
     vae=vae,
@@ -561,7 +561,7 @@ i2i_pipe = StableDiffusionImg2ImgPipeline(
 
 i2i_pipe.enable_model_cpu_offload()
 i2i_pipe.enable_vae_slicing()
-i2i_pipe.enable_xformers_memory_efficient_attention()
+#i2i_pipe.enable_xformers_memory_efficient_attention()
 
 @torch.inference_mode()
 def encode_prompt_inner(txt: str):
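
The net change comments out all three enable_xformers_memory_efficient_attention() calls, which suggests xformers is unavailable or incompatible in the target runtime. A more defensive alternative is a guarded enable, as in the minimal sketch below (the try_enable_xformers helper is hypothetical, not part of this commit); it keeps the memory-efficient attention when xformers imports cleanly and silently falls back to diffusers' default attention otherwise:

    def try_enable_xformers(pipeline):
        # Best-effort switch: turn on xformers memory-efficient attention
        # only if the xformers package is importable and compatible.
        try:
            pipeline.enable_xformers_memory_efficient_attention()
        except Exception:
            # xformers is missing or was built against a different
            # torch/CUDA; keep the default attention implementation.
            pass

    for p in (pipe, t2i_pipe, i2i_pipe):
        try_enable_xformers(p)

One caveat on the import hunk: pathlib exports Path, not Paths, so the new line "from pathlib import Paths" will raise an ImportError as soon as merged_app2.py is loaded.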