multimodalart (HF staff) committed on
Commit 0274969
Parent: 26910d6

Update app.py

Files changed (1): app.py (+4, -9)
app.py CHANGED
@@ -7,15 +7,10 @@ import spaces
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-if torch.cuda.is_available():
-    torch.cuda.max_memory_allocated(device=device)
-    pipe = AuraFlowPipeline.from_pretrained("AuraDiffusion/auradiffusion-v0.1a0",
-                                            torch_dtype=torch.float16)
-    pipe.enable_xformers_memory_efficient_attention()
-    pipe = pipe.to(device)
-else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-    pipe = pipe.to(device)
+pipeline = AuraFlowPipeline(
+    "AuraDiffusion/auradiffusion-v0.1a0",
+    torch_dtype=torch.float16
+).to("cuda")
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
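
Note: as committed, the added lines call the AuraFlowPipeline constructor directly on a repo id. In current diffusers releases a pipeline is normally loaded with from_pretrained, so a minimal runnable sketch of the simplified loading step (repo id taken from the diff above, assuming a CUDA device and a diffusers version that ships AuraFlowPipeline) would look like:

import torch
from diffusers import AuraFlowPipeline

# Load AuraFlow in fp16 and move it to the GPU.
# Repo id copied from the diff above; requires a CUDA-capable device.
pipeline = AuraFlowPipeline.from_pretrained(
    "AuraDiffusion/auradiffusion-v0.1a0",
    torch_dtype=torch.float16,
).to("cuda")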