Update app.py
app.py
CHANGED
@@ -23,7 +23,6 @@ CSS = """
 
 # Ensure model and scheduler are initialized in GPU-enabled function
 unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
-unet.load_state_dict(torch.load(hf_hub_download(repo, checkpoints)), map_location="cuda")
 if torch.cuda.is_available():
     pipe = DiffusionPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
 
@@ -38,6 +37,7 @@ def generate_image(prompt, ckpt):
     num_inference_steps = checkpoints[ckpt][1]
 
     if loaded != num_inference_steps:
+        unet.load_state_dict(torch.load(hf_hub_download(repo, checkpoints[ckpt][0]), map_location="cuda"))
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample" if num_inference_steps==1 else "epsilon")
         loaded = num_inference_steps
 
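For context, a minimal sketch of the pattern this commit moves to: checkpoint weights are loaded inside generate_image (the GPU-enabled function) only when the requested step count differs from the one already loaded, and the scheduler is rebuilt to match. The values of base and repo, the checkpoint filenames, and the final pipe(...) call below are assumptions for illustration, not confirmed by the diff; the checkpoints dict shape is inferred from the checkpoints[ckpt][1] lookup, and only the load-and-swap logic comes from the commit itself.

import torch
from diffusers import DiffusionPipeline, LCMScheduler, UNet2DConditionModel
from huggingface_hub import hf_hub_download

base = "stabilityai/stable-diffusion-xl-base-1.0"  # assumed; the diff only references `base`
repo = "some-org/some-lightning-checkpoints"       # hypothetical repo id

# Assumed shape, inferred from checkpoints[ckpt][1] in the diff:
# UI label -> [checkpoint filename, num inference steps]
checkpoints = {
    "1-Step": ["unet_1step.bin", 1],  # hypothetical filenames
    "4-Step": ["unet_4step.bin", 4],
}

# Ensure model and scheduler are initialized in GPU-enabled function
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained(
        base, unet=unet, torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")

loaded = -1  # step count of the checkpoint currently held by the UNet

def generate_image(prompt, ckpt):
    global loaded
    num_inference_steps = checkpoints[ckpt][1]

    if loaded != num_inference_steps:
        # Download (or hit the local cache) and load the matching weights
        # only when the user switches step counts. Note that map_location
        # is an argument of torch.load, not of load_state_dict.
        state = torch.load(hf_hub_download(repo, checkpoints[ckpt][0]), map_location="cuda")
        unet.load_state_dict(state)
        # Per the conditional in the diff, the 1-step checkpoint is run with
        # sample (x0) prediction; multi-step checkpoints use epsilon (noise).
        pipe.scheduler = LCMScheduler.from_config(
            pipe.scheduler.config,
            timestep_spacing="trailing",
            prediction_type="sample" if num_inference_steps == 1 else "epsilon",
        )
        loaded = num_inference_steps

    # Assumed return; the diff does not show the tail of the function.
    return pipe(prompt, num_inference_steps=num_inference_steps).images[0]

Keeping the load behind the loaded != num_inference_steps check means repeated requests at the same step count skip both the hub download and the state-dict copy, which is the point of moving the call out of module scope and into the request path.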