from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
import torch
import numpy as np
import gradio as gr
model_dict = {
    'Stable Diffusion 1.4': "CompVis/stable-diffusion-v1-4",
    'Stable Diffusion 2.1': "stabilityai/stable-diffusion-2-1",
}

model_num_of_layers = {
    'Stable Diffusion 1.4': 12,
    'Stable Diffusion 2.1': 22,
}
# global variables
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
def get_images(prompt, skip_layers, model, seed):
    model_name = model_dict[model]
    # Load the pipeline; only request the fp16 weight variant when running in half precision
    pipeline = StableDiffusionPipeline.from_pretrained(
        model_name,
        torch_dtype=dtype,
        variant="fp16" if dtype == torch.float16 else None,
    )
    # Move the pipeline to the device and swap in the Euler scheduler
    pipeline.to(device)
    pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
    layer = model_num_of_layers[model] - skip_layers
    gr.Info(f"Generating an image from layer number {layer}")
    # diffusers pipelines are seeded through a torch.Generator; there is no `seed` argument
    generator = torch.Generator(device=device).manual_seed(int(seed))
    pipeline_output = pipeline(
        prompt,
        clip_skip=skip_layers,
        num_images_per_prompt=1,
        generator=generator,
    )
    return pipeline_output.images
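
# --- Hypothetical Gradio wiring: a minimal sketch, since the interface code is not
# shown in this excerpt. Component choices, the slider range, and default values
# below are assumptions that only illustrate how get_images could be hooked up.
demo = gr.Interface(
    fn=get_images,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(minimum=0, maximum=11, step=1, value=0, label="CLIP layers to skip"),
        gr.Dropdown(choices=list(model_dict.keys()), value="Stable Diffusion 1.4", label="Model"),
        gr.Number(value=42, precision=0, label="Seed"),
    ],
    outputs=gr.Gallery(label="Generated images"),
)

if __name__ == "__main__":
    demo.launch()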