from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
import torch
import numpy as np
import gradio as gr
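# Display names mapped to their Hugging Face Hub model IDs.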
model_dict = {
'Stable Diffusion 1.4': "CompVis/stable-diffusion-v1-4",
'Stable Diffusion 2.1': "stabilityai/stable-diffusion-2-1",
}
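# Number of text-encoder layers per model; used below to report which layer's
# output the prompt embedding comes from once `skip_layers` layers are skipped.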
model_num_of_layers = {
    'Stable Diffusion 1.4': 12,
    'Stable Diffusion 2.1': 22,
}
# Global device/precision settings: GPU with fp16 when available, otherwise CPU with fp32
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if "cuda" in device else torch.float32
def get_images(prompt, skip_layers, model, seed):
    model_name = model_dict[model]
    # Load the selected model; only request the fp16 weight variant when running in fp16
    pipeline = StableDiffusionPipeline.from_pretrained(
        model_name,
        torch_dtype=dtype,
        variant="fp16" if dtype == torch.float16 else None,
    )
    # Move the pipeline to the device
    pipeline.to(device)
    # Swap in the Euler scheduler
    pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
    print('inside get images')
    # The text-encoder layer whose output is used after skipping the last `skip_layers` layers
    layer = model_num_of_layers[model] - skip_layers
    gr.Info(f"Generating an image from layer number {layer}")
    print(f'skipping {skip_layers}')
    # The pipeline call takes a torch.Generator rather than a raw seed;
    # int() guards against the UI passing the seed as a float
    generator = torch.Generator(device=device).manual_seed(int(seed))
    pipeline_output = pipeline(
        prompt,
        clip_skip=skip_layers,
        num_images_per_prompt=1,
        generator=generator,
    )
    print('after pipeline')
    images = pipeline_output.images
    print('got images')
    return images
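# The file as shown ends with get_images and does not include the Gradio UI wiring.
# A minimal sketch of how it could be exposed is below; the component choices,
# labels, slider range, and default values are assumptions, not part of the original.
demo = gr.Interface(
    fn=get_images,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(0, 11, value=0, step=1, label="Layers to skip (clip_skip)"),
        gr.Dropdown(choices=list(model_dict.keys()), value="Stable Diffusion 1.4", label="Model"),
        gr.Number(value=42, precision=0, label="Seed"),
    ],
    outputs=gr.Gallery(label="Generated images"),
)

if __name__ == "__main__":
    demo.launch()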