import torch
import gradio as gr
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler

model_dict = {
    'sd1': "CompVis/stable-diffusion-v1-4",
    'sd2': "stabilityai/stable-diffusion-2-1",
}

model_num_of_layers = {
    'sd1': 12,
    'sd2': 22,
}

# Global device/dtype settings: half precision on GPU, full precision on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if "cuda" in device else torch.float32


def get_images(prompt, skip_layers, model, seed):
    """Generate an image from `prompt`, skipping the last `skip_layers` CLIP text-encoder layers."""
    model_name = model_dict[model]
    pipeline = StableDiffusionPipeline.from_pretrained(
        model_name,
        torch_dtype=dtype,
        variant="fp16",
        add_watermarker=False,
    )
    # Move the pipeline to the device and swap in the Euler scheduler.
    pipeline.to(device)
    pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
    print('inside get images')

    layer = model_num_of_layers[model] - skip_layers
    gr.Info(f"Generating image from layer {layer}")
    print(f'skipping {skip_layers}')

    # Seed an explicit generator so results are reproducible for a given seed.
    generator = torch.Generator(device=device).manual_seed(seed)
    pipeline_output = pipeline(
        prompt,
        clip_skip=skip_layers,
        num_images_per_prompt=1,
        generator=generator,
    )
    print('after pipeline')
    images = pipeline_output.images
    print('got images')
    return images
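

# --- Minimal usage sketch (illustrative only): the prompt, seed, and output
# filename below are hypothetical examples, not part of the original code. ---
if __name__ == "__main__":
    out_images = get_images(
        prompt="a watercolor painting of a lighthouse at dusk",
        skip_layers=2,      # skip the last two CLIP text-encoder layers
        model="sd2",
        seed=42,
    )
    out_images[0].save("output.png")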