#!/usr/bin/env python3
import numpy as np
import torch
from transformers.tools.base import Tool, get_default_device
from transformers.utils import (
    is_accelerate_available,
    is_vision_available,
    is_opencv_available,
)
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler

if is_vision_available():
    from PIL import Image

if is_opencv_available():
    import cv2


IMAGE_TRANSFORMATION_DESCRIPTION = (
    "This is a tool that transforms an image with ControlNet according to a prompt. It takes two inputs: `image`, which should be "
    "the image to transform, and `prompt`, which should be the prompt to use to change it. It returns the "
    "modified image."
)
class ControlNetTransformationTool(Tool):
    default_stable_diffusion_checkpoint = "runwayml/stable-diffusion-v1-5"
    default_controlnet_checkpoint = "lllyasviel/control_v11p_sd15_canny"

    description = IMAGE_TRANSFORMATION_DESCRIPTION
    name = "image_transformer"
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__(self, device=None, controlnet=None, stable_diffusion=None, **hub_kwargs) -> None:
        if not is_accelerate_available():
            raise ImportError("Accelerate should be installed in order to use tools.")
        if not is_vision_available():
            raise ImportError("Pillow should be installed in order to use the StableDiffusionTool.")
        if not is_opencv_available():
            raise ImportError("OpenCV should be installed in order to compute the Canny edge condition.")

        super().__init__()

        # Fall back to the default checkpoints when none are passed in.
        self.stable_diffusion = stable_diffusion or self.default_stable_diffusion_checkpoint
        self.controlnet = controlnet or self.default_controlnet_checkpoint

        self.device = device
        self.hub_kwargs = hub_kwargs
    def setup(self):
        if self.device is None:
            self.device = get_default_device()

        # Load the ControlNet conditioning model and plug it into the Stable Diffusion pipeline.
        controlnet = ControlNetModel.from_pretrained(self.controlnet)
        self.pipeline = StableDiffusionControlNetPipeline.from_pretrained(self.stable_diffusion, controlnet=controlnet)
        self.pipeline.scheduler = UniPCMultistepScheduler.from_config(self.pipeline.scheduler.config)

        self.pipeline.to(self.device)
        if self.device.type == "cuda":
            # Half precision on GPU to cut memory use and speed up inference.
            self.pipeline.to(torch_dtype=torch.float16)

        self.is_initialized = True
    def __call__(self, image, prompt):
        if not self.is_initialized:
            self.setup()

        # Build the ControlNet condition: a Canny edge map replicated across three channels.
        image = np.array(image)
        image = cv2.Canny(image, 100, 200)
        image = image[:, :, None]
        image = np.concatenate([image, image, image], axis=2)
        image = Image.fromarray(image)

        negative_prompt = "low quality, bad quality, deformed, low resolution"
        added_prompt = ", highest quality, highly realistic, very high resolution"

        return self.pipeline(
            prompt + added_prompt,
            image,
            negative_prompt=negative_prompt,
            num_inference_steps=30,
        ).images[0]
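

# Usage sketch: a minimal, illustrative way to run the tool directly. It assumes the
# default checkpoints above can be downloaded from the Hub, and that "input.png" and
# "output.png" are hypothetical local paths rather than files referenced by the tool.
if __name__ == "__main__":
    from PIL import Image

    tool = ControlNetTransformationTool()  # first call triggers setup() and model loading
    source = Image.open("input.png").convert("RGB")  # hypothetical input path
    result = tool(source, "turn the house into a snowy cabin at night")
    result.save("output.png")  # hypothetical output path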