import cv2
import numpy as np
import PIL.Image
import torch
from controlnet_aux.util import HWC3, ade_palette
from transformers import (
    AutoImageProcessor,
    OneFormerForUniversalSegmentation,
    OneFormerProcessor,
    UperNetForSemanticSegmentation,
)
from cv_utils import resize_image


class ImageSegmentor:
    def __init__(self):
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
        self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
        self.image_segmentor.to(self.device)

    @torch.inference_mode()
    def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
        detect_resolution = kwargs.pop("detect_resolution", 512)
        image_resolution = kwargs.pop("image_resolution", 512)
        # Normalize to 3-channel HWC and resize before running the detector.
        image = HWC3(image)
        image = resize_image(image, resolution=detect_resolution)
        image = PIL.Image.fromarray(image)
        pixel_values = self.image_processor(image, return_tensors="pt").pixel_values
        outputs = self.image_segmentor(pixel_values.to(self.device))
        # Upsample the logits back to the input size and take the per-pixel argmax.
        seg = self.image_processor.post_process_semantic_segmentation(
            outputs, target_sizes=[image.size[::-1]]
        )[0].cpu()
        # Map each ADE20K class id to its palette color.
        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
        for label, color in enumerate(ade_palette()):
            color_seg[seg == label, :] = color
        color_seg = color_seg.astype(np.uint8)
        # Nearest-neighbor interpolation keeps the flat label colors crisp.
        color_seg = resize_image(color_seg, resolution=image_resolution, interpolation=cv2.INTER_NEAREST)
        return PIL.Image.fromarray(color_seg)
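

# The OneFormer variant below mirrors ImageSegmentor but swaps in the
# shi-labs/oneformer_ade20k_swin_tiny checkpoint, whose processor additionally
# takes a task prompt (["semantic"]) and returns a dict of tensors rather than
# bare pixel values.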
class ImageSegmentorOneFormer:
    def __init__(self):
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.image_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
        self.image_segmentor = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
        self.image_segmentor.to(self.device)

    @torch.inference_mode()
    def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
        detect_resolution = kwargs.pop("detect_resolution", 512)
        image_resolution = kwargs.pop("image_resolution", 512)
        image = HWC3(image)
        image = resize_image(image, resolution=detect_resolution)
        image = PIL.Image.fromarray(image)
        # OneFormer is task-conditioned; "semantic" selects semantic segmentation.
        inputs = self.image_processor(image, ["semantic"], return_tensors="pt")
        inputs = {k: v.to(self.device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
        outputs = self.image_segmentor(**inputs)
        seg = self.image_processor.post_process_semantic_segmentation(
            outputs, target_sizes=[image.size[::-1]]
        )[0].cpu()
        # Color the label map with the ADE20K palette, as above.
        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
        for label, color in enumerate(ade_palette()):
            color_seg[seg == label, :] = color
        color_seg = color_seg.astype(np.uint8)
        color_seg = resize_image(color_seg, resolution=image_resolution, interpolation=cv2.INTER_NEAREST)
        return PIL.Image.fromarray(color_seg)
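

# Minimal usage sketch (not part of the original module). The path "input.jpg"
# is a hypothetical placeholder; cv2.imread returns BGR, so the array is
# converted to RGB before being handed to the segmentors.
if __name__ == "__main__":
    bgr = cv2.imread("input.jpg")
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    upernet = ImageSegmentor()
    upernet(rgb, detect_resolution=512, image_resolution=512).save("seg_upernet.png")

    oneformer = ImageSegmentorOneFormer()
    oneformer(rgb, detect_resolution=512, image_resolution=512).save("seg_oneformer.png")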