# custom_node_furniture_mask.py by StyleSpace (and GPT4)
import torch
import torchvision.transforms as T
from torchvision.models.segmentation import deeplabv3_resnet50
from PIL import Image  # kept for backward compatibility with earlier revisions


class FurnitureMaskNode:
    """ComfyUI node that segments furniture in an image.

    Runs torchvision's DeepLabV3/ResNet-50 semantic-segmentation model and
    returns (masked image, furniture mask).  NOTE(review): the pretrained
    torchvision checkpoint is trained on COCO using the *Pascal VOC* 21-class
    label set — NOT ADE20K as the original comments claimed.  Its argmax
    output is therefore in 0..20, and the furniture-like categories are
    chair (9), diningtable (11), pottedplant (16) and sofa (18).
    """

    # Pascal VOC class indices treated as "furniture".
    FURNITURE_CLASSES = (9, 11, 16, 18)

    # ImageNet statistics expected by the ResNet-50 backbone.
    _MEAN = (0.485, 0.456, 0.406)
    _STD = (0.229, 0.224, 0.225)

    def __init__(self):
        # `pretrained=True` is deprecated in newer torchvision in favour of
        # `weights=...`; kept here for compatibility with older versions.
        self.model = deeplabv3_resnet50(pretrained=True).eval()
        # Normalisation only — no Resize/CenterCrop.  DeepLabV3 is fully
        # convolutional, so running at native resolution keeps the predicted
        # mask pixel-aligned with the input image (the old 224x224 crop
        # produced a mask that could never be applied to the original image).
        self.transforms = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=list(self._MEAN), std=list(self._STD)),
        ])

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input_image": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "detect_furniture"
    CATEGORY = "custom"

    def detect_furniture(self, input_image):
        """Return (masked_image, mask) keeping only furniture pixels.

        Args:
            input_image: ComfyUI IMAGE — a float tensor in [0, 1] shaped
                (B, H, W, C) or (H, W, C); a numpy array is also accepted.
                Only the first image of a batch is processed.

        Returns:
            masked_image: (1, H, W, C) float tensor — input with every
                non-furniture pixel zeroed.
            mask: (1, H, W) float tensor — 1.0 where furniture was detected.
        """
        # Normalise the input to a (H, W, C) float tensor in [0, 1].
        # (The original code called numpy's .astype on what ComfyUI actually
        # passes — a torch tensor — and crashed.)
        if isinstance(input_image, torch.Tensor):
            img = input_image
        else:
            img = torch.as_tensor(input_image)
        if img.dim() == 4:  # drop the batch dimension
            img = img[0]
        img = img.float()

        # (H, W, C) -> (1, C, H, W) with ImageNet normalisation.
        chw = img.permute(2, 0, 1)
        mean = torch.tensor(self._MEAN, dtype=chw.dtype).view(3, 1, 1)
        std = torch.tensor(self._STD, dtype=chw.dtype).view(3, 1, 1)
        batch = ((chw - mean) / std).unsqueeze(0)

        with torch.no_grad():
            logits = self.model(batch)["out"][0]  # (21, H, W) VOC logits
        predictions = logits.argmax(0)  # (H, W) class index per pixel

        # Build the furniture mask directly (the original built the
        # complement over 150 nonexistent classes and then inverted it).
        mask = torch.zeros_like(predictions, dtype=torch.bool)
        for cls_idx in self.FURNITURE_CLASSES:
            mask |= predictions == cls_idx
        mask = mask.float()  # ComfyUI MASK convention: float tensor

        # Zero out non-furniture pixels; re-add the batch dimension so the
        # outputs match ComfyUI's (B, H, W, C) / (B, H, W) conventions.
        masked_image = (img * mask.unsqueeze(-1)).unsqueeze(0)
        return masked_image, mask.unsqueeze(0)


NODE_CLASS_MAPPINGS = {
    "FurnitureMask": FurnitureMaskNode,
}