# custom_node_furniture_mask.py by StyleSpace (and GPT4)
import torch
import torchvision.transforms as T
from torchvision.models.segmentation import deeplabv3_resnet50
from PIL import Image


class FurnitureMaskNode:
    def __init__(self):
        self.model = deeplabv3_resnet50(pretrained=True).eval()
        self.transforms = T.Compose([
            T.Resize(256),
            T.CenterCrop(224),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input_image": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "detect_furniture"
    CATEGORY = "custom"

    def detect_furniture(self, input_image):
        # ComfyUI passes IMAGE as a float tensor of shape [B, H, W, C] in
        # [0, 1]; take the first image in the batch and convert it to PIL.
        image_np = (input_image[0].cpu().numpy() * 255).astype("uint8")
        pil_image = Image.fromarray(image_np)
        input_tensor = self.transforms(pil_image).unsqueeze(0)
        with torch.no_grad():
            output = self.model(input_tensor)["out"][0]
        output_predictions = output.argmax(0)
        # NOTE: torchvision's pretrained deeplabv3_resnet50 predicts the 21
        # COCO/Pascal-VOC categories, not the 150 ADE20K classes assumed by
        # the indices below; adjust them to match the weights actually used.
        non_furniture_classes = list(range(1, 151))  # Adjust the range based on ADE20K classes
        furniture_classes = [5, 10, 20, 25, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]  # Based on ADE20K
        non_furniture_classes = [c for c in non_furniture_classes if c not in furniture_classes]
        mask = torch.zeros_like(output_predictions, dtype=torch.bool)
        for c in non_furniture_classes:
            mask |= (output_predictions == c)
        mask = ~mask  # True where a furniture class (or the unlisted index 0) was predicted
        # The prediction is made on the 224x224 center crop, so apply the
        # mask to that crop rather than to the original-resolution input.
        crop = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
        cropped = crop(pil_image)  # [C, 224, 224] in [0, 1]
        masked = cropped * mask.unsqueeze(0).float()
        # Return ComfyUI-style tensors: IMAGE as [B, H, W, C], MASK as [H, W].
        masked_image = masked.permute(1, 2, 0).unsqueeze(0)
        return (masked_image, mask.float())


NODE_CLASS_MAPPINGS = {
    "FurnitureMask": FurnitureMaskNode
}
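
# A minimal standalone smoke test, not part of the ComfyUI node API (an
# assumption for illustration: running this file directly, outside ComfyUI).
# It feeds a random [1, H, W, C] float tensor shaped the way ComfyUI passes
# IMAGE inputs and prints the output shapes.
if __name__ == "__main__":
    node = FurnitureMaskNode()
    dummy = torch.rand(1, 256, 256, 3)  # hypothetical stand-in for a real image batch
    image, mask = node.detect_furniture(dummy)
    print("image:", tuple(image.shape), "mask:", tuple(mask.shape))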