# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import logging
import numpy as np
import os
import tempfile
from collections import OrderedDict
import torch
from PIL import Image

from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager

from .evaluator import DatasetEvaluator


class CityscapesEvaluator(DatasetEvaluator):
    """
    Base class for evaluation using the Cityscapes API.
    """

    def __init__(self, dataset_name):
"""
Args:
dataset_name (str): the name of the dataset.
It must have the following metadata associated with it:
"thing_classes", "gt_dir".
"""
self._metadata = MetadataCatalog.get(dataset_name)
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)

    def reset(self):
self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
self._temp_dir = self._working_dir.name
        # All workers will write to the same results directory
        # TODO: this does not work in multi-machine distributed training
assert (
comm.get_local_size() == comm.get_world_size()
), "CityscapesEvaluator currently do not work with multiple machines."
self._temp_dir = comm.all_gather(self._temp_dir)[0]
if self._temp_dir != self._working_dir.name:
self._working_dir.cleanup()
self._logger.info(
"Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
)


class CityscapesInstanceEvaluator(CityscapesEvaluator):
    """
    Evaluate instance segmentation results on the Cityscapes dataset using the Cityscapes API.

    Note:
        * It does not work in multi-machine distributed training.
        * It contains a synchronization, therefore has to be called from all ranks.
        * Only the main process runs evaluation.
    """

    def process(self, inputs, outputs):
from cityscapesscripts.helpers.labels import name2label
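        # name2label maps a Cityscapes class name (e.g. "car") to its label
        # definition, whose .id is the numeric id used in prediction files.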
for input, output in zip(inputs, outputs):
file_name = input["file_name"]
basename = os.path.splitext(os.path.basename(file_name))[0]
pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")
if "instances" in output:
output = output["instances"].to(self._cpu_device)
num_instances = len(output)
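                # Cityscapes instance format: one txt file per image, with one
                # line "<mask>.png <labelID> <confidence>" per predicted
                # instance, plus a binary mask PNG for each instance.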
with open(pred_txt, "w") as fout:
for i in range(num_instances):
pred_class = output.pred_classes[i]
classes = self._metadata.thing_classes[pred_class]
class_id = name2label[classes].id
score = output.scores[i]
mask = output.pred_masks[i].numpy().astype("uint8")
png_filename = os.path.join(
self._temp_dir, basename + "_{}_{}.png".format(i, classes)
)
Image.fromarray(mask * 255).save(png_filename)
fout.write(
"{} {} {}\n".format(os.path.basename(png_filename), class_id, score)
)
else:
# Cityscapes requires a prediction file for every ground truth image.
with open(pred_txt, "w") as fout:
pass

    def evaluate(self):
"""
Returns:
dict: has a key "segm", whose value is a dict of "AP" and "AP50".
"""
comm.synchronize()
if comm.get_rank() > 0:
return
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
# set some global states in cityscapes evaluation API, before evaluating
cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
cityscapes_eval.args.predictionWalk = None
cityscapes_eval.args.JSONOutput = False
cityscapes_eval.args.colorized = False
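        # The eval script caches ground-truth instances in this JSON on first use.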
cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")
        # These lines are adapted from
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
        search_pattern = os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")
        groundTruthImgList = glob.glob(search_pattern)
        assert len(
            groundTruthImgList
        ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
            search_pattern
        )
predictionImgList = []
for gt in groundTruthImgList:
predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
results = cityscapes_eval.evaluateImgLists(
predictionImgList, groundTruthImgList, cityscapes_eval.args
)["averages"]
ret = OrderedDict()
ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
self._working_dir.cleanup()
return ret


class CityscapesSemSegEvaluator(CityscapesEvaluator):
    """
    Evaluate semantic segmentation results on the Cityscapes dataset using the Cityscapes API.

    Note:
        * It does not work in multi-machine distributed training.
        * It contains a synchronization, therefore has to be called from all ranks.
        * Only the main process runs evaluation.
    """

    def process(self, inputs, outputs):
from cityscapesscripts.helpers.labels import trainId2label
for input, output in zip(inputs, outputs):
file_name = input["file_name"]
basename = os.path.splitext(os.path.basename(file_name))[0]
pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
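            # Convert contiguous train ids back to raw Cityscapes label ids;
            # anything without an eval label stays at the fill value 255.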
pred = 255 * np.ones(output.shape, dtype=np.uint8)
for train_id, label in trainId2label.items():
if label.ignoreInEval:
continue
pred[output == train_id] = label.id
Image.fromarray(pred).save(pred_filename)

    def evaluate(self):
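        """
        Returns:
            dict: has a key "sem_seg", whose value is a dict of
                "IoU", "iIoU", "IoU_sup" and "iIoU_sup", in percent.
        """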
comm.synchronize()
if comm.get_rank() > 0:
return
# Load the Cityscapes eval script *after* setting the required env var,
# since the script reads CITYSCAPES_DATASET into global variables at load time.
import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
# set some global states in cityscapes evaluation API, before evaluating
cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
cityscapes_eval.args.predictionWalk = None
cityscapes_eval.args.JSONOutput = False
cityscapes_eval.args.colorized = False
        # These lines are adapted from
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
        search_pattern = os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")
        groundTruthImgList = glob.glob(search_pattern)
        assert len(
            groundTruthImgList
        ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
            search_pattern
        )
predictionImgList = []
for gt in groundTruthImgList:
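            # Note: unlike the instance-level script, the pixel-level
            # getPrediction takes its arguments in the order (args, gt).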
predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
results = cityscapes_eval.evaluateImgLists(
predictionImgList, groundTruthImgList, cityscapes_eval.args
)
ret = OrderedDict()
ret["sem_seg"] = {
"IoU": 100.0 * results["averageScoreClasses"],
"iIoU": 100.0 * results["averageScoreInstClasses"],
"IoU_sup": 100.0 * results["averageScoreCategories"],
"iIoU_sup": 100.0 * results["averageScoreInstCategories"],
}
self._working_dir.cleanup()
return ret
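

# ---------------------------------------------------------------------------
# Usage sketch: these evaluators plug into detectron2's standard evaluation
# loop. A minimal example, assuming the builtin
# "cityscapes_fine_instance_seg_val" split is registered and that `cfg` and
# `model` have been built elsewhere:
#
#     from detectron2.data import build_detection_test_loader
#     from detectron2.evaluation import inference_on_dataset
#
#     evaluator = CityscapesInstanceEvaluator("cityscapes_fine_instance_seg_val")
#     loader = build_detection_test_loader(cfg, "cityscapes_fine_instance_seg_val")
#     results = inference_on_dataset(model, loader, evaluator)
#     # -> {"segm": {"AP": ..., "AP50": ...}}
# ---------------------------------------------------------------------------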