| import glob
|
| import logging
|
| import numpy as np
|
| import os
|
| import tempfile
|
| from collections import OrderedDict
|
| import torch
|
| from PIL import Image
|
|
|
| from detectron2.data import MetadataCatalog
|
| from detectron2.utils import comm
|
| from detectron2.utils.file_io import PathManager
|
| from detectron2.evaluation.cityscapes_evaluation import CityscapesEvaluator
|
class CityscapesInstanceEvaluator(CityscapesEvaluator):
    """
    Evaluate instance segmentation results on cityscapes dataset using cityscapes API.

    Note:
        * It does not work in multi-machine distributed training.
        * It contains a synchronization, therefore has to be used on all ranks.
        * Only the main process runs evaluation.
    """

    def process(self, inputs, outputs):
        """
        Dump per-image predictions into ``self._temp_dir`` in the layout the
        cityscapes instance-level evaluation script expects: for each image, a
        ``<basename>_pred.txt`` listing ``<mask png> <class id> <score>`` lines,
        plus one binary mask PNG per predicted instance.

        Args:
            inputs (list[dict]): each dict must contain a "file_name" key.
            outputs (list[dict]): model outputs paired with ``inputs``; when an
                "instances" key is present it holds an Instances object with
                ``pred_classes``, ``scores`` and ``pred_masks``.
        """
        # Deferred import: cityscapesscripts is an optional dependency, only
        # needed when this evaluator is actually used.
        from cityscapesscripts.helpers.labels import name2label

        for input, output in zip(inputs, outputs):
            file_name = input["file_name"]
            basename = os.path.splitext(os.path.basename(file_name))[0]
            pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")

            if "instances" in output:
                output = output["instances"].to(self._cpu_device)
                num_instances = len(output)
                with open(pred_txt, "w") as fout:
                    for i in range(num_instances):
                        pred_class = output.pred_classes[i]
                        classes = self._metadata.thing_classes[pred_class]
                        # Map the dataset class name to the cityscapes label id
                        # used by the evaluation script.
                        class_id = name2label[classes].id
                        score = output.scores[i]
                        mask = output.pred_masks[i].numpy().astype("uint8")
                        # Avoid spaces in filenames (e.g. "traffic light").
                        classes = classes.replace(' ', '_')
                        png_filename = os.path.join(
                            self._temp_dir, basename + "_{}_{}.png".format(i, classes)
                        )

                        # pred_masks is 0/1; scale to 0/255 for a visible PNG.
                        Image.fromarray(mask * 255).save(png_filename)
                        fout.write(
                            "{} {} {}\n".format(os.path.basename(png_filename), class_id, score)
                        )
            else:
                # Still create an (empty) prediction file, so the evaluation
                # script finds an entry for every ground-truth image.
                with open(pred_txt, "w") as fout:
                    pass

    def evaluate(self):
        """
        Run the cityscapes instance-level evaluation on the files written by
        :meth:`process`.

        Returns:
            dict: has a key "segm", whose value is a dict of "AP" and "AP50".
                Non-main ranks return None after synchronizing.
        """
        comm.synchronize()
        if comm.get_rank() > 0:
            return
        import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval

        self._logger.info("Evaluating results under {} ...".format(self._temp_dir))

        # Set some global states in cityscapes evaluation API, before evaluating.
        cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
        cityscapes_eval.args.predictionWalk = None
        cityscapes_eval.args.JSONOutput = False
        cityscapes_eval.args.colorized = False
        cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")

        gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
        gt_search = os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")
        groundTruthImgList = glob.glob(gt_search)
        # BUGFIX: report the pattern actually searched. The original message
        # formatted cityscapes_eval.args.groundTruthSearch, which is never set
        # here, so the error would show the script's unrelated default path.
        assert len(
            groundTruthImgList
        ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
            gt_search
        )
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))

        results = cityscapes_eval.evaluateImgLists(
            predictionImgList, groundTruthImgList, cityscapes_eval.args
        )["averages"]

        ret = OrderedDict()
        ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
        # _working_dir is a TemporaryDirectory holding the dumped predictions;
        # remove it now that evaluation is done.
        self._working_dir.cleanup()
        return ret