import numpy as np
import os
import vtk
import ot
import networkx as nx
import collections
import gudhi.wasserstein
import math
from vtk.util.numpy_support import vtk_to_numpy
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
from typing import Any
# We provide parameters that can be used to adjust our various scoring algorithms. The parameters
# are sorted by which algorithms they refer to.
# It may be helpful to read descriptions of the algorithms themselves before attempting to understand
# the meaning of these parameters.
# Set to True to allow data that is not perfectly predicted to score a perfect 10.
# If this is set to False, the highest possible score that an imperfect prediction can score is a 9.
canImperfectPredictionsScore10 = False
# ====== POINT CLOUD GEOMETRY SCORE ======
# When two points are paired with each other, the cost associated with that pairing is equal to twice their
# squared distance (namely, each point receives a cost equal to the squared distance between them).
# This parameter imposes an additional flat cost on a pair of points whose type classifications differ.
costMatrixTypeMismatchPenalty = 1000
# If two points have a cost above this threshold, they are not allowed to pair with each other.
costThresholdToPair = 100
# If a point remains unpaired, this is how much it contributes to the total cost.
# To ensure that it is never worse for a point to pair with another than to remain unpaired, this
# value should be at least half of the previous parameter. That way, the total cost for two points to remain unpaired, which is
# two times the value of this parameter, is at least the maximum possible cost if those two points were paired.
unpairedPointCost = 50
# The maximum average cost per point that can receive any points. Any average cost higher than this will receive a score of 0.
maximumAverageCost = 50
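# For example, with the defaults above and the linear mapping used by pointCloudGeometryScore, an average
# cost of 20 maps to round(10 * (50 - 20) / 50) = 6, while an average cost of 1 maps to round(10 * 49 / 50) = 10,
# which is then clamped to 9 because canImperfectPredictionsScore10 is False.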
# ====== DICE SCORES ======
# The minimum dice score that must be scored in order to score any points (any lower score receives a 0)
# Should be a float from 0-1.
minimumDiceScore = 0.3
# If the dice score is computed after resampling meshes to a common resolution, the dice score will not typically
# be a perfect 1.0 even if the model did nothing wrong. If rescaling occurs, then any dice score above this
# margin will score a perfect 10.
resamplingMarginForPerfect = 0.99
# ====== MERGE TREES PARTIAL FUSED GW DISTANCE SCORE ======
# This controls the tradeoff between the Wasserstein and GW distances in the OT computation.
alpha = 0.5
# This is the maximum partial fused GW distance that can score any points. Any distance above this threshold will score a 0.
maximumPFGWDistance = 0.5
# Cutoff distance for returning a perfect 10. Due to numerical issues, if the reconstructed data is perfect, the OT distance
# computed is unlikely to be exactly 0. Thus, we return a perfect 10 if the distance is below this threshold.
perfectPFGWDistanceCutoff = 1e-10
# ====== MERGE TREE PERSISTENCE DIAGRAM WASSERSTEIN SCORE ======
# see https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html
# The order of the Wasserstein distance
wassersteinOrder = 1.0
# The ground metric used for computing the Wasserstein distance
wassersteinGroundMetric = float('inf')
# This is the maximum average Wasserstein distance (the average is taken over (|P|+|Q|)/2) that can score points.
# Any distance above this threshold will score a 0.
maximumAverageWassersteinDistance = 0.2
def _convertPointsToArraysSortedByType(pointsFilename : str, pointsTypeArrayName : str) -> dict[Any, np.ndarray]:
"""
Converts a set of labeled points into a dictionary that sorts the points by label.
Args:
pointsFilename: The name of a file in legacy VTK format (.vtk) that stores a point cloud.
        pointsTypeArrayName: The name of a point array which classifies each point by type.
            The values should be categorical (e.g., integers representing the indices of critical points).
Returns:
A python dictionary. The keys are the different types of points. Each value is an nx3 numpy array, where each
row corresponds to a different point. If the input point cloud is 2D, then the z coordinate of each point will
be set to 0.
"""
scriptName = os.path.basename(__file__)
# read in the points file
if not os.path.isfile(pointsFilename):
raise FileNotFoundError(f"{scriptName}: no such file: '{pointsFilename}'")
reader = vtk.vtkDataSetReader()
reader.SetFileName(pointsFilename)
reader.Update()
output = reader.GetOutput()
if output is None:
raise ValueError(f"{scriptName}: file '{pointsFilename}' is not a legacy .vtk file")
# read in the classification array
pointData = output.GetPointData()
if pointData is None:
raise ValueError(f"{scriptName}: file '{pointsFilename}' does not have any point data")
array = pointData.GetArray(pointsTypeArrayName)
if array is None:
raise ValueError(f"{scriptName}: file '{pointsFilename}' does not have a point array called {pointsTypeArrayName}")
numPoints = output.GetNumberOfPoints()
# extract all points and sort them by classification
# keys: type, values: list of point coordinates
pointLists = {}
for idx in range(numPoints):
type_ = array.GetTuple1(idx)
if type_ not in pointLists:
pointLists[type_] = []
pointCoords = output.GetPoint(idx)
if len(pointCoords) == 2:
pointCoords = (pointCoords[0], pointCoords[1], 0)
pointLists[type_].append(pointCoords)
# convert the extracted points to numpy array and return
pointsDict = {}
for type_ in pointLists:
pointsDict[type_] = np.array(pointLists[type_])
return pointsDict
def _padMatrixToSquare(matrix : np.ndarray, padValue : Any) -> np.ndarray:
"""
Pads a 2D numpy array into a square matrix. New rows/columns will take a user defined value.
Args:
matrix: A 2D numpy array.
padValue: The value that the padded rows/columns should contain.
Returns:
        The padded square matrix.
"""
n = max(matrix.shape[0], matrix.shape[1])
return np.pad( matrix, ( (0, n-matrix.shape[0]), (0, n-matrix.shape[1]) ), constant_values = padValue )
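# A quick worked example of _padMatrixToSquare (doctest-style, for illustration):
# >>> _padMatrixToSquare(np.array([[1, 2, 3], [4, 5, 6]]), 0)
# array([[1, 2, 3],
#        [4, 5, 6],
#        [0, 0, 0]])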
def pointCloudGeometryScore(gtPointsFilename : str, gtPointsArrayName : str, reconPointsFilename : str, reconPointsArrayName : str, verbose : bool = False) -> int:
"""
Given two different point clouds, where each point in each point cloud has a type assigned to it,
assign a score of 0-10 for how well they match.
**The current algorithm is subject to change, but the function header will remain the same.**
Currently, in order to assess how well the reconstructed data
approximates the original, we seek to pair up each point in the ground truth with its corresponding point in the reconstructed data.
However, this raises two questions: (a) How close do two points need to be to be considered "paired"? and (b) How should we
penalize extra / missing points versus the distance between corresponding paired points? To answer these questions,
we use the following algorithm:
For each point p in the ground truth data, we assign a cost to pair p with each point q in the reconstructed data.
We also assign a cost to leave p unpaired. The way of determining the cost relies on parameters defined at the top of the file.
These costs are determined as follows:
    i. The cost to pair two points p and q is d(p,q)^2.
ii. If p and q have different types, we add the value of costMatrixTypeMismatchPenalty to the cost.
iii. If the cost of pairing p with q exceeds costThresholdToPair, then p is not allowed to pair with q (answering question (a))
iv. Any point may pair or remain unpaired
v. The cost of leaving a point unpaired is unpairedPointCost (answering question (b))
    After defining these costs, we use the Hungarian algorithm to compute the optimal pairing. The score is based on the average cost
    over all points in P and Q. The lowest average cost that scores a 0 is given by maximumAverageCost. All lower average costs map
    linearly onto scores from 0-10.
    Args:
        gtPointsFilename: The name of a file in legacy VTK format (.vtk) that stores the ground truth point cloud.
        gtPointsArrayName: The name of the array in the GT points file that classifies each point. This should store
            a categorical value (such as the index of critical points).
        reconPointsFilename: The name of a file in legacy VTK format (.vtk) that stores the reconstructed point cloud.
        reconPointsArrayName: The name of the array in the reconstructed points file that classifies each point.
        verbose: Should messages be printed out if there are errors with the files.
Returns:
A score of 0-10 assessing how well the reconstructed points approximate the ground truth file. A score of 10 means
very good approximation while a score of 0 means a very poor approximation.
"""
# sort points by classification
gtPointsDict = _convertPointsToArraysSortedByType(gtPointsFilename, gtPointsArrayName)
try:
reconPointsDict = _convertPointsToArraysSortedByType(reconPointsFilename, reconPointsArrayName)
except Exception as e:
if verbose:
print(e)
return 0
# Produce the cost matrix for pairing.
# First, we stack all points from ground truth and reconstructed data into matrices sorted by type
# and find the indices in the stacked matrix where each type starts.
allTypes = list(gtPointsDict.keys())
for type_ in reconPointsDict.keys():
if type_ not in allTypes:
allTypes.append(type_)
gtTypeStartIndices = {}
reconTypeStartIndices = {}
nextIdxGT = 0
nextIdxRecon = 0
allGTPointsList = []
allReconPointsList = []
for type_ in allTypes:
gtTypeStartIndices[type_] = nextIdxGT
if type_ in gtPointsDict:
nextIdxGT += gtPointsDict[type_].shape[0]
allGTPointsList.append(gtPointsDict[type_])
reconTypeStartIndices[type_] = nextIdxRecon
if type_ in reconPointsDict:
nextIdxRecon += reconPointsDict[type_].shape[0]
            allReconPointsList.append(reconPointsDict[type_])
allGTCPs = np.vstack(allGTPointsList).astype(np.float64)
allReconCPs = np.vstack(allReconPointsList).astype(np.float64)
# add a dummy element to the end of the types for algorithmic ease
newElt = max([type_ for type_ in allTypes if type_ != float('inf')]) + 1
allTypes.append(newElt)
gtTypeStartIndices[newElt] = allGTCPs.shape[0]
reconTypeStartIndices[newElt] = allReconCPs.shape[0]
# The cost matrix starts by computing squared distances.
# We assume that all points have different types and then subtract the mismatch penalty from
# pairs of points with the same type.
costMatrix = cdist(allGTCPs, allReconCPs, metric="sqeuclidean") + costMatrixTypeMismatchPenalty
for i in range(len(allTypes)-1):
type_ = allTypes[i]
nextType = allTypes[i+1]
gtTypeStart = gtTypeStartIndices[type_]
gtTypeEnd = gtTypeStartIndices[nextType]
reconTypeStart = reconTypeStartIndices[type_]
reconTypeEnd = reconTypeStartIndices[nextType]
if gtTypeStart != gtTypeEnd and reconTypeStart != reconTypeEnd:
costMatrix[ gtTypeStart:gtTypeEnd, reconTypeStart:reconTypeEnd ] -= costMatrixTypeMismatchPenalty
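    # Forbidden pairs are given a cost of 2 * unpairedPointCost, the same as leaving both points unpaired,
    # so the assignment never benefits from pairing two points whose cost exceeds the threshold.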
costMatrix[costMatrix > costThresholdToPair] = 2 * unpairedPointCost
totalNumPoints = costMatrix.shape[0] + costMatrix.shape[1]
# pad the matrix to be square if it is not already.
# This will occur if the GT and reconstructed point clouds have different numbers of points.
# The new rows/columns correspond to the ability to leave points unpaired.
costMatrix = _padMatrixToSquare( costMatrix, unpairedPointCost )
# Compute the cost and return a score based on the cost.
rowInd, colInd = linear_sum_assignment(costMatrix)
totalCost = costMatrix[rowInd, colInd].sum()
averageCost = totalCost / totalNumPoints
if totalCost == 0:
return 10
score = round(10 * ( maximumAverageCost - averageCost ) / maximumAverageCost )
if not canImperfectPredictionsScore10 and score == 10:
return 9
if score < 0:
return 0
return score
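# A minimal usage sketch (the file and array names here are hypothetical):
#
#   score = pointCloudGeometryScore("gt_critical_points.vtk", "CriticalType",
#                                   "recon_critical_points.vtk", "CriticalType", verbose=True)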
def _getImageDataAndArray(filename : str, arrayName : str) -> tuple[vtk.vtkImageData, vtk.vtkDataArray]:
"""
Given a file containing VTK image data, return a variable storing the data, and an array with a given name.
Args:
filename: The name of a file storing VTK image data (.vti)
arrayName: The name of a point array in the file that should be returned.
Returns:
A tuple containing the image data and array.
"""
scriptName = os.path.basename(__file__)
if not os.path.isfile(filename):
raise FileNotFoundError(f"{scriptName}: No such file {filename}")
imageReader = vtk.vtkXMLImageDataReader()
imageReader.SetFileName(filename)
imageReader.Update()
image = imageReader.GetOutput()
if image is None:
raise ValueError(f"{scriptName}: File '{filename}' is not VTK image data")
array = image.GetPointData().GetArray(arrayName)
if array is None:
raise ValueError(f"{scriptName}: File '{filename}' has no array {arrayName}")
return image, array
def _scaleMesh(array : np.ndarray, ratio : int) -> np.ndarray:
"""
Given a numpy array, scales the numpy array to a new size and returns the scaled array.
The scaling assumes that the entries of the numpy array are the vertices of a grid. What is scaled
is the number of squares in the grid. Thus, if the input array has a width of n, the output size will
be ratio*(n-1)+1. Resampling is performed with nearest neighbor so that this is compatible with categorical data.
Args:
array: The numpy array that should be scaled
ratio: The scale factor.
Returns:
The scaled numpy array.
"""
if ratio == 1:
return array.copy()
scriptName = os.path.basename(__file__)
if int(ratio) != ratio or ratio <= 0:
raise ValueError(f"{scriptName} : ratio must be a positive integer")
ratio = int(ratio)
sizeX = ratio * (array.shape[0] - 1) + 1
sizeY = ratio * (array.shape[1] - 1) + 1
newArray = np.zeros(( sizeX, sizeY ))
    # iterate through each square in the grid and scale it.
    for X in range(array.shape[0]):
        for Y in range(array.shape[1]):
            # cells on the far boundary only contribute a single row/column of new points
            if X == array.shape[0] - 1:
                cellSizeX = 1
            else:
                cellSizeX = ratio
            if Y == array.shape[1] - 1:
                cellSizeY = 1
            else:
                cellSizeY = ratio
            # fill in each new point in the square with the value of its nearest original vertex.
            for x in range(cellSizeX):
                if x < cellSizeX / 2:
                    xOffset = 0
                else:
                    xOffset = 1
                for y in range(cellSizeY):
                    if y < cellSizeY / 2:
                        yOffset = 0
                    else:
                        yOffset = 1
                    newArray[X*ratio + x, Y*ratio + y] = array[X+xOffset, Y+yOffset]
    return newArray
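# A quick worked example of _scaleMesh: a 2x2 grid (one cell) scaled by a ratio of 2 becomes a
# 2*(2-1)+1 = 3 point wide grid, with each new vertex taking the value of its nearest original vertex:
# >>> _scaleMesh(np.array([[0, 1], [2, 3]]), 2)
# array([[0., 1., 1.],
#        [2., 3., 3.],
#        [2., 3., 3.]])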
def _resampleToCommonMesh(array1 : np.ndarray, array2 : np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""
Given two arrays storing data defined on two different grids, resample them to a grid of the same size.
    In particular, we assume that the two arrays each store values defined on the vertices of a grid. The grids
    should have the same aspect ratio, but may have different sizes. The number of cells along each axis of the new grid
    will be the lcm of the cell counts of the input grids. Resampling uses nearest neighbor to maintain compatibility with categorical data.
Args:
array1: The first array that is to be resampled.
array2: The second array that is to be resampled.
Returns:
A tuple containing the resampled versions of array1 and array2.
"""
if array1.shape == array2.shape:
return array1.copy(), array2.copy()
scriptName = os.path.basename(__file__)
numCells1 = (array1.shape[0]-1, array1.shape[1]-1)
numCells2 = (array2.shape[0]-1, array2.shape[1]-1)
newSizeX = math.lcm(numCells1[0], numCells2[0])
    ratio1 = newSizeX // numCells1[0]
    ratio2 = newSizeX // numCells2[0]
if ratio1 * numCells1[1] != numCells2[1]*ratio2:
raise ValueError(f"{scriptName}: arrays cannot be resampled to common mesh due to incompatible dimensions")
newArray1 = _scaleMesh(array1, ratio1)
newArray2 = _scaleMesh(array2, ratio2)
return newArray1, newArray2
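# For example, a 3x3 grid (2x2 cells) and a 5x5 grid (4x4 cells) share a common refinement with
# lcm(2, 4) = 4 cells per axis, so both are resampled to 5x5:
# >>> a, b = _resampleToCommonMesh(np.zeros((3, 3)), np.zeros((5, 5)))
# >>> (a.shape, b.shape)
# ((5, 5), (5, 5))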
def partitionAssignmentDiceScore(gtFilename : str, gtArrayName : str, reconFilename : str, reconArrayName : str,
verbose : bool = False, allowResampling : bool = False) -> int:
"""
Given two different partitions of a domain that assign each point to a category, compute a similarity score from 0-10 based on the dice score.
This computation uses the "standard" dice score. It is not concerned with topological connectivity, but only with correct
classification. That is, if two different disconnected regions of the partition both belong to the same category,
they will be considered as part of the same category, and not two separate regions. The dice score is weighted based on the
number of points in each category in the ground truth.
    A score of 0 is considered bad and 10 is good. The maximum dice score that will receive a 0 is controlled by the parameter
    minimumDiceScore defined at the top of the file. Scores above the minimum will scale linearly up to a 10. If resampling is allowed,
    there is a margin for what is considered "perfect" which is controlled by the parameter resamplingMarginForPerfect.
    Args:
        gtFilename: The name of a file containing VTK image data (.vti) that stores the classification of each ground truth point.
        gtArrayName: The name of the array in the GT file that classifies each point. This should store
            a categorical value (such as the index of critical points).
        reconFilename: The name of a file containing VTK image data (.vti) that stores the classification of each reconstructed point.
        reconArrayName: The name of the array in the reconstructed file that classifies each point.
        verbose: Should messages be printed out if there are errors with the files.
        allowResampling: If the ground truth and reconstructed files have different resolutions, should they be resampled onto a new, finer
            grid so that they have the same resolution (if not, a score of 0 will be returned).
Returns:
An integer score from 0-10 that determines how similar the partitions are. A score of 0 is considered bad and 10 is good.
"""
scriptName = os.path.basename(__file__)
# load files
try:
reconImage, reconArray = _getImageDataAndArray(reconFilename, reconArrayName)
except Exception as e:
if verbose:
print(e)
return 0
reconDimensions = reconImage.GetDimensions()
gtImage, gtArray = _getImageDataAndArray(gtFilename, gtArrayName)
gtDimensions = gtImage.GetDimensions()
# check that dimensionality is correct
if len(reconDimensions) == 3:
if reconDimensions[2] != 1:
if verbose:
print(f"{scriptName}: {reconFilename} is not 2D and has shape {reconDimensions}. Excpected a 2D input.")
return 0
reconDimensions = (reconDimensions[0], reconDimensions[1])
if len(gtDimensions) == 3:
if gtDimensions[2] != 1:
raise ValueError(f"{scriptName}: ground truth file {gtFilename} is not 2D and has shape {gtDimensions}")
gtDimensions = (gtDimensions[0], gtDimensions[1])
gtArrayNumpy = vtk_to_numpy(gtArray).reshape(gtDimensions, order="F")
reconArrayNumpy = vtk_to_numpy(reconArray).reshape(reconDimensions, order="F")
# check if resolution matches and resample if necessary and able
if allowResampling:
gtArrayNumpy, reconArrayNumpy = _resampleToCommonMesh(gtArrayNumpy, reconArrayNumpy)
else:
        if gtDimensions != reconDimensions:
            if verbose:
                print(f"{scriptName}: expected ground truth file {gtFilename} and reconstructed file {reconFilename} to have the same dimensions. Found {gtDimensions} and {reconDimensions}")
            return 0
    # compute the cardinalities of each category and the cardinality of their intersections
    # (record first whether resampling actually changed the grids, since flattening discards the shapes)
    wasResampled = allowResampling and (gtArrayNumpy.shape != gtDimensions or reconArrayNumpy.shape != reconDimensions)
    gtArrayNumpy = gtArrayNumpy.flatten()
    reconArrayNumpy = reconArrayNumpy.flatten()
    numPoints = gtArrayNumpy.shape[0]
overlap = {}
totalCardinality = {}
gtCardinality = {}
for i in range(numPoints):
gtVal = gtArrayNumpy[i]
reconVal = reconArrayNumpy[i]
if gtVal == reconVal:
if gtVal in overlap:
overlap[gtVal] += 1
else:
overlap[gtVal] = 1
if gtVal in totalCardinality:
totalCardinality[gtVal] += 1
else:
totalCardinality[gtVal] = 1
if reconVal in totalCardinality:
totalCardinality[reconVal] += 1
else:
totalCardinality[reconVal] = 1
if gtVal in gtCardinality:
gtCardinality[gtVal] += 1
else:
gtCardinality[gtVal] = 1
# compute the dice score and return a score based on the dice score
diceScore = 0
totalOverlap = 0
    for category in gtCardinality:
        # a category may have no correctly classified points, so default its overlap to 0
        categoryOverlap = overlap.get(category, 0)
        totalOverlap += categoryOverlap
        weight = gtCardinality[category] / numPoints
        dice = 2 * categoryOverlap / totalCardinality[category]
        diceScore += weight * dice
    if wasResampled and diceScore > resamplingMarginForPerfect:
        return 10
if totalOverlap == numPoints:
return 10
score = round( 10 * (diceScore - minimumDiceScore) / (1-minimumDiceScore) )
if not canImperfectPredictionsScore10 and score == 10:
return 9
if score < 0:
return 0
return score
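# A minimal usage sketch (the file and array names here are hypothetical):
#
#   score = partitionAssignmentDiceScore("gt_segmentation.vti", "SegmentationId",
#                                        "recon_segmentation.vti", "SegmentationId",
#                                        verbose=True, allowResampling=True)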
def _partition2DDomain(array : np.ndarray) -> np.ndarray:
"""
Given a 2D array that assigns a label to each point, identifies connected sets of points that all share a common label.
Each such connected region will be assigned a different integer id. The ids start at zero. A numpy array is returned
with the same shape as the input, where each point is assigned its region id. In order to have a triangular mesh, we
assume that each point is connected to its east, north, northwest, west, south, and southeast neighbors.
Args:
array: A 2D numpy array that stores a different categorical label for each point.
Returns:
A numpy array with the same shape as the input array. Each point is assigned the label for the connected region that it is
part of.
"""
outputPartition = np.zeros_like(array)
nextPartitionId = 0
# scan through each point. If it has not already been in a region, perform BFS to identify points in the
# same connected region and label them. Repeat until all points have been labeled.
for X in range(array.shape[0]):
for Y in range(array.shape[1]):
if outputPartition[X,Y] == 0:
# label the point and set up BFS
nextPartitionId += 1
                queue = collections.deque([(X, Y)])
value = array[X,Y]
# iterate through points and add neighbors
while len(queue) > 0:
                    point = queue.popleft()
if outputPartition[point] == 0:
x,y = point
outputPartition[point] = nextPartitionId
if x != array.shape[0] - 1 and outputPartition[x+1,y] == 0 and array[x+1,y] == value:
queue.append((x+1,y))
if y != array.shape[1] - 1 and outputPartition[x,y+1] == 0 and array[x,y+1] == value:
queue.append((x,y+1))
if x != 0 and y != array.shape[1] - 1 and outputPartition[x-1,y+1] == 0 and array[x-1,y+1] == value:
queue.append((x-1,y+1))
if x != 0 and outputPartition[x-1,y] == 0 and array[x-1,y] == value:
queue.append((x-1,y))
if y != 0 and outputPartition[x,y-1] == 0 and array[x,y-1] == value:
queue.append((x,y-1))
if x != array.shape[0] - 1 and y != 0 and outputPartition[x+1,y-1] == 0 and array[x+1,y-1] == value:
queue.append((x+1,y-1))
return outputPartition - 1 # subtract 1 so that the lowest partition value is 0
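# A quick worked example of _partition2DDomain: with the triangular connectivity used here, the two 2s
# below are joined through the northwest/southeast diagonal, while the two 1s (which only touch along the
# other diagonal) form separate regions:
# >>> _partition2DDomain(np.array([[1, 2], [2, 1]]))
# array([[0, 1],
#        [1, 2]])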
def partitionTopologicalDiceScore(gtFilename : str, gtArrayName : str, reconFilename : str, reconArrayName : str,
verbose : bool = False, allowResampling : bool = False) -> int:
"""
Given two partitions of a domain, assign a score of 0-10 based on their topological similarity.
In each partition, a different region in the partition is defined as a connected set of points with the same label.
This function optimally matches the different regions between the two partitions such that the dice score is maximized.
Then, the score is computed based on the dice score. The dice score is weighted based on the region sizes in the ground truth.
    A score of 0 is considered bad and 10 is good. The maximum dice score that will receive a 0 is controlled by the parameter
    minimumDiceScore defined at the top of the file. Scores above the minimum will scale linearly up to a 10. If resampling is allowed,
    there is a margin for what is considered "perfect" which is controlled by the parameter resamplingMarginForPerfect.
    Args:
        gtFilename: The name of a file containing VTK image data (.vti) that stores the classification of each ground truth point.
        gtArrayName: The name of the array in the GT file that classifies each point. This should store
            a categorical value (such as the index of critical points).
        reconFilename: The name of a file containing VTK image data (.vti) that stores the classification of each reconstructed point.
        reconArrayName: The name of the array in the reconstructed file that classifies each point.
        verbose: Should messages be printed out if there are errors with the files.
        allowResampling: If the ground truth and reconstructed files have different resolutions, should they be resampled onto a new, finer
            grid so that they have the same resolution (if not, a score of 0 will be returned).
Returns:
An integer score from 0-10 that determines how similar the partitions are. A score of 0 is considered bad and 10 is good.
"""
scriptName = os.path.basename(__file__)
# load files
try:
reconImage, reconArray = _getImageDataAndArray(reconFilename, reconArrayName)
except Exception as e:
if verbose:
print(e)
return 0
reconDimensions = reconImage.GetDimensions()
gtImage, gtArray = _getImageDataAndArray(gtFilename, gtArrayName)
gtDimensions = gtImage.GetDimensions()
# check that dimensionality is correct
if len(reconDimensions) == 3:
if reconDimensions[2] != 1:
if verbose:
print(f"{scriptName}: {reconFilename} is not 2D and has shape {reconDimensions}. Excpected a 2D input.")
return 0
reconDimensions = (reconDimensions[0], reconDimensions[1])
if len(gtDimensions) == 3:
if gtDimensions[2] != 1:
raise ValueError(f"{scriptName}: ground truth file {gtFilename} is not 2D and has shape {gtDimensions}")
gtDimensions = (gtDimensions[0], gtDimensions[1])
gtArrayNumpy = vtk_to_numpy(gtArray).reshape(gtDimensions, order="F")
reconArrayNumpy = vtk_to_numpy(reconArray).reshape(reconDimensions, order="F")
    # resample to new meshes if necessary and able
if allowResampling:
gtArrayNumpy, reconArrayNumpy = _resampleToCommonMesh(gtArrayNumpy, reconArrayNumpy)
dimensions = gtArrayNumpy.shape
else:
        if gtDimensions != reconDimensions:
            if verbose:
                print(f"{scriptName}: expected ground truth file {gtFilename} and reconstructed file {reconFilename} to have the same dimensions. Found {gtDimensions} and {reconDimensions}")
            return 0
dimensions = gtDimensions
# partition the domain into different connected regions.
gtPartition = _partition2DDomain(gtArrayNumpy)
reconPartition = _partition2DDomain(reconArrayNumpy)
# compute pairwise overlap between each pair of regions.
numPoints = dimensions[0] * dimensions[1]
regionOverlaps = {}
gtRegionSizes = {}
reconRegionSizes = {}
    gtPartition = gtPartition.flatten()
    reconPartition = reconPartition.flatten()
for i in range(numPoints):
gtPartitionId = gtPartition[i]
reconPartitionId = reconPartition[i]
if (gtPartitionId, reconPartitionId) in regionOverlaps:
regionOverlaps[(gtPartitionId, reconPartitionId)] += 1
else:
regionOverlaps[(gtPartitionId, reconPartitionId)] = 1
if gtPartitionId in gtRegionSizes:
gtRegionSizes[gtPartitionId] += 1
else:
gtRegionSizes[gtPartitionId] = 1
if reconPartitionId in reconRegionSizes:
reconRegionSizes[reconPartitionId] += 1
else:
reconRegionSizes[reconPartitionId] = 1
numGTPartitions = int(max(gtRegionSizes) + 1)
numReconPartitions = int(max(reconRegionSizes) + 1)
# compute the weighted pairwise dice score between connected regions.
pairwiseDice = np.zeros((numGTPartitions, numReconPartitions))
for i in range(numGTPartitions):
for j in range(numReconPartitions):
if (i,j) in regionOverlaps:
pairwiseDice[i,j] = -(gtRegionSizes[i] / numPoints) * ( 2 * regionOverlaps[(i,j)] / ( gtRegionSizes[i] + reconRegionSizes[j] ) )
else:
pairwiseDice[i,j] = 0
# pad matrix to square for the Hungarian algorithm
pairwiseDice = _padMatrixToSquare(pairwiseDice, 0)
# if the matrix has one nonzero entry per row/column, that means that the regions perfectly overlap and we should
# return a perfect 10.
if np.all(np.count_nonzero(pairwiseDice, axis=0) == 1) and np.all(np.count_nonzero(pairwiseDice, axis=1) == 1):
return 10
# use the Hungarian algorithm to find the pairing between different regions that maximizes the overall weighted dice score.
rowInd, colInd = linear_sum_assignment(pairwiseDice)
diceScore = -pairwiseDice[rowInd, colInd].sum()
# compute and return a score based on the dice score.
if allowResampling and diceScore > resamplingMarginForPerfect and (gtArrayNumpy.shape != gtDimensions or reconArrayNumpy.shape != reconDimensions):
return 10
score = round( 10 * (diceScore - minimumDiceScore) / (1-minimumDiceScore) )
if not canImperfectPredictionsScore10 and score == 10:
return 9
if score < 0:
return 0
return score
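# A minimal usage sketch (the file and array names here are hypothetical):
#
#   score = partitionTopologicalDiceScore("gt_segmentation.vti", "SegmentationId",
#                                         "recon_segmentation.vti", "SegmentationId",
#                                         verbose=True, allowResampling=True)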
def _loadGraphFromVTK(pointsFilename : str, edgesFilename : str, arrayNamesToLoad : list[tuple[str,str]]) -> nx.Graph:
"""
Given a graph stored in VTK files, load the graph and store it as a networkx graph.
The graph must be stored in two legacy VTK files (.vtk). The points file should contain all of the points
    as well as any point labels that should be loaded. The point labels should be stored as VTK arrays defined on each point.
The edges should be stored in a separate file.
Args:
pointsFilename: The name of a file in legacy VTK format (.vtk) storing the points in the graph.
edgesFilename: The name of a file in legacy VTK format (.vtk) storing the edges of the graph. It should only have
cells that are the segments of the graph.
arrayNamesToLoad: A list of tuples of two strings. Each tuple represents a different point label that should be loaded.
The first string in the tuple should store the name of the VTK array that should be used to produce point
labels. The second string should store the name of the point label category in the networkx graph.
Returns:
A networkx graph with point labels defined according to the input files.
"""
scriptName = os.path.basename(__file__)
    # read edges from file
    if not os.path.isfile(edgesFilename):
        raise FileNotFoundError(f"{scriptName}: The file '{edgesFilename}' does not exist")
    edgesReader = vtk.vtkDataSetReader()
    edgesReader.SetFileName(edgesFilename)
    edgesReader.Update()
    edgesOutput = edgesReader.GetOutput()
if edgesOutput is None:
raise ValueError(f"{scriptName}: the file '{edgesFilename}' is not properly formatted legacy VTK data")
# iterate through the edges and represent them as a list of ordered pairs
pointsFromEdges = set()
edgeList = []
numCells = edgesOutput.GetNumberOfCells()
for i in range(numCells):
cell = edgesOutput.GetCell(i)
if cell.GetNumberOfPoints() != 2:
raise ValueError(f"{scriptName}: the file '{edgesFilename}' contains a cell that is not a line segment")
point1 = cell.GetPoints().GetPoint(0)
point2 = cell.GetPoints().GetPoint(1)
edgeList.append((point1,point2))
pointsFromEdges.add(point1)
pointsFromEdges.add(point2)
# create the graph and add the edges
graph = nx.Graph()
graph.add_edges_from(edgeList)
if not os.path.isfile(pointsFilename):
raise FileNotFoundError(f"{scriptName}: The file '{pointsFilename}' does not exist")
# read points from file
pointsReader = vtk.vtkDataSetReader()
pointsReader.SetFileName(pointsFilename)
pointsReader.Update()
pointsOutput = pointsReader.GetOutput()
if pointsOutput is None:
raise ValueError(f"{scriptName}: The file '{pointsFilename}' is not properly formatted legacy VTK data")
pointData = pointsOutput.GetPointData()
if pointData is None:
raise ValueError(f"{scriptName}: The file '{pointsFilename}' does not have any associated point data")
# For each point label, create a dictionary where the keys are the point's coordinates and the values are the label.
pointsSet = set()
arrays = {}
arrayInfo = {}
numPoints = pointsOutput.GetNumberOfPoints()
for arrayName, abbreviatedName in arrayNamesToLoad:
array = pointData.GetArray(arrayName)
if array is None:
raise ValueError(f"{scriptName}: The file '{pointsFilename}' has no point array '{arrayName}'")
arrays[abbreviatedName] = array
arrayInfo[abbreviatedName] = {}
for i in range(numPoints):
point = pointsOutput.GetPoint(i)
        pointsSet.add(point)
for abbreviatedName in arrays:
value = arrays[abbreviatedName].GetTuple1(i)
arrayInfo[abbreviatedName][point] = value
# check that the points in the points file matches the points in the edge file, and return the graph
if pointsSet != pointsFromEdges:
raise ValueError(f"{scriptName}: The files '{pointsFilename}' and '{edgesFilename}' contain a different set of points")
for abbreviatedName in arrays:
nx.set_node_attributes(graph, arrayInfo[abbreviatedName], abbreviatedName)
return graph
def _getInternalDistancesForGWDistance(graph : nx.Graph, points : np.ndarray) -> np.ndarray:
"""
Given a merge tree, compute the distance between each pair of nodes for computing the GW distance.
For mathematical specifics, see:
Mingzhe Li et al. "Flexible and Probabilistic Topology Tracking With Partial Optimal Transport".
doi: 10.1109/TVCG.2025.3561300
This implementation only works with join trees, and not split trees or contour trees.
Args:
graph: A merge tree represented as a networkx graph. Each node should have a label "sf" that stores
the scalar field value associated with the node.
points: A [n,3] numpy array storing the locations of each vertex in the graph. Each row should store
the coordinates of a different vertex.
Returns:
If the graph has n nodes, this will return an [n,n] numpy array where the (i,j) entry stores the
distance from node i to node j.
"""
numPoints = points.shape[0]
C = np.zeros((numPoints,numPoints))
for i in range(numPoints):
for j in range(i+1,numPoints):
node1 = tuple(points[i])
node2 = tuple(points[j])
f1 = graph.nodes[node1]["sf"]
f2 = graph.nodes[node2]["sf"]
path = nx.shortest_path(graph, node1, node2)
lca = max(path, key = lambda n : graph.nodes[n]["sf"])
flca = graph.nodes[lca]["sf"]
dist = abs(f1-flca) + abs(f2-flca)
C[i,j] = dist
C[j,i] = dist
return C
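# For example, if two minima with function values 1 and 2 merge at a saddle with value 5 (the highest node
# on the path between them in the join tree), their internal distance is |1 - 5| + |2 - 5| = 7.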
def mergeTreePartialFusedGWDistanceScore(gtPointsFilename : str, gtEdgesFilename : str, gtCriticalTypeArrayName : str,
gtScalarArrayName : str, reconPointsFilename : str, reconEdgesFilename : str,
reconCriticalTypeArrayName : str, reconScalarArrayName : str, verbose : bool = False) -> int:
"""
Given two merge trees, compute a score of 0-10 for their similarity based on the partial fused Gromov-Wasserstein distance.
For specifics of the distance computation, see:
Mingzhe Li et al. "Flexible and Probabilistic Topology Tracking With Partial Optimal Transport".
doi: 10.1109/TVCG.2025.3561300
Each merge tree should be stored as two legacy VTK files (.vtk) where there is one file for the points and another for the edges.
The points file should label each point with its critical point type and function value. The critical point types should be as
follows: 0: minimum. 1: 1-saddle. 2: 2-saddle. 3: maximum. 4: degenerate.
This function can only be used to compare join trees, and not split trees or contour trees.
    The distance is controlled by an alpha parameter which is defined at the top of the file. The smallest distance that will
score a 0 is given by the maximumPFGWDistance parameter. The score for all other distances scales linearly where a distance of 0
scores a perfect 10.
Due to numerical instability, we allow slightly imperfect distances to score a perfect 10. The largest such distance is
controlled by the perfectPFGWDistanceCutoff parameter.
Args:
gtPointsFilename: The name of a file in legacy VTK format (.vtk) that stores the points of the ground truth merge tree.
gtEdgesFilename: The name of a file in legacy VTK format (.vtk) that stores the edges of the ground truth merge tree. The
edges should each take the form of a cell of type vtkLine.
gtCriticalTypeArrayName: The name of the point array in the GT points file that stores the critical point type of each point.
gtScalarArrayName: The name of the point array in the GT points file that stores the scalar field value at each point.
reconPointsFilename: The name of a file in legacy VTK format (.vtk) that stores the points of the reconstructed merge tree.
reconEdgesFilename: The name of a file in legacy VTK format (.vtk) that stores the edges of the reconstructed merge tree.
        reconCriticalTypeArrayName: The name of the point array in the reconstructed points file that stores the critical
            point type of each point.
reconScalarArrayName: The name of the point array in the reconstructed points file that stores the scalar field value at
each point.
        verbose: Should messages be printed out if there are errors with the files.
    Returns:
        An integer score from 0-10 assessing how similar the merge trees are. A score of 0 is considered bad and 10 is good.
"""
# load the files
try:
reconGraph = _loadGraphFromVTK(reconPointsFilename, reconEdgesFilename, [(reconCriticalTypeArrayName, "ct"), (reconScalarArrayName, "sf")])
except Exception as e:
if verbose:
print(e)
return 0
gtGraph = _loadGraphFromVTK(gtPointsFilename, gtEdgesFilename, [(gtCriticalTypeArrayName, "ct"), (gtScalarArrayName, "sf")])
# set up attribute distance dA
# sort the points by critical point type and arrange into a single stacked numpy array per tree.
gtPointsList = list(gtGraph.nodes())
reconPointsList = list(reconGraph.nodes())
gtPointsList.sort(key = lambda p : gtGraph.nodes[p]["ct"])
reconPointsList.sort(key = lambda p : reconGraph.nodes[p]["ct"])
gtPointsNumpy = np.array(gtPointsList)
reconPointsNumpy = np.array(reconPointsList)
# compute the position in each numpy array where each critical type starts.
gtCTCounts = collections.Counter(gtGraph.nodes[n]["ct"] for n in gtGraph.nodes)
reconCTCounts = collections.Counter(reconGraph.nodes[n]["ct"] for n in reconGraph.nodes)
gtCTStartIndices = {}
reconCTStartIndices = {}
nextGTCTStartIndex = 0
nextReconCTStartIndex = 0
for ct in range(5):
gtCTStartIndices[ct] = nextGTCTStartIndex
if ct in gtCTCounts:
nextGTCTStartIndex += gtCTCounts[ct]
reconCTStartIndices[ct] = nextReconCTStartIndex
if ct in reconCTCounts:
nextReconCTStartIndex += reconCTCounts[ct]
gtCTStartIndices[5] = gtPointsNumpy.shape[0]
reconCTStartIndices[5] = reconPointsNumpy.shape[0]
# dA is computed this way in the paper. This was discovered in a private correspondence with the author.
dA = ot.dist(gtPointsNumpy, reconPointsNumpy)
dA /= np.max(dA)
dA += 1
for ct in range(5):
if gtCTStartIndices[ct] != gtCTStartIndices[ct+1] and reconCTStartIndices[ct] != reconCTStartIndices[ct+1]:
dA[ gtCTStartIndices[ct]:gtCTStartIndices[ct+1], reconCTStartIndices[ct]:reconCTStartIndices[ct+1] ] -= 1
# Compute the distances between nodes within each tree. Normalize the values so that they are
# compatible with dA.
gtC = _getInternalDistancesForGWDistance(gtGraph, gtPointsNumpy)
reconC = _getInternalDistancesForGWDistance(reconGraph, reconPointsNumpy)
normalization = np.max(gtC)
gtC /= normalization
reconC /= normalization
# set up distributions and mass transported for the OT computation.
gtDist = np.ones(gtPointsNumpy.shape[0])
reconDist = np.ones(reconPointsNumpy.shape[0])
mass = min(gtPointsNumpy.shape[0], reconPointsNumpy.shape[0])
distance = ot.gromov.partial_fused_gromov_wasserstein2(dA, gtC, reconC, gtDist, reconDist, mass, alpha=alpha )
# based on the OT distance compute and return a score.
if distance < perfectPFGWDistanceCutoff:
return 10
score = round(10 * (maximumPFGWDistance - distance) / maximumPFGWDistance)
if not canImperfectPredictionsScore10 and score == 10:
return 9
if score < 0:
return 0
return score
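# A minimal usage sketch (the file and array names here are hypothetical):
#
#   score = mergeTreePartialFusedGWDistanceScore("gt_mt_points.vtk", "gt_mt_edges.vtk", "CriticalType", "Scalar",
#                                                "recon_mt_points.vtk", "recon_mt_edges.vtk", "CriticalType", "Scalar",
#                                                verbose=True)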
def _mergeTreePersistenceDiagram(tree : nx.Graph) -> np.ndarray:
"""
Given a merge tree represented as a networkx graph, compute its persistence diagram.
The networkx graph must contain a vertex attribute "sf" that stores the scalar field value at each node.
This algorithm only works for join trees, not split trees or contour trees.
Args:
        tree: The merge tree whose persistence diagram should be computed.
Returns:
The persistence diagram of the merge tree. If n is the number of features in the persistence diagram,
it will be returned as an [n,2] numpy array, where each row stores the (birth, death) points of a feature.
The (birth,death) times are represented as function values and are not normalized.
"""
def f(node):
return tree.nodes[node]["sf"]
    # collect the leaves, drop the highest one, and sort the rest in decreasing order of function value
marked_points = set()
leaves = [n for n in tree if tree.degree(n) == 1]
leaves.sort(key = lambda n : f(n))
leaves = leaves[:-1]
leaves.reverse()
# for each leaf, climb up the tree and pair with the first unpaired saddle.
pairs = []
for n1 in leaves:
val = f(n1)
base_node = n1
higher_neighbor = None
while higher_neighbor is None:
# look for the neighbor with the highest function value
found_next_point = False
for n2 in tree.neighbors(base_node):
if f(n2) > val:
if n2 in marked_points:
base_node = n2
val = f(n2)
else:
higher_neighbor = n2
marked_points.add(n2)
found_next_point = True
break
            if not found_next_point:
                raise ValueError("merge tree is malformed: could not find an unpaired node with a higher function value")
pairs.append((f(n1), f(higher_neighbor)))
return np.array(pairs)
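# A quick worked example of _mergeTreePersistenceDiagram on a small join tree, where two minima ("a" at 0
# and "b" at 1) merge at a saddle ("s" at 2) that connects to the root maximum ("r" at 5). The shallower
# minimum pairs with the saddle, and the global minimum pairs with the root:
# >>> t = nx.Graph([("a", "s"), ("b", "s"), ("s", "r")])
# >>> nx.set_node_attributes(t, {"a": 0.0, "b": 1.0, "s": 2.0, "r": 5.0}, "sf")
# >>> _mergeTreePersistenceDiagram(t)
# array([[1., 2.],
#        [0., 5.]])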
def mergeTreePersistenceWassersteinScore(gtPointsFilename : str, gtEdgesFilename : str, gtScalarArrayName : str,
reconPointsFilename : str, reconEdgesFilename : str, reconScalarArrayName : str, verbose : bool = False) -> int:
"""
Given two different merge trees stored in VTK format,
    compute a similarity score from 0-10 based on the Wasserstein distance of their persistence diagrams.
This implementation only works with join trees, and not split trees or contour trees. Each merge tree
should be stored as two legacy VTK files (.vtk) where there is one file for the points and another for the edges.
The points file should label each point with its function value.
The result is controlled by several different parameters defined at the top of the file. The Wasserstein distance has
an order controlled by wassersteinOrder, while the ground metric has an order given by wassersteinGroundMetric.
    After the distance is computed, an average is taken by dividing it by (|P|+|Q|)/2.
A score of 0 is bad and 10 is good. The lowest value that can score zero points is given by
maximumAverageWassersteinDistance. Scores below this will scale linearly, where a distance of zero scores 10 points.
    Args:
        gtPointsFilename: The name of a file in legacy VTK format (.vtk) that stores the points of the ground truth merge tree.
gtEdgesFilename: The name of a file in legacy VTK format (.vtk) that stores the edges of the ground truth merge tree. The
edges should each take the form of a cell of type vtkLine.
gtScalarArrayName: The name of the point array in the GT points file that stores the scalar field value at each point.
reconPointsFilename: The name of a file in legacy VTK format (.vtk) that stores the points of the reconstructed merge tree.
reconEdgesFilename: The name of a file in legacy VTK format (.vtk) that stores the edges of the reconstructed merge tree.
reconScalarArrayName: The name of the point array in the reconstructed points file that stores the scalar field value at
each point.
        verbose: Should messages be printed out if there are errors with the files.
    Returns:
        An integer score from 0-10 assessing how similar the merge trees are. A score of 0 is considered bad and 10 is good.
"""
try:
reconGraph = _loadGraphFromVTK(reconPointsFilename, reconEdgesFilename, [(reconScalarArrayName, "sf")])
except Exception as e:
if verbose:
print(e)
return 0
gtGraph = _loadGraphFromVTK(gtPointsFilename, gtEdgesFilename, [(gtScalarArrayName, "sf")])
gtPersistenceDiagram = _mergeTreePersistenceDiagram(gtGraph)
reconPersistenceDiagram = _mergeTreePersistenceDiagram(reconGraph)
minFunctionValue = np.min(gtPersistenceDiagram)
maxFunctionValue = np.max(gtPersistenceDiagram)
gtPersistenceDiagram = (gtPersistenceDiagram - minFunctionValue) / (maxFunctionValue - minFunctionValue)
reconPersistenceDiagram = (reconPersistenceDiagram - minFunctionValue) / (maxFunctionValue - minFunctionValue)
wassersteinDistance = gudhi.wasserstein.wasserstein_distance(gtPersistenceDiagram, reconPersistenceDiagram,
order=wassersteinOrder, internal_p=wassersteinGroundMetric)
numAverage = (gtPersistenceDiagram.shape[0] + reconPersistenceDiagram.shape[0]) / 2
wassersteinDistance /= numAverage
if wassersteinDistance == 0:
return 10
score = round( 10 * (maximumAverageWassersteinDistance - wassersteinDistance) / maximumAverageWassersteinDistance )
if not canImperfectPredictionsScore10 and score == 10:
return 9
if score < 0:
return 0
return score
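# A minimal usage sketch (the file and array names here are hypothetical):
#
#   score = mergeTreePersistenceWassersteinScore("gt_mt_points.vtk", "gt_mt_edges.vtk", "Scalar",
#                                                "recon_mt_points.vtk", "recon_mt_edges.vtk", "Scalar",
#                                                verbose=True)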