\documentclass[twoside]{article}
\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc} % use 8-bit T1 fonts
\usepackage{hyperref} % hyperlinks
\usepackage{url} % simple URL typesetting
\usepackage{booktabs} % professional-quality tables
\usepackage{amsfonts} % blackboard math symbols
\usepackage{nicefrac} % compact symbols for 1/2, etc.
\usepackage{microtype} % microtypography
\usepackage{xcolor} % colors
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{graphicx}
\usepackage{natbib}
%\usepackage{booktabs}
%\usepackage{multirow}
%\usepackage[table]{xcolor}
%\usepackage{geometry}
%\usepackage{graphicx} % for resizing tables and figures
%\geometry{margin=1in}
%\usepackage{caption}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% THEOREMS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{amsthm}
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{assumption}[theorem]{Assumption}
\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\newcommand{\hsic}{\operatorname{HSIC}}
\newcommand{\cka}{\operatorname{CKA}}
\newcommand{\kcka}{\operatorname{kCKA}}
\newcommand{\mka}{\operatorname{MKA}}
\newcommand{\tr}{\operatorname{trace}}
\newcommand{\rbf}{\operatorname{RBF}}
\newcommand{\lin}{\operatorname{LIN}}
\newcommand{\knn}{\operatorname{KNN}}
\newcommand{\sgn}{\operatorname{sgn}}
\newcommand{\rtd}{\operatorname{RTD}}
\newcommand{\srtd}{\operatorname{sRTD}}
\newcommand{\imd}{\operatorname{IMD}}
\newcommand{\simd}{\operatorname{sIMD}}
%\usepackage{aistats2026}
% If your paper is accepted, change the options for the package
% aistats2026 as follows:
%
%\usepackage[accepted]{aistats2026}
%
% This option will print headings for the title of your paper and
% headings for the authors names, plus a copyright note at the end of
% the first column of the first page.
% We also include a `preprint' option for non-anonymous preprints.
% Change the options for the package aistats2026 as follows:
%
\usepackage[preprint]{aistats2026}
%
% This option will print headings for the title of your paper and
% headings for the authors names, but does not print the copyright and
% venue note at the end of the first column of the first page.
% If you set papersize explicitly, activate the following three lines:
%\special{papersize = 8.5in, 11in}
%\setlength{\pdfpageheight}{11in}
%\setlength{\pdfpagewidth}{8.5in}
% If you use the natbib package, activate the following three lines:
%\usepackage[round]{natbib}
%\renewcommand{\bibname}{References}
%\renewcommand{\bibsection}{\subsubsection*{\bibname}}
% If you use BibTeX in apalike style, activate the following line:
%\bibliographystyle{apalike}
\begin{document}
\pagestyle{plain}
% If your paper is accepted and the title of your paper is very long,
% the style will print as headings an error message. Use the following
% command to supply a shorter title of your paper so that it can be
% used as headings.
%
%\runningtitle{I use this title instead because the last one was very long}
% If your paper is accepted and the number of authors is large, the
% style will print as headings an error message. Use the following
% command to supply a shorter version of the author names so that
% they can be used as headings (for example, use only the surnames)
%
%\runningauthor{Surname 1, Surname 2, Surname 3, ...., Surname n}
\twocolumn[
\aistatstitle{Manifold Approximation leads to Robust Kernel Alignment}
\aistatsauthor{Mohammad Tariqul Islam \And Du Liu \And Deblina Sarkar}
\aistatsaddress{ MIT \\ mhdtariq@mit.edu \And MIT \\ liudu@mit.edu \And MIT \\ deblina@mit.edu } ]
\begin{abstract}
Centered kernel alignment (CKA) is a popular metric for comparing representations, determining the equivalence of networks, and supporting neuroscience research. However, CKA does not account for the underlying manifold of the data and relies on numerous heuristics that cause it to behave differently at different scales of data. In this work, we propose Manifold-approximated Kernel Alignment (MKA), which incorporates manifold geometry into the alignment task. We derive a theoretical framework for MKA and perform empirical evaluations on synthetic datasets and real-world examples to characterize MKA and compare it to its contemporaries. Our findings suggest that manifold-aware kernel alignment provides a more robust foundation for comparing representations, with potential applications in representation learning.
\end{abstract}
\section{Introduction}
Centered Kernel Alignment (CKA)~\citep{cortes2010twostage,kornblith2019similarity} is a statistical method used to compare the similarity between representations of data, often in the form of feature maps or embeddings.
It works by aligning kernels, which capture pairwise relationships within datasets, and measuring their agreement. CKA is widely used in studies to compare layers of neural networks, analyze representational similarity, and study how models process information~\citep{ramasesh2020anatomy,nguyen2022origins,ciernik2024training}.
Its ability to handle datasets of different sizes and dimensions makes it a powerful tool to understand complex models and evaluate their performance. However, very few studies have characterized CKA under known representations/topologies. Moreover, the reliability of the CKA measure has been under scrutiny numerous times~\citep{davarireliability,murphy2024correcting}.
To address this, we propose Manifold-approximated Kernel Alignment (MKA).
Manifold approximation is a way of understanding and simplifying complex data. In many real-world problems, data with many dimensions - like x-rays, medical records, and neuroimaging data - actually lie on a much smaller, curved structure called a ``manifold'' within the high-dimensional space. Known as the ``manifold hypothesis'', this concept is integral to modern statistics and learning algorithms~\citep{fefferman2016testing}. Manifold approximation uncovers and represents this underlying structure within the high-dimensional data by exploiting the relationships between data points.
It is an integral part of non-linear dimensionality reduction, e.g., t-distributed Stochastic Neighbor Embedding (t-SNE)~\citep{van2008visualizing} and Uniform Manifold Approximation and Projection (UMAP)~\citep{mcinnes2018umap}.
We use manifold approximation to define a non-linear and non-Mercer kernel.
Using this kernel function, we provide a theoretical framework for MKA.
With extensive characterization on synthetic datasets, we show that MKA is more consistent under varying dimensionality and shapes that preserve topology.
We also discovered that MKA captures the underlying topology better and is less sensitive to hyperparameters than CKA and many of its contemporary methods.
To achieve this, we performed experiments using various known shapes and topologies, taking into consideration distributions and behaviors that mimic real-world settings.
We also perform large-scale benchmarks on multiple tasks (vision, natural language, and graph) and datasets to assess the quality of the algorithm.
Overall, this work will pave the way for applying manifold approximation in diverse applications.
An implementation of MKA is available at \href{https://github.com/tariqul-islam/mka}{https://github.com/tariqul-islam/mka}.
\section{Related Works}
The recent interest in alignment metrics stems from the desire to understand how neural networks work and how the intermediate layers of neural networks are related. To compare learned features, we need metrics that measure alignment between two representations. Earlier studies assessed representational similarity with correlation- and mutual-information–based measures~\citep{li2015convergent} and with linear-classifier probes~\citep{alain2016understanding}. The next step came from \cite{raghu2017svcca}, who modeled the problem as one of dimensionality reduction and used singular value decomposition (SVD) to remove noise from the representations, followed by canonical correlation for alignment, namely SVCCA. Later, \cite{morcos2018insights} proposed PWCCA, which extends SVCCA by weighting the canonical directions according to their contribution to the original representations, making the similarity measure more robust to noisy or unimportant dimensions. This dimensionality reduction approach is also followed by a few other studies~\citep{sussillo2013opening,maheswaranathan2019universality}. Other approaches include revisiting classifier probes~\citep{graziani2019interpreting,davari2022probing}, exploring multiple approaches together~\citep{ding2021grounding}, Procrustes analysis~\citep{williams2021generalized}, graphs~\citep{chen2021revisit}, and exploring the effect of transformations~\citep{lenc2015understanding}.
However,~\cite{kornblith2019similarity}’s exploration of representations through kernel methods has sparked renewed attention and discoveries in this area. Known as centered kernel alignment (CKA)~\citep{cortes2010twostage,kornblith2019similarity}, this approach compares two different kernel matrices obtained from the representations. The initial studies~\citep{kornblith2019similarity,nguyen2020wide,raghu2021vision} explored the feature similarity of nearby layers (the famous block structure). However, in contrast to dimensionality reduction methods, CKA lacks an explicit denoising step. Another concurrent observation is that the kernel structure is relatively robust when low-variance components are removed~\citep{ding2021grounding}. Later, \cite{nguyen2022origins} discovered that the block structure is primarily due to a few dominant datapoints. \cite{davarireliability} formalized these observations theoretically. For a comparison of many of the related methods, see \cite{williams2024equivalence}.
Another avenue is to explore the nearest neighbor structure, which, in our opinion, is a natural extension of the CKA philosophy. \cite{huh2024platonic} proposed a mutual nearest neighbor-based extension of CKA. \cite{tsitsulin2019shape} proposed Intrinsic Multi-scale Distance (IMD), which uses the heat kernel to estimate the manifold. Recently, topological data analysis has been applied to propose Representational Topology Divergence (RTD)~\citep{barannikov2021representation,tulchinskiirtd}.
The kernel approach also connects to manifold approximation, a cornerstone of non-linear dimensionality reduction. Methods such as SNE~\citep{hinton2002stochastic,van2008visualizing}, UMAP~\citep{mcinnes2018umap}, and related variants~\citep{wang2021understanding,damrich2022t} rely on efficient sampling of the manifold followed by optimization of a low-dimensional embedding. In particular, the use of k-nearest neighbor graphs and parameter-tuned local neighborhoods has proven to be an effective tool for this class of methods. While k-nearest neighbors show usefulness in some recently proposed alignment metrics~\citep{tsitsulin2019shape,huh2024platonic} and topology~\citep{damrich2024persistent}, the kernels arising from manifold approximation lack wide adoption here and in other kernel-based algorithms.
\begin{figure*}
\centering
\includegraphics[width=1.0\linewidth]{figures/swiss_s_fig.eps}
\caption{Equivalence of two different shapes with 1-D manifolds. (a) Swiss-roll. (b) S-curve by varying parameter $r$. (c) Alignment for the methods as S-curve parameter, $r$, varies. (d) Alignment for different methods as the number of nearest neighbors, $k$, varies. Note that $\cka$, RTD, and SVCCA do not have any notion of nearest neighbors; thus, we have plotted these values at the end of the x-axis.}
\label{fig:swiss_s_fig}
\end{figure*}
\section{Centered Kernel Alignment (CKA)}
Let $X\in\mathbb{R}^{N\times d_1}$ and $Y\in\mathbb{R}^{N\times d_2}$ be feature sets from $N$ samples each with $d_1$ and $d_2$ features, respectively. The corresponding symmetric kernel matrices are $K$ and $L$ with $K_{ij}=k(x_i,x_j)$ and $L_{ij}=l(y_i,y_j)$, respectively. The CKA measure between the two feature sets is given by
\begin{align}
\cka(K,L) = \frac{\hsic(K,L)}{\sqrt{\hsic(K,K)\hsic(L,L)}},
\end{align}
where $\hsic(\cdot,\cdot)$ is the Hilbert-Schmidt independence criterion given by
$\hsic(K,L) = \frac{1}{(N-1)^2}\tr(KHLH)$. Here, $H=I-\frac{1}{N}\mathbf{1}\mathbf{1}^T$ is a centering matrix that mitigates bias in the kernel. There are other debiasing techniques~\citep{song2007supervised,sucholutsky2023getting}; however, we consider the simplest and most widely used technique in practice. $\hsic$ computes the similarity between two kernel matrices of the same size, while the $\cka$ measure normalizes this similarity to $[0,1]$.
Various options exist for the kernel. The common ones include the linear kernel (LIN), given by $k(x_i,x_j)=x_i^Tx_j$, and the radial basis function (RBF) kernel, given by $k(x_i,x_j)=\exp(-\|x_i-x_j\|^2/(2\sigma^2))$, where $\sigma$ is the bandwidth of the Gaussian. The following theorem establishes an equivalence relation between CKA with linear and RBF kernels:
\begin{theorem}[\cite{alvarez2022gaussian}]\label{theorem:alvarez}
$\cka(K_{\rbf},L) = \cka(K_{\lin},L)+O(1/\sigma^2)$ as $\sigma\to\infty$. Here, $K_{\rbf}$ is the RBF kernel matrix with bandwidth $\sigma$, $K_{\lin}$ is the linear kernel matrix, and $L$ is any positive definite symmetric kernel matrix.
\end{theorem}
Informally, it states that at higher values of $\sigma$, CKA with linear and RBF kernels behaves equivalently. Various studies have reported this in empirical settings (e.g., in~\cite{kornblith2019similarity} and Fig. 4(a) of \cite{davarireliability}). Thus, most researchers use the linear kernel, effectively capturing linear relationships alone. And by Theorem~\ref{theorem:alvarez}, even results with an RBF kernel (without properly tuning the bandwidth, $\sigma$) potentially suffer from the same pitfalls as the linear one.
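To make these quantities concrete, the following is a minimal \texttt{NumPy} sketch of $\cka$ with linear and RBF kernels (function names are ours, for illustration only, not from any released package):
\begin{verbatim}
import numpy as np

def hsic(K, L):
    # HSIC via the centered trace; K, L are N x N kernels
    n = K.shape[0]
    H = np.eye(n) - np.ones((n, n)) / n
    return np.trace(K @ H @ L @ H) / (n - 1) ** 2

def cka(K, L):
    return hsic(K, L) / np.sqrt(hsic(K, K) * hsic(L, L))

def linear_kernel(X):
    return X @ X.T

def rbf_kernel(X, sigma):
    sq = np.sum(X ** 2, axis=1)
    d2 = sq[:, None] + sq[None, :] - 2 * (X @ X.T)
    return np.exp(-d2 / (2 * sigma ** 2))
\end{verbatim}
Consistent with Theorem~\ref{theorem:alvarez}, \texttt{cka(rbf\_kernel(X, sigma), L)} approaches \texttt{cka(linear\_kernel(X), L)} as \texttt{sigma} grows.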
\section{Manifold-approximated Kernel Alignment (MKA)}
Manifold approximation is a method for defining a graph that quantifies the pairwise relations within the data. CKA already does this job by producing a dense kernel matrix that considers all possible pairs. In the field of non-linear dimensionality reduction, manifold approximation takes a central role in sampling the manifold of the data to reduce the complexity of computing the kernel matrix. This kernel is often sparse and typically obtained by the k-nearest neighbor ($\knn$) algorithm. Moreover, we will use a kernel function that is non-symmetric (i.e., $k(x_i,x_j)\neq k(x_j,x_i)$). Thus, our kernel will not be positive semidefinite; rather, it will fall in the class of indefinite or non-Mercer kernels~\citep{ong2004learning}. Here, we adopt the manifold approximation method from UMAP\footnote{UMAP uses a graph-based kernel. It performs a symmetrization step to define it. We skip this step for computational efficiency.}. Our manifold-approximated kernel ($K_U$) defines a pairwise relationship by
\begin{align}
K^{(U)}_{ij} &= \begin{cases}
1, &\text{if~~~} i=j\\
\exp{\left(-\frac{d(x_i,x_j)-\rho_i}{\sigma_i}\right)} & \text{if } x_j\in \knn(x_i,k) \\
0 & \text{otherwise}
\end{cases}, \label{eq:UMAP_HIGH_DIM}
\end{align}
where $\knn(x_i,k)$ contains the $k$-nearest neighbors of $x_i$, $d(\cdot,\cdot)$ is a distance metric, $\rho_i = \min_{x_j\in \knn(x_i,k)} d(x_i,x_j)$ is the distance to the nearest neighbor, and $\sigma_i$ is a scaling parameter akin to the bandwidth of the RBF kernel. The scaling parameter is computed such that $\sum_j K^{(U)}_{ij}=1+\log_2(k)$. This constraint fixes each row sum of the kernel matrix to a constant and makes the kernel less sensitive to lone outliers. Additionally, it imposes a rank order within the row. The $\knn$ imposes a stricter constraint on the number of points that are considered related compared to CKA, which allows for a softer, more global measure of similarity. Overall, $K_U$ is a graph on the data that depends on only one hyperparameter: $k$. Now, we define Manifold-approximated Kernel Alignment (MKA) as:
\begin{align}
\mka(K_U,L_U) = \frac{\langle K_UH, L_UH\rangle}{\sqrt{\langle K_UH, K_UH\rangle\langle L_UH, L_UH\rangle}}.
\end{align}
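The following sketch constructs the kernel of Eq.~\eqref{eq:UMAP_HIGH_DIM} and evaluates $\mka$. It uses an exact neighbor search from \texttt{scikit-learn} and a simple bisection for $\sigma_i$; the search range and iteration count are illustrative assumptions, not the reference implementation:
\begin{verbatim}
import numpy as np
from sklearn.neighbors import NearestNeighbors

def manifold_kernel(X, k):
    # K[i, j] = exp(-(d(x_i, x_j) - rho_i) / sigma_i)
    # on the k-NN graph, plus ones on the diagonal
    N = X.shape[0]
    nn = NearestNeighbors(n_neighbors=k + 1).fit(X)
    dist, idx = nn.kneighbors(X)
    dist, idx = dist[:, 1:], idx[:, 1:]  # drop self
    rho = dist[:, 0]       # distance to nearest neighbor
    target = np.log2(k)    # row sum 1 + log2(k); 1 is the diagonal
    K = np.zeros((N, N))
    for i in range(N):
        lo, hi = 1e-12, 1e3       # assumed bisection range
        for _ in range(64):
            sigma = 0.5 * (lo + hi)
            s = np.exp(-(dist[i] - rho[i]) / sigma).sum()
            if s > target:        # row sum grows with sigma
                hi = sigma
            else:
                lo = sigma
        K[i, idx[i]] = np.exp(-(dist[i] - rho[i]) / sigma)
        K[i, i] = 1.0
    return K

def mka(K, L):
    # row-wise centering, then normalized Frobenius products
    KH = K - K.mean(axis=1, keepdims=True)
    LH = L - L.mean(axis=1, keepdims=True)
    return np.sum(KH * LH) / np.sqrt(
        np.sum(KH * KH) * np.sum(LH * LH))
\end{verbatim}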
\begin{figure*}
\centering
\includegraphics[width=1.0\linewidth]{figures/rings_fig.eps}
\caption{Alignment for the ``rings'' data. (a) Point clouds used in the clusters experiment. (b) Alignment using various methods, along with Kendall's rank correlation ($\tau$, higher is better). (c-e) Alignment by varying nearest neighbors, $k$, in (c) IMD, (d) kCKA, and (e) MKA. MKA shows the most robustness to the parameter $k$.}
\label{fig:ranking_rings}
\end{figure*}
Despite using non-symmetric kernels, the measure $\mka$ is symmetric ($\mka(K_U,L_U)=\mka(L_U,K_U)$). However, unlike CKA, which performs both row- and column-wise centering, we opted for only row-wise centering. This leaves additional bias terms in the estimation; however, we show in Appendix~\ref{sec:more_cka} that this simplification does not make $\mka$ less meaningful. Exploiting the properties of the kernel matrix, we can simplify and characterize $\mka$ as follows:
\begin{theorem}\label{thm:mka_simple}
If $\sum_{j} K^{(U)}_{i,j} = D$ and $\sum_{j} L^{(U)}_{i,j} = D$, $\forall i$, then $\mka$ reduces to
\begin{align}
\mka(K_U,L_U) = \frac{\langle K_U,L_U\rangle-D^2}{\sqrt{ (\langle K_U, K_U \rangle-D^2) (\langle L_U, L_U \rangle-D^2) }}.
\end{align}
\end{theorem}
\begin{corollary}\label{thm:mka_range}
If $D < \sqrt{N}$, then $0<\mka(K_U,L_U)<1$.
\end{corollary}
Theorem~\ref{thm:mka_simple} enables fast computation of $\mka$, making it more scalable (especially when combined with approximate nearest neighbor search algorithms). A few works~\citep{chen2021revisit,huh2024platonic} have considered sparsifying the kernel matrix of CKA by taking the top-$k$ values in rows/columns. However, these works do not consider constraining the row/column sums of the kernel matrix.
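In a sparse representation, Theorem~\ref{thm:mka_simple} lets us skip the dense centering altogether; a sketch assuming \texttt{SciPy} sparse kernels with constant row sum $D$:
\begin{verbatim}
import numpy as np
import scipy.sparse as sp

def mka_fast(K, L, D):
    # K, L: sparse N x N kernels, every row summing to D
    kl = K.multiply(L).sum()  # only overlaps contribute
    kk = K.multiply(K).sum()
    ll = L.multiply(L).sum()
    return (kl - D ** 2) / np.sqrt(
        (kk - D ** 2) * (ll - D ** 2))
\end{verbatim}
For the kernel of Eq.~\eqref{eq:UMAP_HIGH_DIM}, $D=1+\log_2(k)$, so the inner products cost only $O(nk)$ once the graph is built.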
\section{Experiments}
In this section, we empirically characterize MKA using various datasets and benchmarks. We compare MKA with several CKA variants with the RBF kernel: 1) $\cka (\sigma=M)$: $\sigma$ is set to the median, $M$, of the entries of the distance matrix, 2) $\cka (\sigma=\delta M)$: $\sigma$ is set to $\delta M$ for considering local relationships (we mostly use $\delta=0.2$ or $0.45$), and 3) k$\cka$: sparsifying the kernel matrix by considering $k$-nearest neighbors of each sample and setting $\sigma$ to be median of the considered distances giving us a simple manifold approximation. kCKA works as an intermediate step between CKA and MKA.
Along with the CKA variants, we consider Representational Topology Divergence (RTD), Intrinsic Multi-scale Distance (IMD), and Singular Vector Canonical Correlation Analysis (SVCCA) metrics.
RTD and IMD provide a metric within $[0,\infty)$, with a lower value indicating stronger alignment. We scale these values to $[0,1]$ using the formulae $\srtd=\exp(-\rtd/\gamma)$ and $\simd=\exp(-\imd/\gamma)$ for the respective methods and tune $\gamma$ for each experiment (for additional figures for RTD and IMD for the experiments, see Appendix~\ref{sec:rtdimdraw}).
In the figures, we explicitly differentiate between IMD (RTD) and sIMD (sRTD), while in the text, we use them interchangeably. We do not consider CKA with a linear kernel in the main text, as the RBF kernel works as a good proxy for the linear one (due to Theorem~\ref{theorem:alvarez}; for additional discussion, see Appendix~\ref{sec:linear_kernel}).
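The rescaling is a one-liner (a sketch; $\gamma$ is tuned per experiment):
\begin{verbatim}
import numpy as np

def rescale(value, gamma):
    # maps RTD/IMD in [0, inf) to (0, 1];
    # higher now means stronger alignment
    return np.exp(-value / gamma)
\end{verbatim}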
\subsection{Equivalence of Shapes}
We start the experiments by comparing two classic shapes: Swiss-roll (Fig.~\ref{fig:swiss_s_fig}(a)) and S-curve (Fig.~\ref{fig:swiss_s_fig}(b), $r=0.5$). Although the Swiss roll and the S-curve look drastically different, they are topologically equivalent: both lie on a one-dimensional nonlinear manifold. Furthermore, the parameter $r$ in the S-curve can give it different shapes (Fig.~\ref{fig:swiss_s_fig}(b), for details see Appendix~\ref{sec:moresroll}). A color map shows the correspondence among the shapes. For $r<0.4$ and $r>0.6$, the colors overlap, and the 1-D manifold disappears. For experiments, we sampled 1000 points from each of the shapes and computed the alignment between them.
\begin{figure*}[t]
\centering
\includegraphics[width=1\linewidth]{figures/gauss_fig.eps}
\caption{Characterizing MKA using synthetic datasets and comparison to other methods. (a) Top: A Gaussian spot; colors identify the position of the points on the x-axis. Middle: Perturbed Gaussian spot. We added noise to the points of the top figure so that the colors slightly overlap. Bottom: A Gaussian spot with no correspondence to the spot on the top.
(b-e) Alignment between a Gaussian spot and its perturbed version when (b) the number of samples, $N$ ($d=1000$), and (c) the number of dimensions, $d$ ($N=5000$), vary for various methods, and their performance as the number of nearest neighbors, $k$, varies for (d) $d=2$ and (e) $d=100$ ($N=5000$).
(f-i) Alignment under lost correspondence when (f) the number of samples, $N$ ($d=1000$), and (g) the number of dimensions, $d$ ($N=5000$), vary for various methods, and their performance as the number of nearest neighbors, $k$, varies for (h) $d=2$ and (i) $d=100$ ($N=5000$).
(j) Two uniform spots are located nearby (top) and translated far away (bottom).
(k-n) Alignment under translation when (k) number of samples, $N$ ($d=1000$), (l) number of dimensions, $d$ ($N=5000$), (m) translation distance, $t$, and (n) number of nearest neighbors, $k$, varies.
Error bars are drawn up to one standard deviation (5 trials for each experiment).}
\label{fig:gauss-fig}
\end{figure*}
CKA with $\sigma=M$ fails to align the manifolds of the Swiss-roll and S-curve ($r=0.5$), giving a low value (Fig.~\ref{fig:swiss_s_fig}(c)). However, for cases where the 1-D manifold structure is absent (e.g., $r<0.4$ and $r>0.6$), CKA provides a higher value. In contrast, CKA with $\delta=0.2$, kCKA, and MKA properly capture the alignment of the two shapes. At $r=0.5$, the alignment of the Swiss-roll and S-curve is highest, and it decreases as the parameter moves away from this point. RTD and IMD do not show any trends, while SVCCA shows an unrelated oscillatory behavior (arising from the curvature). However, kCKA is more sensitive to the number of nearest neighbors $k$ (Fig.~\ref{fig:swiss_s_fig}(d)), while MKA is very robust to this parameter.
\subsection{Ranking Structures}
In the second test, we reproduce the ``rings'' and ``clusters'' experiments that originally appeared in \citet{barannikov2021representation}. This dataset consists of 500 points distributed over five concentric rings (radii varying from 0.5 to 1.5). Then, in each iteration, the number of rings decreases (as if one of the rings collapses onto another) until a single ring remains (Fig.~\ref{fig:ranking_rings}(a)).
Then, we use alignment metrics to compare these formations with the original structure (i.e., five rings). The target of the experiment is to check whether the metrics can track the collapsing rings structure. Kendall's rank correlation, $\tau$, can measure this ranking in a statistical sense (for our case, the absolute value is sufficient and thus higher is better).
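As an illustration (with hypothetical alignment values), the ranking quality can be scored with \texttt{scipy}:
\begin{verbatim}
from scipy.stats import kendalltau

n_rings = [5, 4, 3, 2, 1]   # formations by ring count
scores = [1.00, 0.91, 0.78, 0.62, 0.41]  # hypothetical
tau, _ = kendalltau(n_rings, scores)
print(abs(tau))  # 1.0 for a perfectly monotone metric
\end{verbatim}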
CKA and SVCCA fail to track this collapsing behavior, while RTD closely reflects the changes. CKA ($\delta=0.2$), kCKA, IMD, and MKA capture the ranking quite well (Fig.~\ref{fig:ranking_rings}(b)). However, varying the nearest neighbor parameter, $k$, causes different behaviors in different methods. IMD shows consistent behavior for $k=50$, $100$, and $200$ (Fig.~\ref{fig:ranking_rings}(c)). kCKA provides the correct ranking only for lower values of $k$; at higher values ($k \approx 200$ and above), the method fails (Fig.~\ref{fig:ranking_rings}(d)). MKA provides the correct ranking for all possible values of $k$ (Fig.~\ref{fig:ranking_rings}(e)).
The ``clusters'' set consists of 300 points sampled from a bivariate normal distribution ($\mathcal{N}(\mathbf{0},I_2)$). Then the points are split into $2$, $3$, $\dots$, $12$ clusters by moving them onto a circle of radius $10$ (Fig.~\ref{fig:clusters_break}(a) in Appendix~\ref{sec:clusters_data}). The goal is to test whether the metrics detect the emergence of multiple clusters. Overall, kCKA, RTD, and MKA capture the ranking quite well (Fig.~\ref{fig:clusters_break}(b)), and the methods (where applicable) repeat the same behavior as in the ``rings'' experiment when $k$ varies (Fig.~\ref{fig:clusters_break}(c-e)).
\subsection{Characterizing The Algorithms}
\begin{figure*}[t]
\centering
\includegraphics[width=1.0\linewidth]{figures/resi_fig.eps}
\caption{Aggregated ranks of alignment measures using the ReSi benchmark across different models and tests, separated by domains: (a) vision, (b) natural language processing, and (c) graph. Boxplots indicate quartiles of rank distributions; the whiskers extend up to 1.5 times the interquartile range. The black dots indicate the mean rank.}
\label{fig:resi}
\vspace{1em}
\centering
\includegraphics[width=\linewidth]{figures/network.eps}
\caption{Alignment between features from different layers of ResNet-18 trained on the CIFAR-10 dataset. (a) Alignment between layers of a network using (left) $\cka$, (middle) kCKA, and (right) $\mka$. (b) Alignment between layers across different networks using (left) $\cka$, (middle) kCKA, and (right) $\mka$. The results are an average of 10 instances of ResNet-18 trained on CIFAR-10, each initialized randomly and using a subset of $10000$ samples from the test set.}
\label{fig:resnet18}
\end{figure*}
In this section, we characterize the algorithms using several synthetic datasets inspired by real-world scenarios. First, we consider the alignment between a $d$-dimensional Gaussian spot ($x_i\sim\mathcal{N}(\mathbf{0},I_d)$, Fig.~\ref{fig:gauss-fig}(a) top) and its perturbed version ($y_i=x_i+0.5\,\mathcal{N}(\mathbf{0},I_d)$, Fig.~\ref{fig:gauss-fig}(a) middle).
Such a scenario may occur when a representation learning algorithm runs repeatedly. This results in altered orders of the points in the point cloud (seen as colors slightly overlapping).
As the number of samples in the spots increases ($d=1000$, Fig.~\ref{fig:gauss-fig}(b)), their alignment values using different methods decrease slightly (notable exceptions are IMD, which increases and then stabilizes, and CKA ($\delta=0.2$), which saturates).
This is expected: the denser the spot gets, the higher the chance that the perturbation reorders points within the point cloud.
However, the dimensionality ($d$) of the data affects the values differently ($N=5000$, Fig.~\ref{fig:gauss-fig}(c)).
All methods, except CKA with $\delta=0.2, 0.45$ and RTD, are fairly consistent as $d$ increases.
CKA with $\delta=0.2$ saturates rapidly, while with $\delta=0.45$ it approaches saturation as $d$ increases. sRTD starts with a lower value, and it increases with $d$.
Additionally, $k\cka$ shows inconsistent behavior as the number of nearest neighbors ($k$) increases and sIMD shows high variance, while $\mka$ values remain consistent across a wide range (Fig.~\ref{fig:gauss-fig}(d,e)).
Overall, $\mka$ is stricter toward perturbations of the features than the other methods.
We can take this scenario to the extreme and make the colors completely overlap each other (Fig.~\ref{fig:gauss-fig}(a), bottom).
The orderings (based on some criterion) of both the Gaussian spots will not correspond to each other at all, and thus, we call it a lost-correspondence scenario.
The $\cka$ (including $\delta=0.45$) and SVCCA measures are sensitive to the number of samples, while $\kcka$, RTD, and $\mka$ are fairly consistent ($d=1000$, Fig.~\ref{fig:gauss-fig}(f)).
The CKA ($\delta=0.45$ as well) measure tends to increase with higher data dimensionality, reflecting the effect of the curse of dimensionality ($N=5000$, Fig.~\ref{fig:gauss-fig}(g)).
SVCCA and RTD also behave similarly.
$\kcka$, IMD, and $\mka$, on the other hand, are fairly robust and less affected by the curse.
However, like before, $\kcka$ is highly sensitive to the number of nearest neighbors ($k$), which is resolved at higher values, $k\geq200$ (Fig.~\ref{fig:gauss-fig}(h,i)).
As before, $\mka$ is consistent for a wide range of $k$, even for values smaller than $200$.
Overall, $\mka$ is more consistent with varying hyperparameters than other methods.
Finally, we consider two uniform spots separated by a small distance (Fig.~\ref{fig:gauss-fig}(j); this scenario is inspired by~\cite{davarireliability}).
Both spots ($N=2500$ each) are drawn from uniform distributions $x_i\sim\mathcal{U}(-0.5,0.5)$ and $y_i\sim p+\mathcal{U}(-0.5,0.5)$ with $p=[1.1+t, 0, 0, \dots, 0]$, where the translation distance, $t\,(>0)$, controls the separation of the two spots.
Regardless of the translation distance, the topology of the data remains the same, and alignment should be high.
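For reference, the three scenarios can be generated as follows (a sketch; the constants mirror those stated above):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
N, d, t = 5000, 1000, 10

X = rng.standard_normal((N, d))      # Gaussian spot
Y_pert = X + 0.5 * rng.standard_normal((N, d))
Y_lost = rng.standard_normal((N, d)) # independent spot:
                                     # lost correspondence
p = np.zeros(d)
p[0] = 1.1 + t   # translation along the first axis
X_spot = rng.uniform(-0.5, 0.5, size=(2500, d))
Y_spot = p + rng.uniform(-0.5, 0.5, size=(2500, d))
\end{verbatim}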
Surprisingly, most methods are consistent as the number of samples increases (except CKA with $\delta=0.2$).
CKA gives a low alignment score between the two representations, while kCKA and IMD stabilize as the number of samples increases.
We get a more diverse result as the number of dimensions, $d$, (Fig.~\ref{fig:gauss-fig}(l)) and the translation distance, $t$, (Fig.~\ref{fig:gauss-fig}(m)) vary.
$\cka$ fails to capture this phenomenon: as $t$ increases, the $\cka$ value decreases, and even a smaller bandwidth ($\delta=0.2$) does not help.
Surprisingly, RTD also fails to capture the invariance of the topology. SVCCA shows maximum alignment between the two representations under all circumstances.
In contrast, $\kcka$, IMD, and $\mka$ settle to a constant, higher value as $d$ and $t$ increase.
As $k$ increases, the pattern mirrors the earlier experiments; by $k\simeq100$ most methods stabilize, whereas MKA is already consistent at small $k$ (Fig.~\ref{fig:gauss-fig}(n)).
\subsection{Evaluation using Representation Similarity (ReSi) Benchmark}
Representation Similarity (ReSi) Benchmark~\citep{klabunde2024resi} is a collection of six different tests to assess the performance of representational similarity or alignment metrics.
The tests are Correlation to Accuracy Difference (correlates the alignment score of a pair of models with the absolute difference in their accuracies), Correlation to Output Difference (correlates alignment metrics with the instance-wise disagreement and Jensen-Shannon divergence of the predictions), Label Randomization (evaluates whether alignment metrics can separate models trained with varying levels of label corruption), Shortcut Affinity (evaluates whether alignment metrics can distinguish models trained with spurious shortcut features at different shortcut–label correlation strengths), Augmentation (evaluates whether alignment metrics can stratify models trained with varying augmentation strengths, when all are tested on the same clean, non-augmented set), and Layer Monotonicity (evaluates whether the alignment score decreases as the distance between layers increases within the same model). We used the ReSi tests on vision, natural language processing (NLP), and graph domain tasks. For the vision task, we used the ImageNet-100 dataset and seven representative networks from three different architectures: Residual Networks (ResNet-18, ResNet-34, ResNet-101)~\citep{he2016deep}, Visual Geometry Group networks (VGG-11, VGG-19)~\citep{simonyan2014very}, and Vision Transformers (ViT B32, ViT L32)~\citep{dosovitskiy2020image}. For the language task, we used the MNLI dataset~\citep{williams2017broad} and two language models: BERT~\citep{devlin2019bert} and ALBERT~\citep{lan2019albert}. For the graph data, we explored three different datasets: Cora~\citep{yang2016revisiting}, Flickr~\citep{zeng2019graphsaint}, and OGBN-Arxiv~\citep{hu2020open}, and four different graph networks: Graph Convolutional Network (GCN)~\citep{kipf2016semi}, Graph Sample and Aggregate (SAGE)~\citep{hamilton2017inductive}, Graph Attention Network (GAT)~\citep{velivckovic2017graph}, and Position-aware Graph Neural Networks (PGNN)~\citep{you2019position}. The original ReSi benchmark concluded that no method consistently outperforms others across domains. We expect to find a similar result here as well.
Figure~\ref{fig:resi} summarizes mean-rank distributions per domain (lower is better). We used $k=100$ to compute the nearest neighbor graphs.
In the vision domain, MKA attains the best central tendency with the tightest spread, edging out kCKA and clearly outperforming CKA and RTD (Fig.~\ref{fig:resi}(a)).
On the other hand, in the NLP domain, CKA (including $\delta=0.45$) is a clear winner (mean, median, and variance); however, MKA remains within striking distance while maintaining a compact dispersion, i.e., it is competitive without heavy sensitivity to kernel bandwidths (Fig.~\ref{fig:resi}(b)).
Finally, for graphs, the methods that focus on local geometry, i.e., MKA, kCKA, and CKA ($\delta=0.2,0.45$), cluster together (they share the same median, and their means are within $\pm1$ rank).
Overall, MKA delivers top performance in vision, matches the best local methods on graphs, and stays robustly competitive in NLP, making it a consistent, parameter-light choice when a single alignment metric must generalize across modalities.
We should also note that kCKA performs comparably (and better in many cases). Thus, CKA variants using $k$-nearest neighbors correlate strongly with each other.
\subsection{Neural Network Representations}
In this section, we explore representational similarity using ResNet-18 models trained on the CIFAR-10 dataset.
First, we compute alignment between feature representations extracted from different layers (after activation) of the network to investigate how representational structure evolves across the depth of the model (Fig.~\ref{fig:resnet18}(a)).
We considered only CKA, kCKA, and MKA for this experiment (as other methods have been explored elsewhere) and highlight how these three competing methods process information.
Using CKA, we can reproduce the famous block structure~\citep{kornblith2019similarity,nguyen2022origins}.
As noted previously, dominant clusters cause the block structure~\citep{nguyen2022origins}.
However, when a k-nearest neighbor graph constrains the kernel, this block structure fades. For kCKA, the block structure appears in the early layers but is less pronounced in the later layers.
$\mka$ takes this to its limit: the block structure is even less pronounced throughout the network and disappears in the later layers, indicating some perturbation as the data flows through the network. Overall, CKA is more sensitive than kCKA to dominant high-density regions of large distances in the distance matrix, and MKA is less sensitive still. When we compare features from ten randomly initialized ResNet-18 networks, the block structure is still present for $\cka$, less pronounced for kCKA, and disappears in the later layers for $\mka$ (Fig.~\ref{fig:resnet18}(b)).
This suggests that the same architecture, under different random initializations, can converge to distinct internal orientations, i.e., manifold-level perturbations of the learned representation, despite similar test accuracy.
\subsection{Computational Complexity}
Let us assume the two representations have $n$ samples each, with $d_1$ and $d_2$ dimensions, respectively. Most of the algorithms rely on nearest neighbor search and matrix multiplications. In particular, constructing the k-nearest neighbor graphs ($O(n^2(d_1+d_2+\log k))$) is the costliest operation within many of them. Additionally, MKA relies on the bisection method to compute the $\sigma_i$ values (Eq.~\ref{eq:UMAP_HIGH_DIM}) with a complexity of $O(nk\log(\Delta/\epsilon))$, where $\Delta$ is the search range and $\epsilon$ is the tolerance. For MKA, $\log_2(\Delta/\epsilon)=\log_2(1000/10^{-12})\simeq50$ is a constant, which we ignore. Overall, the complexity of MKA is $O(n^2(1+d_1+d_2+\log k)+nk)$. The complexity of the other algorithms is: kCKA, $O(n^3+n^2(d_1+d_2+\log k))$; CKA, $O(n^3+n^2(d_1+d_2))$; both have cubic complexity in $n$ due to the dense matrix products, which MKA avoids. The complexity of SVCCA is $O(nd_1\min(n,d_1)+nd_2\min(n,d_2))$ (dominated by the singular value decomposition). RTD complexity depends on two factors: computing the distance matrix, which is the same as the others, and computing the topological barcode, which is cubic in the number of simplices~\citep{barannikov2021representation}. IMD is dominated by constructing the k-NN graph and performing $m$ steps of the stochastic Lanczos quadrature algorithm with $n_v$ starting vectors ($O(n_v(m\log m+knm))$), giving an overall complexity of $O(n^2(d_1+d_2+\log k)+n_v(m\log m+knm))$~\citep{tsitsulin2019shape}. On the other hand, the space complexity is roughly the same for all the algorithms, primarily to store the kernel matrices, and thus is dominated by the $O(n^2)$ term (or $O(nk)$ if only k-NN graphs are stored).
\section{Discussion and Conclusions}
In this paper, we introduced Manifold-approximated Kernel Alignment (MKA) and characterized it using several datasets. Here, we computed the kernel matrix and compared it to CKA (and its variations) and other topological metrics on equal terms. We found that methods applying a k-NN graph are suitable for comparing topological structures (MKA and kCKA in Figs.~\ref{fig:swiss_s_fig}, \ref{fig:ranking_rings}, and \ref{fig:clusters_break}) and are sometimes even better than their topological counterparts. Compared to other methods, MKA is less sensitive to hyperparameters. By analyzing Gaussian distributions and their perturbations (Fig.~\ref{fig:gauss-fig}), we showed that methods relying on local neighborhoods are less sensitive to intrinsic parameters of datasets (number of samples and dimensionality). However, most methods require hyperparameter tuning; MKA shows the most consistent behavior and is not reliant on such tuning. When tested with uniform spots and their translation, we found MKA to be robust, even compared to other topological methods (Fig.~\ref{fig:gauss-fig}(j-n)). We then showed that MKA is competitive with contemporary methods across a wide range of tasks on the ReSi benchmark (Fig.~\ref{fig:resi}). By analyzing representations of neural networks, we conclude that $\mka$ perceives neural network representations differently than $\cka$, with kCKA working as an intermediate step.
CKA is globally density-weighted: a single high-density region of large distances can dominate the score. kCKA mitigates this by restricting interactions to local k-NN neighborhoods, making it less susceptible to interactions from large distances. MKA goes further by ordering neighbors within each neighborhood and assigning weights that depend on rank and local density.
In essence, vanilla CKA ignores ranks and depends solely on pairwise distances, while kCKA merely dichotomizes pairs into ``within-k'' vs ``outside-k'' and treats the k nearest neighbors essentially uniformly.
RTD, a topological approach, sometimes tracks true topology and other times behaves like CKA. Our hypothesis is scale: RTD relies on persistence across scales (barcodes), whereas kCKA and MKA are single-scale (k-NN).
At a fixed k, the k-NN graph is either faithful or not; persistence, by averaging over scales, can smooth away local structure, occasionally drifting toward density-driven behavior.
Future work could explore other kernel functions, e.g., effective resistance~\citep{doyle1984random} and diffusion distance~\citep{coifman2006diffusion}, and focus on additional debiasing techniques~\citep{sucholutsky2023getting}. MKA could find use wherever alignment is beneficial, e.g., in neuroscience for monitoring brain activity, neural decoding, and brain representation analysis, and in graph learning for protein interactions.
\section*{Data and Code Availability}
The data used in this research are generated from public sources. For details, see the supplementary materials. The code used to generate the figures is available at \href{https://github.com/tariqul-islam/mka_paper_code}{https://github.com/tariqul-islam/mka\_paper\_code}.
\section*{Acknowledgment}
Mohammad Tariqul Islam is supported by the MIT-Novo Nordisk Artificial Intelligence Fellowship. Special thanks to Baju C. Joy and Pengrui Zhang for the discussions.
\bibliographystyle{apalike}
\bibliography{thesis,kernel_alignment,du_references}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\clearpage
\appendix
\thispagestyle{empty}
\onecolumn
%\aistatstitle{Manifold Approximation leads to Robust Kernel Alignment:\\Supplementary Materials}
\section*{Supplementary Material}
In the supplementary material, we provide some additional details and results. Section~\ref{sec:proofs} provides the proofs for MKA. Section~\ref{sec:clusters_data} provides details of the ``clusters'' data experiment. Section~\ref{sec:moresroll} gives details of the Swiss-roll and S-curve. In Section~\ref{sec:more_cka}, we discuss CKA with manifold approximation. In Section~\ref{sec:linear_kernel}, we discuss the linear kernel of kCKA. Section~\ref{sec:rtdimdraw} gives supplementary figures for the experiments in the main text. We provide implementation details in Section~\ref{sec:implementaiton_Details}. Finally, we detail the ReSi benchmark in Section~\ref{sec:resi_details} and provide the corresponding supplementary results in Section~\ref{sec:resi_scores}.
\section{Proofs}\label{sec:proofs}
\begin{proof}[(Proof of Theorem~\ref{thm:mka_simple})]
Let $K_UH=\bar{K}$ and $L_UH=\bar{L}$. Then,
\begin{align}
\bar{K}_{ij} &= K_{ij}^{(U)} - \frac{1}{N} \sum_{m} K_{im}^{(U)} \nonumber \\
&= K_{ij}^{(U)} - \frac{1}{N} D.
\end{align}
Now we can compute the inner product,
\begin{align}
\langle \bar{K}, \bar{K} \rangle &= \sum_{i,j} (K_{ij}^{(U)}-\frac{1}{N} D)^2 \nonumber \\
&= \sum_{i,j} \left( \left (K_{ij}^{(U)} \right)^2 - \frac{2}{N}D K_{ij}^{(U)} + \frac{1}{N^2} D^2 \right) \nonumber \\
&= \sum_{i,j} \left (K_{ij}^{(U)} \right)^2 - \frac{2}{N}D \sum_{i,j} K_{ij}^{(U)} + \frac{1}{N^2} D^2 \sum_{i,j} 1 \nonumber \\
&= \sum_{i,j} \left (K_{ij}^{(U)} \right)^2 - D^2 \nonumber \\
&= \langle K_U, K_U \rangle - D^2
\end{align}
We used the fact that $\sum_{i,j}K_{ij}^{(U)}=ND$ and $\sum_{i,j}1=N^2$. Similarly, $\bar{L}_{ij}=L_{ij}^{(U)} - \frac{1}{N} D$ and $\langle \bar{L}, \bar{L} \rangle = \langle L_U, L_U \rangle - D^2$. Finally,
\begin{align}
\langle \bar{K}, \bar{L} \rangle &= \sum_{i,j} \left(K_{ij}^{(U)} - \frac{1}{N} D\right) \left(L_{ij}^{(U)} - \frac{1}{N} D\right) \nonumber \\
&= \sum_{i,j} \left( K_{ij}^{(U)} L_{ij}^{(U)} - \frac{1}{N} D \left(K_{ij}^{(U)}+L_{ij}^{(U)}\right) + \frac{1}{N^2} D^2 \right) \nonumber \\
&= \sum_{i,j} K_{ij}^{(U)} L_{ij}^{(U)} - 2D^2 + D^2 \nonumber \\
&= \langle K_U, L_U \rangle - D^2
\end{align}
\end{proof}
\begin{figure}
\centering
\includegraphics[width=1\linewidth]{figures/cluster_fig.png}
\caption{Alignment for the ``clusters'' data. (a) Point clouds used in the clusters experiment. (b) Alignment using various methods, along with Kendall's rank correlation (higher is better). (c-e) Alignment by varying nearest neighbors, $k$, in (c) IMD, (d) kCKA, and (e) MKA. MKA shows the most robustness to parameters.}
\label{fig:clusters_break}
\end{figure}
\begin{proof}[(Proof of Corollary~\ref{thm:mka_range})]
We start from the inner products,
\begin{align}
\langle K_U, K_U \rangle - D^2 &= \sum_{i,j} \left (K_{ij}^{(U)} \right)^2 -D^2 \nonumber \\
&= \sum_{i} 1 + \sum_{i,j, i\neq j} \left (K_{ij}^{(U)} \right)^2 - D^2 \nonumber \\
&= N - D^2 + \sum_{i,j, i\neq j} \left (K_{ij}^{(U)} \right)^2.
\end{align}
Similarly,
\begin{align}
\langle L_U, L_U \rangle - D^2 &= N - D^2 + \sum_{i,j, i\neq j} \left (L_{ij}^{(U)} \right)^2
\end{align}
And finally,
\begin{align}
\langle K_U, L_U \rangle - D^2 &= N - D^2 + \sum_{i,j, i\neq j} K_{ij}^{(U)} L_{ij}^{(U)}
\end{align}
The term $\sum_{i,j, i\neq j} K_{ij}^{(U)} L_{ij}^{(U)}$ is zero if the nearest neighbor sets of the two kernels do not overlap; otherwise, it is positive. Thus, the lower bound is guaranteed when $N>D^2$. The upper bound follows from the Cauchy–Schwarz inequality.
\end{proof}
\section{Clusters Data}\label{sec:clusters_data}
Similar to the ``rings'' data, the ``clusters'' data was also compiled by~\cite{barannikov2021representation}. The set consists of 300 points sampled from a 2D normal distribution (mean $(0,0)$). Then the points are split into $2$, $3$, $\dots$, $12$ clusters by moving them onto a circle of radius $10$ (Fig.~\ref{fig:clusters_break}).
Then, we use alignment metrics to compare these formations with the original structure (i.e., one cluster). The target of the experiment is to check whether the metrics can track that the data breaks into multiple clusters. Kendall's rank correlation, $\tau$, can measure this in a statistical sense (for our case, the absolute value is sufficient and thus higher is better).
CKA, SVCCA, and IMD fail to track this clustering behavior, while kCKA, RTD, and MKA capture the ranking quite well (Fig.~\ref{fig:clusters_break}(b)). However, varying the nearest neighbor parameter, $k$, causes different behaviors in different methods. IMD shows inconsistent behavior (Fig.~\ref{fig:clusters_break}(c)). kCKA provides the correct ranking only for lower values of $k$; at higher values ($k \approx 100$ and above), the method fails (Fig.~\ref{fig:clusters_break}(d)). MKA provides the correct ranking for all possible values of $k$ (Fig.~\ref{fig:clusters_break}(e)).
\section{Details of Swiss-roll and S-Curve}\label{sec:moresroll}
The Swiss-roll and S-curve are parameterized by a variable $t\in[0,1]$. The S-curve has an additional control parameter $r\in[0,1]$ that determines its shape; $r=0.5$ gives the familiar S-curve used in many studies. We only consider 2-D shapes in this study.
\begin{align}
\textbf{Swiss-Roll:} \nonumber \\
z &= \frac{3\pi}{2} (1+2t) \\
x_1 &= z \cos(z) \\
x_2 &= z \sin(z) \\
\textbf{S-Curve:} \nonumber \\
z &= 3 \pi (t-r) \\
y_1 &= \sin(z) \\
y_2 &= \sgn(z) (\cos(z)-1)
\end{align}
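A sketch sampling both curves; sharing the parameter $t$ between the shapes preserves the point-wise correspondence (our assumption, reflected by the shared color map in Fig.~\ref{fig:swiss_s_fig}):
\begin{verbatim}
import numpy as np

def swiss_roll(t):
    z = 1.5 * np.pi * (1 + 2 * t)
    return np.column_stack([z * np.cos(z), z * np.sin(z)])

def s_curve(t, r=0.5):
    z = 3 * np.pi * (t - r)
    return np.column_stack(
        [np.sin(z), np.sign(z) * (np.cos(z) - 1)])

t = np.random.default_rng(0).uniform(0, 1, 1000)
X, Y = swiss_roll(t), s_curve(t)  # 1000 paired points
\end{verbatim}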
\section{CKA with Manifold Approximation}\label{sec:more_cka}
\begin{figure*}[t]
\centering
\includegraphics[width=\linewidth]{figures/cka_time.eps}
\caption{Effect of kernel approximation on the $\cka$ algorithm. (a) Alignment between Swiss-roll and S-curve. (b,c) Gaussian spots under (b) perturbation and (c) lost correspondence. $\cka$ with manifold approximation ($\cka(K_U^{(S)},L_U^{(S)})$) behaves similarly to $\mka$, but with less bias. (d) Computation time for $\cka$ and $\mka$. $\mka$ requires much less time than $\cka$ (average of 5 runs). Note that we have excluded the computation time for the kernel matrix.}
\label{fig:cka_time}
\end{figure*}
We can symmetrize the manifold approximated kernel matrix, $K_U$, using the probabilistic t-conorm given by
\begin{align}
K_U^{(S)}=K_U+K_U^T - K_U \circ K_U^T,
\end{align}
where $\circ$ denotes element-wise multiplication. This operation does not guarantee a positive semidefinite kernel. However, we can now directly apply CKA on the approximated kernels $K_U^{(S)}$ and $L_U^{(S)}$. The $\cka$ results obtained from this kernel matrix behave similarly to those of $\mka$ but with less bias (Fig.~\ref{fig:cka_time}(b-c)). However, computing $\mka$ requires much less time than $\cka$ (Fig.~\ref{fig:cka_time}(d), using \texttt{NumPy}~\citep{harris2020array}).
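In code, the symmetrization is a single line (\texttt{NumPy} sketch):
\begin{verbatim}
import numpy as np

def symmetrize(K):
    # probabilistic t-conorm: fuzzy union of the
    # directed k-NN graph (elementwise product)
    return K + K.T - K * K.T
\end{verbatim}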
\section{Linear vs Non-linear CKA, kCKA}\label{sec:linear_kernel}
Linear and non-linear CKA in their default forms provide similar values; this has been known empirically~\citep{kornblith2019similarity,davarireliability} and, recently, theoretically (Theorem~\ref{theorem:alvarez}; \citealp{alvarez2022gaussian}). Following this, we can claim the following for linear and non-linear kCKA:
\begin{corollary}[Linear vs. Non-linear kCKA]\label{theorem:kcka}
$\kcka(K_{\rbf},L) = \kcka(K_{\lin},L)+O(1/\sigma^2)$ as $\sigma\to\infty$. Here, $K_{\rbf}$ is the RBF kernel matrix with bandwidth $\sigma$, $K_{\lin}$ is the linear kernel matrix, and $L$ is any positive definite symmetric kernel matrix.
\end{corollary}
In our implementation of kCKA, we constrained $\sigma$ to be the median of the distances within the k-NN set, which is often small compared to its CKA counterpart. As a result, while linear CKA and non-linear CKA can be equivalent by default, it is hardly the case for kCKA. To make them equivalent, one has to arbitrarily set a large $\sigma$, which we consider an uncommon scenario.
\section{Additional Details of Experiments}\label{sec:rtdimdraw}
In Figs.~\ref{fig:all_dim_data}-\ref{fig:imd_raw}, we show additional data for the experiments from Fig.~\ref{fig:gauss-fig}. Figure~\ref{fig:all_dim_data} shows the dependence on the nearest neighbor parameter $k$ for obtaining a stable result. Overall, MKA is stable at all scales, while the others need a large value of $k$. Figure~\ref{fig:all_t_data} shows additional results for $t=50$ (in the main text, we only showed $t=10$).
In the main text, we scaled RTD and IMD values to $[0,1]$ using an exponential function so that they become easier to compare with MKA and the CKA variants. Here we show (Figs.~\ref{fig:rtd_raw} and~\ref{fig:imd_raw}) the raw values of RTD and IMD for some of the experiments from Fig.~\ref{fig:gauss-fig}. In many cases, these algorithms do not show any trends. Moreover, their raw values vary widely across settings.
\clearpage
\begin{figure}[t]
\centering
\includegraphics[width=1\linewidth]{figures/all_dim_data.eps}
\caption{Dependence of the similarity measures on the nearest-neighbor parameter $k$.}
\label{fig:all_dim_data}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.8\linewidth]{figures/translate_t1050.eps}
\caption{Additional data for the Uniform Spots experiment. (left column) Data from the main text for $t=10$. (right column) Data for $t=50$.}
\label{fig:all_t_data}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=0.8\linewidth]{figures/rtd_raw.eps}
\caption{RTD values for a few experiments from Fig.~\ref{fig:gauss-fig}.}
\label{fig:rtd_raw}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=0.8\linewidth]{figures/fig_imd.eps}
\caption{IMD values for a few experiments from Fig.~\ref{fig:gauss-fig}.}
\label{fig:imd_raw}
\end{figure}
\clearpage
\section{Implementation Details}\label{sec:implementaiton_Details}
We used our own implementations of the CKA, kCKA, and MKA algorithms. For RTD, IMD, and SVCCA, we used the implementation of~\cite{barannikov2021representation}, following examples from the corresponding GitHub repository\footnote{\url{https://github.com/IlyaTrofimov/RTD}}. Figures~\ref{fig:ranking_rings} and~\ref{fig:clusters_break} were also produced by reusing code from the same repository.
The ReSi benchmark was run from its publicly available repository; the full details of the benchmark are provided in Supplementary Section~\ref{sec:resi_details}.
The ResNet-18 networks were trained using a standard procedure: the Adam~\cite{kingma2014adam} optimizer with learning rate 0.001, 50 epochs, batch size 128, and a step learning-rate schedule with gamma 0.1 at epochs 30 and 40.
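For reference, this schedule in PyTorch looks roughly as follows (a sketch; data loading and the loss computation are omitted):
\begin{verbatim}
import torch
from torchvision.models import resnet18

model = resnet18(num_classes=100)        # ImageNet-100 has 100 classes
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Step schedule: multiply the learning rate by 0.1 at epochs 30 and 40.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[30, 40], gamma=0.1)
for epoch in range(50):
    # ... one pass over the training set with batch size 128 ...
    scheduler.step()
\end{verbatim}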
All experiments were conducted on a workstation equipped with two NVIDIA RTX 4090 GPUs (24 GB memory each), an AMD Ryzen Threadripper 7960X processor with 24 cores, and 256 GB of system RAM.
Code is attached as supplementary material for review.
\section{Details of ReSi Benchmark}\label{sec:resi_details}
\subsection{Summary}
We use the representational similarity benchmark ReSi \citep{klabunde2024resi} to evaluate MKA and compare it with many other commonly used measures\footnote{\url{https://github.com/mklabunde/resi}}. We adopt the ReSi benchmark design, which grounds representational similarity either by prediction (tests 1–2) or by design (tests 3–6). In each test, we construct a controlled set of models and compare layer‑wise representations on held‑out data. ReSi provides the training protocols, datasets, and reference implementations for 24 baseline similarity measures; we add three new variants: MKA, CKA with an RBF kernel, and CKA with an RBF kernel and $k$-NN. The benchmark evaluates measures per test/dataset/model and reports rank‑ and decision‑based metrics accordingly.
Some measures, including PWCCA, Uniformity Difference, and Second-Order Cosine Similarity, are left blank in the result tables, and results for test 5 in the vision domain are also missing. These omissions arise from issues such as numerical instability, the occurrence of negative eigenvalues, prohibitively high runtime, or cases where the measures collapse to identical similarity values across comparisons.
\subsection{Datasets}
\subsubsection{Vision}
ImageNet-100 is a balanced subset of 100 classes sampled from the full ImageNet-1k dataset \citep{russakovsky2015imagenet}. The images are resized and center-cropped to 224×224 for both CNNs and ViTs.
\subsubsection{Language}
MNLI \citep{williams2017broad} is a large-scale natural language inference dataset with three labels: entailment, contradiction, and neutral. It consists of premise–hypothesis pairs sampled from ten text genres. We fine-tune BERT and ALBERT on MNLI and evaluate representations exclusively on the validation-matched split.
\subsubsection{Graphs}
For graph representation similarity tests, we use node classification datasets with fixed splits:
\paragraph{Cora}
A citation network of 2,708 machine learning publications categorized into 7 classes. Each node represents a paper, edges denote citations, and input features are 1,433-dimensional bag-of-words vectors. \citep{yang2016revisiting}
\paragraph{Flickr}
A social network dataset where the nodes represent users, edges represent follow relationships, and node features are 500-dimensional vectors derived from user metadata. The classification task has 7 labels. We subsample 10,000 test nodes for representation extraction. \citep{zeng2019graphsaint}
\paragraph{OGBN-Arxiv}
A large-scale citation network from the Open Graph Benchmark (OGB). Nodes represent 169k CS papers, edges are citation links, and each node has a 128-dimensional feature vector. The classification task involves 40 subject areas. We subsample 10,000 nodes from the test split for representation extraction. \citep{hu2020open}
\subsection{Models}
To ensure broad coverage of architectural families, we adopt representative models from vision, language, and graph domains. All models are trained or fine-tuned under standardized protocols following the ReSi benchmark, and their hidden representations are extracted in a consistent manner for similarity evaluation.
\subsubsection{Vision}
We employ three canonical CNN families and a transformer-based architecture family. These four families allow us to test whether similarity measures generalize across convolutional, residual, and attention-based architectures.
\paragraph{ResNets}
ResNet-18, ResNet-34, ResNet-101, trained from scratch on IN100 using cross-entropy loss and SGD with momentum. These models capture hierarchical convolutional features with residual connections. \citep{he2016deep}
\paragraph{VGGs}
VGG-11 and VGG-19 were trained from scratch under identical optimization schedules. Compared to ResNets, VGGs lack skip connections, providing a useful contrast in representational geometry. \citep{simonyan2014very}
\paragraph{Vision Transformers (ViTs)}
ViT-B/32 and ViT-L/32, initialized from ImageNet-21k pretraining and fine-tuned on IN100. Inputs are tokenized into 32×32 image patches with learnable positional embeddings. \citep{dosovitskiy2020image}
\subsubsection{Language}
We fine-tune two Transformer encoder models on MNLI, and both models are evaluated on the validation-matched split of MNLI.
\paragraph{BERT (base)}
Pre-trained BERT is fine-tuned with a linear learning rate schedule, 10\% warm-up, and maximum learning rate $5 \times 10^{-5}$. \citep{devlin2019bert}
\paragraph{ALBERT}
A parameter-reduced variant of BERT using factorized embeddings and cross-layer parameter sharing. Fine-tuning follows the same hyperparameter schedule as BERT. \citep{lan2019albert}
\subsubsection{Graph}
We use graph neural networks (GNNs) implemented in PyTorch Geometric, covering spectral, spatial, and attention-based designs.
\paragraph{Graph Convolutional Network (GCN)}
A spectral GNN where each layer propagates node features by normalized adjacency matrix multiplication. We train GCNs with two hidden layers for most tests, and extend to five hidden layers for the Layer Monotonicity test to ensure sufficient depth. \citep{kipf2016semi}
\paragraph{GraphSAGE}
A neighborhood-aggregation GNN that samples and aggregates neighbor features using mean aggregation. This model tests inductive generalization properties on large graphs such as Flickr and OGBN-Arxiv. \citep{hamilton2017inductive}
\paragraph{Graph Attention Network (GAT)}
A spatial GNN that computes attention coefficients over neighbors to weigh their contributions. We employ the standard configuration with 8 attention heads. \citep{velivckovic2017graph}
\paragraph{Position-aware GNN (P-GNN)}
A positional-encoding GNN that incorporates relative distance features. Due to computational constraints, P-GNN is evaluated only on Cora and excluded from augmentation tests because DropEdge perturbations are incompatible with its positional encodings. \citep{you2019position}
\subsubsection{Representation Extraction}
For all models across domains, we extract hidden representations in a standardized manner to ensure comparability of similarity measures. Unless otherwise required by a specific test (e.g., test 6: Layer Monotonicity), we always use the last hidden layer before the classifier head. For CNNs (ResNet, VGG), we take the post-global average pooling (GAP) feature vectors, and in the monotonicity test, we also extract intermediate convolutional blocks, with feature maps downsampled to a uniform 7×7 spatial resolution for memory control. For Vision Transformers, we use the [CLS] token from the final transformer block as the representation. For language models (BERT and ALBERT), we primarily use the final-layer [CLS] token embedding to represent each premise–hypothesis pair, while also including mean-pooled token embeddings as an alternative variant. For graph neural networks (GCN, GraphSAGE, GAT, P-GNN), we extract node embeddings from the last hidden layer, and in the monotonicity test, we additionally collect outputs from all intermediate layers. All representations are computed exclusively on held-out validation or test splits (IN100 validation set with 50 images per class, MNLI validation-matched set, and the test nodes of Cora/Flickr/OGBN-Arxiv) to prevent training leakage and to keep sample sizes fixed across similarity measures.
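For instance, the post-GAP features of a ResNet can be collected with a forward hook; the snippet below is a sketch with a random stand-in batch rather than our exact extraction code:
\begin{verbatim}
import torch
from torchvision.models import resnet18

model = resnet18(num_classes=100).eval()
features = []

def hook(module, inputs, output):
    # avgpool output has shape (N, 512, 1, 1); flatten to (N, 512)
    features.append(torch.flatten(output, 1).detach())

handle = model.avgpool.register_forward_hook(hook)
with torch.no_grad():
    model(torch.randn(8, 3, 224, 224))   # stand-in for a validation batch
handle.remove()
reps = torch.cat(features)               # (n_samples, 512) representations
\end{verbatim}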
\subsection{Tests}
\subsubsection{Test 1 — Correlation to Accuracy Difference}
If two models differ in accuracy, their representations should differ accordingly. We train ten models per dataset, varying only random seeds, compute accuracies on the test split, and correlate pairwise representational similarity with the absolute accuracy difference.
\subsubsection{Test 2 — Correlation to Output Difference}
Models with similar accuracy can still produce different instance‑level predictions; we correlate representational similarity with (i) the disagreement rate between hard labels and (ii) the mean Jensen–Shannon divergence (JSD) between the models' output probability vectors.
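A sketch of both quantities, assuming the two models' softmax outputs are available as row-stochastic matrices $P$ and $Q$ (the function name is ours):
\begin{verbatim}
import numpy as np
from scipy.spatial.distance import jensenshannon

def output_difference(P, Q):
    """P, Q: (n_samples, n_classes) probability matrices of two models."""
    disagreement = np.mean(P.argmax(axis=1) != Q.argmax(axis=1))
    # scipy returns the JS *distance* (sqrt of the divergence), so square it
    jsd = np.mean([jensenshannon(p, q, base=2) ** 2 for p, q in zip(P, Q)])
    return disagreement, jsd
\end{verbatim}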
\subsubsection{Test 3 — Label Randomization}
Distinguish models trained with different degrees of label corruption. Groups are defined by randomization rate (e.g., 0\%, 25\%, 50\%, 75\%, 100\%), with five models per group. We then test if within‑group similarities exceed between‑group similarities.
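Tests 3–5 share the same scoring scheme: pairwise similarities between models should rank within‑group pairs above between‑group pairs. A sketch of the AUPRC part of this evaluation (names are ours; the ReSi implementation may differ in details):
\begin{verbatim}
import numpy as np
from sklearn.metrics import average_precision_score

def group_auprc(sim, groups):
    """sim: (m, m) similarities between m models (larger = more similar);
    groups: length-m array of group labels (e.g., randomization rates)."""
    groups = np.asarray(groups)
    i, j = np.triu_indices(len(groups), k=1)     # all unordered model pairs
    same = (groups[i] == groups[j]).astype(int)  # 1 for within-group pairs
    return average_precision_score(same, sim[i, j])
\end{verbatim}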
\subsubsection{Test 4 — Shortcut Affinity}
Detect reliance on artificial shortcut features. We add synthetic label‑leaking features during training and form groups by shortcut “strength.” Each group consists of five independently trained models with different random seeds. A good similarity measure should assign higher similarity within groups of models trained on shortcuts of the same strength than across groups trained with different strengths.
\subsubsection{Test 5 — Augmentation}
Assess whether measures capture robustness to data augmentation. We train one “reference” group on standard data and additional groups with progressively stronger augmentation, always evaluating on non‑augmented test data. Each group consists of five independently trained models with different random seeds. Models within a group are expected to be more similar to one another than to models trained with different augmentation strengths.
\subsubsection{Test 6 — Layer Monotonicity}
Within a single model, nearby layers should be more similar than distant ones; we check whether similarity decreases with layer distance, and whether ordered pair constraints hold. We use the models from Tests 1–2 (for graphs, we increase the inner layers to five). We extract multiple intermediate layers and then compute (a) conformity to the ordinal constraints and (b) Spearman correlation between similarity and layer distance.
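A sketch of both evaluation metrics, assuming the pairwise layer similarities of one model are collected in a matrix (the conformity definition here follows the ordered-pair constraint described above; ReSi's exact bookkeeping may differ):
\begin{verbatim}
import numpy as np
from scipy.stats import spearmanr

def layer_monotonicity(sim):
    """sim: (L, L) matrix of pairwise similarities between layers."""
    n = sim.shape[0]
    i, j = np.triu_indices(n, k=1)
    dist = j - i                          # layer distance for each pair
    rho, _ = spearmanr(-dist, sim[i, j])  # high rho: similarity falls
                                          # with layer distance
    # Conformity: fraction of ordered triples a < b < c
    # satisfying sim(a, b) > sim(a, c).
    ok = total = 0
    for a in range(n):
        for b in range(a + 1, n):
            for c in range(b + 1, n):
                total += 1
                ok += sim[a, b] > sim[a, c]
    return rho, ok / total
\end{verbatim}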
\subsection{Representational Similarity Measures}
\subsubsection{Baseline Measures (from ReSi)}
ReSi covers 24 measures spanning alignment/CCA‑type scores, RSM‑based distances, topology‑based divergences, neighborhood statistics, and simple statistics; we use their official implementations and hyperparameters.
\paragraph{CCA-based measures} ~ \\
PWCCA — Projection-Weighted Canonical Correlation Analysis \citep{morcos2018insights} \\
SVCCA — Singular Vector Canonical Correlation Analysis \citep{raghu2017svcca}
\paragraph{Alignment-based measures} ~ \\
AlignCos — Aligned Cosine Similarity \citep{hamilton2016diachronic} \\
AngShape — Orthogonal Angular Shape Metric \citep{williams2021generalized} \\
HardCorr — Hard Correlation Match \citep{li2015convergent} \\
LinReg — Linear Regression Alignment \citep{kornblith2019similarity} \\
OrthProc — Orthogonal Procrustes \citep{ding2021grounding} \\
PermProc — Permutation Procrustes \citep{williams2021generalized} \\
ProcDist — Procrustes Size-and-Shape Distance \citep{williams2021generalized} \\
SoftCorr — Soft Correlation Match \citep{li2015convergent}
\paragraph{RSM-based measures} ~ \\
CKA — Centered Kernel Alignment \citep{kornblith2019similarity} \\
DistCorr — Distance Correlation \citep{szekely2007measuring} \\
EOS — Eigenspace Overlap Score \citep{may2019downstream} \\
GULP — Generalized Unsupervised Linear Prediction \citep{boix2022gulp} \\
RSA — Representational Similarity Analysis \citep{kriegeskorte2008representational} \\
RSMDiff — RSM Norm Difference \citep{yin2018dimensionality}
\paragraph{Neighbor-based measures} ~ \\
2nd-Cos — Second-order Cosine Similarity \citep{hamilton2016cultural} \\
Jaccard — k-NN Jaccard Similarity \citep{wang2020towards} \\
RankSim — Rank Similarity \citep{wang2020towards}
\paragraph{Topology-based measures} ~ \\
IMD — Intrinsic Manifold Distance \citep{tsitsulin2019shape} \\
RTD — Representation Topology Divergence \citep{barannikov2021representation}
\paragraph{Statistic-based measures} ~ \\
ConcDiff — Concentricity Difference \citep{wang2020towards} \\
MagDiff — Magnitude Difference \citep{wang2020towards} \\
UnifDiff — Uniformity Difference \citep{wang2020understanding}
\subsubsection{Additional Measures}
In addition to the 24 baseline measures in the ReSi benchmark, we implemented three new kernel-based alignment variants (MKA, CKA with RBF kernel, and CKA with RBF kernel and k-NN).
\paragraph{Manifold Approximated Kernel Alignment (MKA)}
In our implementation, we evaluate MKA under four neighborhood sizes,
namely $k = 15, 50, 100, 200$. These values allow us to probe the trade-off between local geometry (small $k$) and more global manifold structure (large $k$).
\paragraph{CKA with RBF Kernel and $k$-Nearest Neighbors (kCKA)}
In addition to the dense RBF kernel, we also evaluate a sparsified version that restricts non-zero entries to a fixed number of nearest neighbors.
Given a representation set $X = \{x_1, \dots, x_N\}$, we compute pairwise Euclidean distances $d(x_i,x_j) = \|x_i - x_j\|_2$. For each point $x_i$, we retain only its $k$ nearest neighbors, denoted $\mathrm{KNN}(x_i, k)$. The sparsified RBF kernel matrix is then defined as
\begin{equation}
K_{ij} =
\begin{cases}
\exp \!\left( - \dfrac{d(x_i,x_j)}{2\sigma} \right), & \text{if } x_j \in \mathrm{KNN}(x_i,k), \\[8pt]
0, & \text{otherwise},
\end{cases}
\end{equation}
where the bandwidth parameter $\sigma$ is chosen as the median distance
among all retained neighbor pairs.
The final CKA score between two representation sets $X$ and $Y$, with
sparsified RBF kernels $K$ and $L$, is computed in the same way as standard
CKA using the normalized HSIC formulation:
\begin{equation}
\mathrm{CKA}(K,L) =
\frac{\langle K H, L H \rangle}
{\sqrt{\langle K H, K H \rangle \; \langle L H, L H \rangle}},
\end{equation}
where $H = I - \tfrac{1}{N}\mathbf{1}\mathbf{1}^\top$ is the centering matrix. In our experiments, we set the neighborhood size to $k = 100$, so that each instance is only connected to its 100 nearest neighbors in the kernel matrix.
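A self-contained sketch of these two equations (dense matrices for clarity; self-neighbors are excluded by convention here, and our released code may differ in such details):
\begin{verbatim}
import numpy as np
from scipy.spatial.distance import pdist, squareform

def knn_rbf_kernel(X, k=100):
    """Sparsified RBF kernel from the equation above.

    Rows are sparsified independently, so K is generally not symmetric."""
    D = squareform(pdist(X))                 # pairwise Euclidean distances
    idx = np.argsort(D, axis=1)[:, 1:k + 1]  # k nearest neighbors per row
    mask = np.zeros_like(D, dtype=bool)
    np.put_along_axis(mask, idx, True, axis=1)
    sigma = np.median(D[mask])               # median retained distance
    return np.where(mask, np.exp(-D / (2.0 * sigma)), 0.0)

def cka(K, L):
    """CKA with the centering-matrix formulation given above."""
    n = K.shape[0]
    H = np.eye(n) - np.ones((n, n)) / n
    KH, LH = K @ H, L @ H
    return np.sum(KH * LH) / np.sqrt(np.sum(KH * KH) * np.sum(LH * LH))
\end{verbatim}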
\clearpage
\section{ReSi Benchmark Scores}\label{sec:resi_scores}
\subsection{Vision Task}
\begin{table}[htbp]
\caption{Results of Test 1 (Correlation to Accuracy Difference) for the vision domain on ImageNet-100}
\label{tab:resi-vision-test1}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{
\begin{tabular}{l|ccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Test}} &
\multicolumn{7}{c}{\textbf{Accuracy Correlation}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{7}{c}{\textbf{IN100}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 \\
\midrule
CKA & \bf 0.33 & -0.09 & 0.08 & 0.02 & -0.22 & -0.23 & 0.02 \\
CKA ($\delta=0.45$) & 0.29 & -0.06 & 0.01 & 0.00 & -0.10 & -0.28 & 0.09 \\
CKA ($\delta=0.2$) & 0.02 & 0.19 & -0.08 & -0.14 & -0.23 & -0.15 & 0.02 \\
kCKA ($k=100$) & 0.00 & -0.01 & \bf 0.15 & 0.09 & \bf 0.58 & -0.16 & \bf 0.10 \\
SVCCA & 0.29 & \bf 0.27 & 0.00 & -0.04 & -0.30 & -0.01 & -0.17 \\
RTD & 0.26 & -0.01 & -0.20 & \bf 0.15 & 0.07 & -0.10 & \bf 0.10 \\
IMD & 0.17 & -0.20 & 0.12 & 0.06 & -0.11 & -0.26 & -0.16 \\
MKA ($k=100$) & 0.17 & -0.14 & -0.10 & -0.03 & -0.04 & \bf 0.26 & 0.09 \\
\midrule
\midrule
CKA (linear) & 0.36 & -0.07 & 0.16 & 0.03 & -0.20 & -0.26 & 0.05 \\
MKA ($k=15$) & 0.15 & -0.23 & -0.10 & -0.03 & -0.08 & 0.24 & 0.16 \\
MKA ($k=50$) & 0.17 & -0.16 & -0.10 & -0.04 & -0.05 & 0.26 & 0.11 \\
MKA ($k=200$) & 0.16 & -0.14 & -0.11 & -0.04 & -0.01 & 0.26 & 0.08 \\
\midrule
AlignedCosineSimilarity & -0.08 & -0.35 & -0.01 & -0.13 & -0.12 & 0.07 & 0.05 \\
ConcentricityDifference & -0.11 & 0.34 & -0.04 & -0.11 & -0.13 & 0.00 & 0.18 \\
DistanceCorrelation & 0.31 & -0.08 & 0.08 & 0.03 & -0.21 & -0.26 & 0.03 \\
EigenspaceOverlapScore & 0.05 & -0.17 & 0.11 & -0.22 & 0.08 & 0.47 & 0.03 \\
Gulp & 0.02 & -0.18 & 0.12 & -0.17 & 0.10 & 0.28 & 0.04 \\
HardCorrelationMatch & 0.21 & 0.13 & -0.01 & -0.01 & -0.03 & 0.35 & -0.17 \\
JaccardSimilarity & -0.11 & -0.13 & -0.06 & -0.22 & 0.06 & -0.02 & 0.26 \\
LinearRegression & 0.19 & -0.11 & 0.09 & -0.04 & 0.09 & -0.01 & 0.05 \\
MagnitudeDifference & -0.16 & 0.02 & -0.08 & -0.07 & -0.12 & 0.07 & 0.15 \\
OrthogonalAngularShapeMetricCentered & 0.21 & -0.16 & 0.15 & -0.02 & 0.03 & 0.07 & 0.06 \\
OrthogonalProcrustesCenteredAndNormalized & 0.21 & -0.16 & 0.15 & -0.02 & 0.03 & 0.07 & 0.06 \\
PermutationProcrustes & 0.07 & 0.09 & 0.08 & 0.14 & -0.02 & -0.06 & -0.33 \\
ProcrustesSizeAndShapeDistance & 0.08 & 0.00 & 0.14 & 0.13 & 0.08 & 0.16 & 0.05 \\
RSA & 0.06 & -0.17 & 0.09 & 0.24 & -0.35 & -0.12 & -0.11 \\
RSMNormDifference & 0.09 & -0.10 & 0.11 & -0.04 & -0.08 & 0.01 & -0.06 \\
RankSimilarity & 0.09 & 0.03 & 0.13 & -0.01 & 0.05 & 0.18 & 0.36 \\
SecondOrderCosineSimilarity & -0.08 & -0.15 & 0.05 & -0.20 & -0.18 & -0.22 & 0.17 \\
SoftCorrelationMatch & 0.27 & 0.08 & 0.04 & -0.03 & -0.10 & 0.36 & -0.19 \\
UniformityDifference & -0.18 & —— & -0.02 & 0.17 & -0.04 & —— & —— \\
\bottomrule
\end{tabular}
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 2 (Correlation to Output Difference) for the vision domain on ImageNet-100}
\label{tab:resi-vision-test2}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|ccccccc|ccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Test}} &
\multicolumn{7}{c|}{\textbf{JSD Correlation}} &
\multicolumn{7}{c}{\textbf{Disagreement Correlation}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{7}{c|}{\textbf{IN100}} &
\multicolumn{7}{c}{\textbf{IN100}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 \\
\midrule
CKA & \bf 0.26 & 0.02 & 0.30 & -0.09 & 0.04 & 0.02 & -0.12 & \bf 0.36 & 0.00 & 0.29 & -0.01 & -0.25 & 0.00 & -0.05 \\
CKA ($\delta=0.45$) & 0.20 & -0.13 & 0.30 & -0.08 & 0.19 & 0.03 & -0.21 & 0.33 & -0.14 & 0.35 & -0.00 & -0.21 & 0.04 & -0.16 \\
CKA ($\delta=0.2$) & 0.07 & -0.03 & -0.26 & 0.02 & -0.13 & 0.32 & -0.61 & 0.35 & -0.18 & 0.08 & \bf 0.39 & \bf 0.13 & 0.35 & -0.48 \\
kCKA ($k=100$) & -0.34 & \bf 0.35 & \bf 0.43 & -0.02 &\bf 0.24 & 0.32 & -0.32 & -0.56 & \bf 0.45 & 0.18 & -0.18 & -0.17 & 0.22 & -0.17 \\
SVCCA & 0.21 & -0.00 & 0.25 & -0.11 & 0.16 & 0.05 & 0.18 & 0.39 & 0.07 & 0.15 & 0.03 & 0.01 & -0.06 & 0.07 \\
RTD & -0.13 & -0.06 & 0.23 & -0.03 & -0.11 & 0.08 & -0.13 & 0.03 & -0.20 & 0.43 & -0.16 & -0.28 & -0.00 & -0.17 \\
IMD & -0.10 & 0.02 & 0.20 & \bf 0.21 & 0.09 & \bf 0.43 & \bf 0.05 & -0.03 & -0.06 & 0.24 & 0.30 & -0.02 & 0.25 & \bf 0.07 \\
MKA ($k=100$) & 0.11 & 0.25 & 0.39 & 0.08 & 0.22 & 0.39 & -0.25 & 0.09 & 0.08 & 0.30 & 0.04 & -0.17 & 0.27 & -0.13 \\
\midrule
\midrule
CKA (linear) & 0.30 & 0.08 & 0.30 & -0.13 & -0.06 & 0.04 & -0.07 & 0.37 & 0.08 & 0.24 & 0.01 & -0.24 & 0.00 & -0.02 \\
MKA ($k=15$) & 0.17 & 0.12 & 0.35 & 0.12 & 0.24 & 0.38 & -0.28 & 0.11 & -0.01 & 0.37 & 0.10 & -0.15 & 0.41 & -0.22 \\
MKA ($k=50$) & 0.13 & 0.23 & 0.39 & 0.08 & 0.22 & 0.38 & -0.25 & 0.13 & 0.06 & 0.41 & 0.07 & -0.20 & 0.39 & -0.21 \\
MKA ($k=200$) & 0.11 & 0.25 & 0.39 & 0.08 & 0.22 & 0.39 & -0.24 & 0.12 & 0.07 & 0.44 & 0.08 & -0.22 & 0.39 & -0.20 \\
\midrule
AlignedCosineSimilarity & 0.08 & 0.05 & 0.38 & 0.10 & -0.20 & -0.22 & -0.06 & 0.20 & 0.50 & 0.17 & 0.16 & -0.13 & -0.08 & 0.00 \\
ConcentricityDifference & -0.29 & 0.24 & -0.11 & -0.17 & -0.13 & -0.11 & -0.37 & -0.08 & 0.00 & -0.20 & -0.11 & -0.06 & -0.08 & -0.29 \\
DistanceCorrelation & 0.26 & 0.05 & 0.31 & -0.10 & 0.04 & 0.05 & -0.12 & 0.36 & 0.01 & 0.30 & -0.00 & -0.25 & 0.02 & -0.05 \\
EigenspaceOverlapScore & 0.09 & 0.49 & 0.33 & -0.11 & 0.15 & -0.18 & -0.28 & 0.11 & 0.25 & 0.15 & -0.31 & -0.41 & 0.01 & -0.15 \\
Gulp & 0.07 & 0.49 & 0.35 & -0.05 & 0.15 & -0.09 & -0.28 & 0.09 & 0.27 & 0.13 & -0.27 & -0.41 & 0.07 & -0.15 \\
HardCorrelationMatch & 0.28 & 0.31 & 0.02 & -0.22 & -0.05 & 0.03 & -0.26 & 0.28 & -0.06 & -0.07 & -0.15 & -0.18 & 0.27 & -0.16 \\
JaccardSimilarity & 0.35 & 0.26 & 0.31 & 0.04 & 0.32 & 0.46 & -0.30 & 0.25 & 0.47 & 0.25 & 0.14 & -0.12 & 0.33 & -0.18 \\
LinearRegression & 0.21 & 0.21 & 0.41 & -0.01 & 0.25 & -0.09 & -0.14 & 0.19 & 0.25 & 0.30 & -0.17 & -0.23 & 0.04 & -0.07 \\
MagnitudeDifference & -0.38 & -0.20 & 0.01 & -0.16 & -0.28 & 0.02 & -0.32 & -0.17 & -0.22 & -0.04 & -0.09 & 0.04 & -0.01 & -0.22 \\
OrthogonalAngularShapeMetricCentered & 0.24 & 0.22 & 0.34 & -0.01 & 0.19 & -0.26 & -0.15 & 0.24 & 0.40 & 0.20 & -0.13 & -0.33 & -0.11 & -0.06 \\
OrthogonalProcrustesCenteredAndNormalized & 0.24 & 0.22 & 0.34 & -0.02 & 0.19 & -0.26 & -0.15 & 0.24 & 0.40 & 0.20 & -0.13 & -0.33 & -0.11 & -0.06 \\
PermutationProcrustes & 0.18 & 0.18 & 0.27 & -0.18 & 0.06 & 0.36 & -0.06 & 0.13 & -0.25 & -0.04 & 0.02 & 0.20 & 0.37 & 0.10 \\
ProcrustesSizeAndShapeDistance & 0.10 & 0.14 & 0.39 & -0.05 & 0.27 & -0.05 & 0.02 & 0.08 & -0.08 & 0.11 & -0.10 & -0.07 & -0.07 & 0.08 \\
RSA & 0.12 & 0.18 & 0.09 & -0.18 & -0.19 & -0.11 & -0.20 & 0.19 & 0.33 & 0.11 & -0.05 & 0.04 & 0.00 & -0.04 \\
RSMNormDifference & -0.41 & -0.22 & 0.30 & -0.27 & 0.07 & 0.02 & -0.28 & -0.18 & -0.20 & 0.19 & -0.01 & -0.03 & -0.21 & -0.17 \\
RankSimilarity & -0.13 & -0.01 & 0.24 & 0.03 & 0.05 & 0.25 & -0.09 & -0.09 & -0.04 & 0.05 & 0.03 & -0.34 & 0.15 & -0.30 \\
SecondOrderCosineSimilarity & -0.13 & 0.16 & 0.28 & 0.07 & -0.29 & 0.43 & -0.35 & -0.20 & 0.45 & 0.11 & 0.11 & -0.07 & 0.19 & -0.27 \\
SoftCorrelationMatch & 0.45 & 0.27 & 0.11 & -0.04 & -0.17 & 0.01 & -0.31 & 0.46 & -0.13 & -0.06 & -0.03 & -0.29 & 0.27 & -0.16 \\
UniformityDifference & -0.34 & —— & -0.40 & 0.04 & -0.17 & —— & -0.01 & —— & -0.27 & 0.17 & 0.39 & —— & —— & —— \\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 3 (Label Randomization) for the vision domain on ImageNet-100}
\label{tab:resi-vision-test3}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|ccccccc|ccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{7}{c|}{\textbf{AUPRC}} &
\multicolumn{7}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{7}{c|}{\textbf{IN100}} &
\multicolumn{7}{c}{\textbf{IN100}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32\\
\midrule
CKA
& \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.77 & 0.52 & 0.81 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.77 & 0.65 & 0.83 \\
CKA ($\delta=0.45$)
& \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.77 & 0.78 & 0.86 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.75 & 0.81 & 0.87 \\
CKA ($\delta=0.2$)
& 0.65 & 0.70 & 0.82 & 0.80 & 0.76 & 0.77 & \bf 0.97 & 0.71 & 0.74 & 0.87 & 0.84 & 0.73 & 0.75 & \bf 0.99 \\
kCKA ($k=100$)
& \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.88 & 0.73 & 0.57 & 0.90 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.95 & 0.70 & 0.73 & 0.97\\
SVCCA
& 1.00 & 0.94 & 0.95 & 0.90 & 0.78 & 0.52 & 0.58 & \bf 1.00 & 0.97 & 0.96 & 0.89 & 0.89 & 0.74 & 0.77\\
RTD
& \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 0.95 & 0.44 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 0.95 & 0.87 \\
IMD & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.73 & 0.80 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.78 & 0.80\\
MKA ($k=100$) & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.94 & 0.73 & 0.75 & 0.80 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.96 & 0.70 & 0.81 & 0.79\\
\midrule
\midrule
CKA (linear) & 1.00 & 1.00 & 1.00 & 1.00 & 0.77 & 0.57 & 0.89 & 1.00 & 1.00 & 1.00 & 1.00 & 0.75 & 0.73 & 0.89\\
MKA ($k=15$) & 1.00 & 1.00 & 1.00 & 0.98 & 0.73 & 0.76 & 0.81 & 1.00 & 1.00 & 1.00 & 0.98 & 0.70 & 0.82 & 0.86\\
MKA ($k=50$) & 1.00 & 1.00 & 1.00 & 0.97 & 0.73 & 0.75 & 0.80 & 1.00 & 1.00 & 1.00 & 0.98 & 0.70 & 0.81 & 0.79\\
MKA ($k=200$) & 1.00 & 1.00 & 1.00 & 0.92 & 0.70 & 0.75 & 0.80 & 1.00 & 1.00 & 1.00 & 0.95 & 0.69 & 0.81 & 0.78\\
\midrule
AlignedCosineSimilarity & 0.72 & 0.72 & 0.85 & 0.72 & 0.46 & 0.58 & 1.00 & 0.83 & 0.83 & 0.94 & 0.83 & 0.55 & 0.75 & 1.00\\
ConcentricityDifference & 0.99 & 1.00 & 1.00 & 1.00 & 0.57 & 0.73 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.84 & 0.90 & 1.00 \\
DistanceCorrelation & 1.00 & 1.00 & 1.00 & 1.00 & 0.77 & 0.57 & 0.90 & 1.00 & 1.00 & 1.00 & 1.00 & 0.77 & 0.76 & 0.90\\
EigenspaceOverlapScore & 0.84 & 0.72 & 0.70 & 0.55 & 0.50 & 0.62 & 0.70 & 0.95 & 0.83 & 0.73 & 0.75 & 0.59 & 0.79 & 0.75\\
Gulp & 0.89 & 0.72 & 0.60 & 0.88 & 0.44 & 0.53 & 0.66 & 0.97 & 0.84 & 0.91 & 0.96 & 0.64 & 0.85 & 0.93\\
HardCorrelationMatch & 0.72 & 0.72 & 0.72 & 1.00 & 0.60 & 0.53 & 0.91 & 0.83 & 0.83 & 0.83 & 1.00 & 0.67 & 0.68 & 0.94\\
JaccardSimilarity & 1.00 & 1.00 & 1.00 & 1.00 & 0.73 & 0.49 & 0.83 & 1.00 & 1.00 & 1.00 & 1.00 & 0.70 & 0.69 & 0.87\\
MagnitudeDifference & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.71 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.82\\
OrthogonalAngularShapeMetricCentered & 0.72 & 0.72 & 1.00 & 1.00 & 0.80 & 0.55 & 0.91 & 0.83 & 0.83 & 1.00 & 1.00 & 0.82 & 0.70 & 0.94\\
OrthogonalProcrustesCenteredAndNormalized & 0.72 & 0.72 & 1.00 & 1.00 & 0.80 & 0.55 & 0.91 & 0.83 & 0.83 & 1.00 & 1.00 & 0.82 & 0.70 & 0.94\\
PermutationProcrustes & 0.70 & 0.70 & 0.71 & 1.00 & 0.72 & 0.42 & 0.70 & 0.67 & 0.67 & 0.75 & 1.00 & 0.83 & 0.50 & 0.78\\
ProcrustesSizeAndShapeDistance & 0.72 & 0.71 & 0.73 & 1.00 & 0.72 & 0.70 & 0.75 & 0.83 & 0.76 & 0.85 & 1.00 & 0.83 & 0.67 & 0.77\\
RSA & 0.75 & 0.72 & 1.00 & 1.00 & 0.76 & 0.49 & 0.86 & 0.89 & 0.83 & 1.00 & 1.00 & 0.75 & 0.63 & 0.86\\
RSMNormDifference & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.75 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.78\\
RankSimilarity & 1.00 & 1.00 & 1.00 & 0.99 & 0.73 & 0.74 & 0.77 & 1.00 & 1.00 & 1.00 & 1.00 & 0.72 & 0.78 & 0.85\\
SecondOrderCosineSimilarity & 0.71 & 0.71 & 0.71 & 0.70 & 0.70 & 0.70 & 0.69 & 0.79 & 0.75 & 0.74 & 0.67 & 0.67 & 0.75 & 0.67\\
SoftCorrelationMatch & 0.72 & 0.72 & 0.72 & 0.85 & 0.46 & 0.52 & 0.91 & 0.83 & 0.83 & 0.83 & 0.96 & 0.56 & 0.62 & 0.94\\
UniformityDifference & 0.42 & 0.42 & 0.53 & 0.75 & 0.57 & 0.52 & 0.20 & 0.62 & 0.57 & 0.72 & 0.90 & 0.81 & 0.67 & 0.32\\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\centering
\caption{Results of Test 4 (Shortcut Affinity) for the vision domain on ImageNet-100}
\label{tab:resi-vision-test4}
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|ccccccc|ccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{7}{c|}{\textbf{AUPRC}} &
\multicolumn{7}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{7}{c|}{\textbf{IN100}} &
\multicolumn{7}{c}{\textbf{IN100}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 \\
\midrule
CKA
& \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.86 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.96 \\
CKA ($\delta=0.45$)
& \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.90 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.97 \\
CKA ($\delta=0.2$)
& 0.77 & 0.62 & 0.66 & 0.80 & 0.74 & \bf 1.00 & 0.79 & 0.94 & 0.85 & 0.88 & 0.93 & 0.94 & \bf 1.00 & 0.93 \\
kCKA ($k=100$)
& 0.94 & 0.92 & 0.98 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 0.91 & 0.99 & 0.98 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.97 \\
SVCCA
& 0.55 & 0.68 & 0.51 & 0.68 & 0.29 & 0.60 & 0.28 & 0.81 & 0.84 & 0.81 & 0.91 & 0.62 & 0.82 & 0.57 \\
RTD
& \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.72 & 0.84 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.92 & 0.95 \\
IMD
& 0.66 & 0.77 & 0.56 & 0.78 & 0.67 & 0.38 & 0.31 & 0.87 & 0.88 & 0.75 & 0.92 & 0.77 & 0.74 & 0.66 \\
MKA ($k=100$)
& \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 0.91 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 0.98 \\
\midrule
\midrule
CKA (linear) & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.87 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.96 \\
MKA ($k=15$) & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.92 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 \\
MKA ($k=50$) & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.90 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.97 \\
MKA ($k=200$) & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.91 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 \\
\midrule
AlignedCosineSimilarity & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 \\
ConcentricityDifference & 0.53 & 0.70 & 0.50 & 0.78 & 0.27 & 0.28 & 0.25 & 0.83 & 0.86 & 0.81 & 0.95 & 0.67 & 0.58 & 0.64 \\
DistanceCorrelation & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.88 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.96 \\
EigenspaceOverlapScore & 1.00 & 1.00 & 0.99 & 0.95 & 0.88 & 0.93 & 0.93 & 1.00 & 1.00 & 1.00 & 0.98 & 0.96 & 0.98 & 0.97 \\
Gulp & 1.00 & 1.00 & 1.00 & 0.96 & 0.88 & 0.97 & 0.93 & 1.00 & 1.00 & 1.00 & 0.98 & 0.96 & 1.00 & 0.97 \\
HardCorrelationMatch & 0.97 & 1.00 & 0.99 & 0.92 & 0.91 & 0.97 & 0.90 & 0.99 & 1.00 & 1.00 & 0.97 & 0.98 & 0.99 & 0.98 \\
JaccardSimilarity & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.90 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 \\
LinearRegression & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 & 0.52 & 0.92 & 1.00 & 1.00 & 1.00 & 1.00 & 0.99 & 0.83 & 0.98 \\
MagnitudeDifference & 0.37 & 0.37 & 0.44 & 0.53 & 0.23 & 0.23 & 0.47 & 0.62 & 0.75 & 0.79 & 0.85 & 0.51 & 0.57 & 0.79 \\
OrthogonalAngularShapeMetricCentered & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 \\
OrthogonalProcrustesCenteredAndNormalized & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 \\
PermutationProcrustes & 0.72 & 0.80 & 0.94 & 0.66 & 0.82 & 0.97 & 0.77 & 0.89 & 0.94 & 1.00 & 0.87 & 0.93 & 0.98 & 0.89 \\
ProcrustesSizeAndShapeDistance & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.89 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.95 \\
RSA & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.72 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.91 \\
RSMNormDifference & 0.57 & 0.42 & 0.59 & 0.87 & 0.59 & 0.50 & 0.47 & 0.81 & 0.71 & 0.83 & 0.97 & 0.80 & 0.82 & 0.69 \\
RankSimilarity & 0.99 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 & 0.89 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.97 \\
SecondOrderCosineSimilarity & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.92 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.97 \\
SoftCorrelationMatch & 0.97 & 0.98 & 0.99 & 0.90 & 0.84 & 0.98 & 0.94 & 0.99 & 1.00 & 1.00 & 0.97 & 0.96 & 1.00 & 0.99 \\
UniformityDifference & 0.75 & 0.73 & 0.87& 0.60 & 0.55 & 0.61 & 0.17 & 0.90 & 0.91 & 0.97 & 0.83 & 0.83 & 0.84 & 0.00 \\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 6 (Layer Monotonicity) for the vision domain on ImageNet-100}
\label{tab:resi-vision-test6}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|ccccccc|ccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{7}{c|}{\textbf{Spearman}} &
\multicolumn{7}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{7}{c|}{\textbf{IN100}} &
\multicolumn{7}{c}{\textbf{IN100}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 &
RNet18 & RNet34 & RNet101 & VGG11 & VGG19 & ViT B32 & ViT L32 \\
\midrule
CKA & 0.97 & 0.79 & 0.97 & 0.88 & 0.93 & \bf 1.00 & \bf 1.00 & 0.95 & 0.98 & 0.99 & 0.90 & 0.92 & \bf 1.00 & \bf 1.00 \\
CKA ($\delta=0.45$) & 0.90 & 0.73 & 0.91 & 0.97 & 0.94 & \bf 1.00 & \bf 1.00 & 0.94 & 0.95 & 0.97 & 0.95 & 0.93 & \bf 1.00 & \bf 1.00 \\
CKA ($\delta=0.2$) & 0.97 & 0.89 & 0.84 & \bf 1.00 & 0.84 & 0.95 & \bf 1.00 & 0.96 & 0.96 & 0.96 & \bf 1.00 & 0.94 & 0.93 & \bf 1.00 \\
kCKA ($k=100$) & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
SVCCA & 0.20 & 0.27 & 0.43 & 0.42 & 0.40 & 0.87 & 0.61 & 0.72 & 0.58 & 0.72 & 0.75 & 0.69 & 0.86 & 0.78 \\
RTD & 0.97 & 0.83 & 0.44 & 0.52 & 0.84 & 0.90 & \bf 1.00 & \bf 1.00 & 0.92 & 0.83 & 0.83 & 0.96 & 0.93 & \bf 1.00 \\
IMD & -0.01 & 0.23 & 0.07 & -0.03 & 0.09 & 0.58 & 0.37 & 0.51 & 0.66 & 0.62 & 0.48 & 0.50 & 0.86 & 0.54 \\
MKA ($k=100$) & \bf 1.00 & \bf 1.00 & 0.96 & 0.79 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.99 & 0.82 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
\midrule
\midrule
CKA (linear) & 0.87 & 0.82 & 0.97 & 0.88 & 0.93 & 1.00 & 1.00 & 0.94 & 0.98 & 1.00 & 0.90 & 0.92 & 1.00 & 1.00 \\
MKA ($k=15$) & 1.00 & 1.00 & 0.96 & 0.80 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.99 & 0.82 & 1.00 & 1.00 & 1.00 \\
MKA ($k=50$) & 1.00 & 1.00 & 0.96 & 0.79 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.99 & 0.80 & 1.00 & 1.00 & 1.00 \\
MKA ($k=200$) & 1.00 & 1.00 & 0.96 & 0.79 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.99 & 0.82 & 1.00 & 1.00 & 1.00 \\
\midrule
AlignedCosineSimilarity & 0.52 & 0.63 & 0.52 & 0.93 & 0.12 & 1.00 & 1.00 & 0.84 & 0.86 & 0.80 & 0.90 & 0.68 & 1.00 & 1.00 \\
ConcentricityDifference & -0.78 & -0.05 & -0.14 & -0.25 & -0.27 & 0.65 & 1.00 & 0.20 & 0.46 & 0.46 & 0.32 & 0.43 & 0.90 & 1.00 \\
DistanceCorrelation & 0.97 & 0.79 & 0.97 & 0.88 & 0.93 & 1.00 & 1.00 & 0.95 & 0.98 & 0.99 & 0.90 & 0.92 & 1.00 & 1.00 \\
EigenspaceOverlapScore & 0.88 & 0.96 & 0.97 & 1.00 & 1.00 & 1.00 & 1.00 & 0.90 & 0.95 & 0.96 & 1.00 & 1.00 & 1.00 & 1.00 \\
Gulp & 0.53 & 0.48 & 0.64 & 1.00 & 1.00 & 1.00 & 1.00 & 0.70 & 0.70 & 0.80 & 1.00 & 1.00 & 1.00 & 1.00 \\
HardCorrelationMatch & 0.01 & 0.53 & 0.76 & 0.91 & 0.74 & 1.00 & 1.00 & 0.61 & 0.85 & 0.89 & 0.92 & 0.93 & 1.00 & 1.00 \\
JaccardSimilarity & 0.55 & 0.65 & 0.78 & 1.00 & 1.00 & 1.00 & 1.00 & 0.85 & 0.90 & 0.91 & 1.00 & 1.00 & 1.00 & 1.00 \\
LinearRegression & 0.55 & 0.96 & 0.93 & 0.55 & 0.78 & 0.99 & 1.00 & 0.85 & 0.95 & 0.91 & 0.85 & 0.91 & 0.99 & 1.00 \\
MagnitudeDifference & -0.37 & 0.13 & 0.14 & 0.21 & 0.28 & 0.84 & 1.00 & 0.35 & 0.46 & 0.55 & 0.64 & 0.65 & 0.86 & 1.00 \\
OrthogonalAngularShapeMetricCentered & 0.55 & 0.65 & 0.65 & 0.96 & 0.99 & 1.00 & 1.00 & 0.85 & 0.90 & 0.90 & 0.97 & 0.98 & 1.00 & 1.00 \\
OrthogonalProcrustesCenteredAndNormalized & 0.55 & 0.65 & 0.65 & 0.96 & 0.99 & 1.00 & 1.00 & 0.85 & 0.90 & 0.90 & 0.97 & 0.98 & 1.00 & 1.00 \\
PermutationProcrustes & 0.20 & 0.60 & 0.39 & 0.69 & 0.14 & 0.71 & 1.00 & 0.63 & 0.70 & 0.61 & 0.71 & 0.59 & 0.70 & 1.00 \\
ProcrustesSizeAndShapeDistance & 0.55 & 0.42 & 0.39 & 0.48 & 0.67 & 0.71 & 1.00 & 0.85 & 0.80 & 0.79 & 0.80 & 0.80 & 0.70 & 1.00 \\
RSA & 0.97 & 0.72 & 0.88 & 0.58 & 0.66 & 1.00 & 1.00 & 0.95 & 0.94 & 0.97 & 0.80 & 0.90 & 1.00 & 1.00 \\
RSMNormDifference & -0.33 & -0.09 & -0.21 & 0.85 & 0.64 & 0.75 & 1.00 & 0.45 & 0.50 & 0.48 & 0.85 & 0.70 & 0.75 & 1.00 \\
RankSimilarity & 0.55 & 0.65 & 0.67 & 1.00 & 1.00 & 1.00 & 1.00 & 0.85 & 0.90 & 0.90 & 1.00 & 1.00 & 1.00 & 1.00 \\
SecondOrderCosineSimilarity & 0.55 & 0.78 & 0.92 & 1.00 & 1.00 & 1.00 & 1.00 & 0.85 & 0.91 & 0.95 & 1.00 & 1.00 & 1.00 & 1.00 \\
SoftCorrelationMatch & 0.11 & 0.50 & 0.70 & 0.52 & 0.64 & 1.00 & 1.00 & 0.72 & 0.85 & 0.88 & 0.80 & 0.89 & 1.00 & 1.00 \\
UniformityDifference & 0.18 & 0.20 & 0.55 & -0.30 & -0.06 & —— & 0.65 & 0.68 & 0.81 & 0.38 & 0.50 & 1.00 & 1.00 & —— \\
\bottomrule
\end{tabular}%
}
\end{table}
\clearpage
\subsection{NLP Task}
\begin{table}[htbp]
\caption{Results of Test 1 (Correlation to Accuracy Difference) on MNLI}
\label{tab:resi-nlp-test1}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
%\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cc}
\toprule
\multicolumn{1}{c|}{\textbf{Representation}} &
\multicolumn{2}{c}{\textbf{CLS Token}} \\
\multicolumn{1}{c|}{\textbf{Test}} &
\multicolumn{2}{c}{\textbf{Accuracy Correlation}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
BERT & ALBERT \\
\midrule
CKA & \bf 0.11 & \bf 0.26 \\
CKA ($\delta=0.45$) & 0.08 & 0.09 \\
CKA ($\delta=0.2$) & -0.17 & -0.24 \\
kCKA ($k=100$) & 0.03 & -0.24 \\
SVCCA & 0.32 & -0.00 \\
RTD & 0.11 & -0.23 \\
IMD & -0.26 & -0.08 \\
MKA ($k=100$) & -0.17 & -0.26 \\
\midrule
\midrule
CKA (linear) & 0.18 & 0.17 \\
MKA ($k=15$) & -0.16 & -0.24 \\
MKA ($k=50$) & -0.16 & -0.26 \\
MKA ($k=200$) & -0.16 & -0.27 \\
\midrule
AlignedCosineSimilarity & 0.25 & 0.00 \\
ConcentricityDifference & -0.00 & -0.07 \\
DistanceCorrelation & 0.15 & 0.25 \\
EigenspaceOverlapScore & 0.03 & -0.10 \\
Gulp & 0.06 & -0.15 \\
HardCorrelationMatch & 0.04 & 0.21 \\
JaccardSimilarity & -0.21 & -0.25 \\
LinearRegression & 0.20 & 0.04 \\
MagnitudeDifference & 0.22 & -0.06 \\
OrthogonalAngularShapeMetricCentered & 0.28 & 0.12 \\
OrthogonalProcrustesCenteredAndNormalized & 0.27 & 0.12 \\
PWCCA & -0.61 & -0.27 \\
PermutationProcrustes & 0.09 & -0.02 \\
ProcrustesSizeAndShapeDistance & 0.28 & -0.04 \\
RSA & 0.00 & 0.18 \\
RSMNormDifference & 0.30 & -0.15 \\
RankSimilarity & -0.09 & -0.27 \\
SecondOrderCosineSimilarity & -0.26 & -0.25 \\
SoftCorrelationMatch & 0.11 & 0.18 \\
UniformityDifference & 0.14 & -0.16 \\
\bottomrule
\end{tabular}%
%}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 2 (Correlation to Output Difference) on MNLI}
\label{tab:resi-nlp-test2}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccc|cccc}
\toprule
\multicolumn{1}{c|}{\textbf{Representation}} &
\multicolumn{4}{c|}{\textbf{CLS Token}} &
\multicolumn{4}{c}{\textbf{Mean-pooled Token}} \\
\multicolumn{1}{c|}{\textbf{Test}} &
\multicolumn{2}{c}{\textbf{JSD Correlation}} &
\multicolumn{2}{c|}{\textbf{Disagreement Correlation}} &
\multicolumn{2}{c}{\textbf{JSD Correlation}} &
\multicolumn{2}{c}{\textbf{Disagreement Correlation}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
BERT & ALBERT & BERT & ALBERT & BERT & ALBERT & BERT & ALBERT \\
\midrule
CKA
& 0.36 & \bf 0.29 & 0.15 & \bf 0.57 & 0.51 & 0.06 & 0.38 & 0.18 \\
CKA ($\delta=0.45$)
& 0.31 & 0.05 & \bf 0.50 & 0.22 & \bf 0.53 & 0.02 & \bf 0.49 & 0.16 \\
CKA ($\delta=0.2$)
& -0.25 & -0.11 & 0.27 & -0.04 & 0.13 & -0.06 & -0.25 & 0.05 \\
kCKA ($k=100$)
& 0.27 & 0.03 & 0.33 & -0.00 & 0.45 & 0.05 & 0.41 & 0.10 \\
SVCCA
& \bf 0.47 & 0.12 & 0.00 & 0.33 & 0.46 & \bf 0.12 & 0.13 & 0.14 \\
RTD
& -0.06 & -0.31 & 0.06 & -0.19 & -0.02 & -0.14 & -0.08 & 0.00 \\
IMD
& -0.39 & -0.30 & -0.07 & -0.16 & 0.14 & 0.13 & -0.04 & \bf 0.27 \\
MKA ($k=100$)
& 0.22 & 0.05 & 0.22 & -0.02 & 0.24 & 0.01 & -0.21 & 0.07 \\
\midrule
\midrule
CKA (linear) & 0.30 & 0.28 & -0.01 & 0.57 & 0.45 & 0.07 & 0.33 & 0.16 \\
MKA ($k=15$) & 0.21 & 0.02 & 0.19 & -0.05 & 0.22 & 0.01 & -0.22 & 0.07 \\
MKA ($k=50$) & 0.22 & 0.05 & 0.22 & -0.02 & 0.24 & 0.01 & -0.21 & 0.07 \\
MKA ($k=200$) & 0.23 & 0.06 & 0.23 & -0.01 & 0.24 & 0.01 & -0.21 & 0.08 \\
\midrule
AlignedCosineSimilarity & 0.37 & 0.09 & -0.16 & -0.03 & -0.00 & 0.30 & -0.12 & 0.25 \\
ConcentricityDifference & 0.02 & -0.07 & -0.31 & 0.07 & -0.03 & 0.24 & -0.14 & 0.25 \\
DistanceCorrelation & 0.39 & 0.32 & 0.12 & 0.57 & 0.49 & 0.07 & 0.38 & 0.18 \\
EigenspaceOverlapScore & 0.36 & -0.10 & 0.01 & -0.16 & 0.35 & 0.02 & 0.10 & 0.03 \\
Gulp & 0.39 & -0.05 & 0.05 & -0.11 & 0.38 & 0.03 & 0.10 & 0.02 \\
HardCorrelationMatch & -0.27 & -0.03 & -0.43 & 0.00 & -0.03 & -0.10 & 0.29 & -0.02 \\
JaccardSimilarity & 0.12 & 0.02 & 0.16 & -0.05 & 0.33 & -0.03 & -0.02 & 0.04 \\
LinearRegression & 0.24 & -0.01 & 0.06 & -0.16 & -0.10 & 0.06 & 0.23 & 0.02 \\
MagnitudeDifference & 0.01 & 0.01 & -0.03 & 0.08 & -0.01 & -0.06 & 0.02 & 0.18 \\
OrthogonalAngularShapeMetricCentered & 0.26 & -0.08 & -0.02 & -0.01 & 0.27 & -0.09 & 0.36 & 0.06 \\
OrthogonalProcrustesCenteredAndNormalized & 0.26 & -0.08 & -0.02 & -0.01 & 0.27 & -0.09 & 0.36 & 0.06 \\
PWCCA & -0.32 & 0.32 & 0.13 & 0.32 & 0.35 & 0.45 & -0.20 & 0.38 \\
PermutationProcrustes & -0.06 & 0.04 & -0.30 & -0.04 & -0.05 & 0.07 & -0.28 & 0.16 \\
ProcrustesSizeAndShapeDistance & 0.07 & -0.00 & -0.38 & -0.07 & -0.05 & 0.05 & -0.18 & 0.12 \\
RSA & 0.27 & 0.23 & 0.19 & 0.47 & 0.43 & -0.03 & 0.38 & 0.10 \\
RSMNormDifference & -0.18 & -0.02 & -0.19 & 0.12 & -0.14 & -0.16 & -0.09 & -0.02 \\
RankSimilarity & 0.08 & -0.06 & 0.05 & -0.13 & 0.15 & -0.02 & -0.29 & 0.05 \\
SecondOrderCosineSimilarity & 0.16 & 0.03 & 0.55 & 0.11 & 0.34 & -0.04 & -0.04 & 0.05 \\
SoftCorrelationMatch & -0.23 & -0.02 & -0.42 & 0.01 & 0.00 & -0.03 & 0.31 & 0.02 \\
UniformityDifference & -0.02 & -0.30 & -0.14 & -0.24 & -0.02 & -0.17 & 0.14 & -0.10 \\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 3 (Label Randomization) on MNLI}
\label{tab:resi-nlp-test3}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccc|cccc}
\toprule
\multicolumn{1}{c|}{\textbf{Representation}} &
\multicolumn{4}{c|}{\textbf{CLS Token}} &
\multicolumn{4}{c}{\textbf{Mean-pooled Token}} \\
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{2}{c}{\textbf{AUPRC}} &
\multicolumn{2}{c|}{\textbf{Conformity Rate}} &
\multicolumn{2}{c}{\textbf{AUPRC}} &
\multicolumn{2}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
BERT & ALBERT & BERT & ALBERT & BERT & ALBERT & BERT & ALBERT \\
\midrule
CKA & \bf 0.75 & \bf 0.80 & 0.89 & \bf 0.93 & \bf 0.66 & 0.45 & 0.86 & 0.71 \\
CKA ($\delta=0.45$) & 0.74 & 0.79 & 0.89 & \bf 0.93 & 0.58 & 0.45 & 0.81 & 0.68 \\
CKA ($\delta=0.2$) & 0.56 & 0.54 & 0.82 & 0.75 & 0.45 & 0.31 & 0.66 & 0.53 \\
kCKA ($k=100$) & 0.69 & 0.46 & 0.83 & 0.68 & 0.50 & 0.43 & 0.73 & 0.59 \\
SVCCA & 0.69 & 0.69 & 0.84 & 0.90 & \bf 0.66 & 0.46 & \bf 0.87 & 0.76 \\
RTD & 0.73 & 0.54 & 0.78 & 0.79 & 0.60 & \bf 0.62 & 0.79 & \bf 0.84 \\
IMD & 0.74 & 0.49 & \bf 0.92 & 0.76 & 0.58 & 0.43 & 0.86 & 0.74 \\
MKA ($k=100$) & 0.72 & 0.52 & 0.85 & 0.71 & 0.57 & 0.34 & 0.75 & 0.60 \\
\midrule
\midrule
CKA (linear) & 0.75 & 0.85 & 0.90 & 0.93 & 0.64 & 0.43 & 0.84 & 0.68 \\
MKA ($k=15$) & 0.73 & 0.63 & 0.86 & 0.80 & 0.58 & 0.34 & 0.76 & 0.65 \\
MKA ($k=50$) & 0.73 & 0.55 & 0.86 & 0.74 & 0.58 & 0.35 & 0.76 & 0.61 \\
MKA ($k=200$) & 0.71 & 0.51 & 0.84 & 0.69 & 0.56 & 0.34 & 0.75 & 0.59 \\
\midrule
AlignedCosineSimilarity & 1.00 & 0.68 & 1.00 & 0.91 & 0.80 & 0.65 & 0.94 & 0.80 \\
ConcentricityDifference & 1.00 & 0.81 & 1.00 & 0.89 & 0.76 & 0.52 & 0.90 & 0.78 \\
DistanceCorrelation & 0.75 & 0.79 & 0.89 & 0.93 & 0.66 & 0.50 & 0.86 & 0.72 \\
EigenspaceOverlapScore & 0.62 & 0.70 & 0.88 & 0.88 & 0.57 & 0.76 & 0.86 & 0.90 \\
Gulp & 0.62 & 0.39 & 0.90 & 0.70 & 0.53 & 0.43 & 0.83 & 0.73 \\
HardCorrelationMatch & 0.75 & 0.68 & 0.90 & 0.86 & 0.53 & 0.55 & 0.82 & 0.81 \\
JaccardSimilarity & 0.60 & 0.58 & 0.74 & 0.76 & 0.65 & 0.50 & 0.81 & 0.70 \\
LinearRegression & 0.43 & 0.29 & 0.80 & 0.67 & 0.43 & 0.43 & 0.68 & 0.68 \\
MagnitudeDifference & 0.33 & 0.56 & 0.75 & 0.76 & 0.39 & 0.48 & 0.78 & 0.81 \\
OrthogonalAngularShapeMetricCentered & 0.90 & 0.97 & 0.98 & 0.99 & 0.71 & 0.64 & 0.94 & 0.82 \\
OrthogonalProcrustesCenteredAndNormalized & 0.90 & 0.97 & 0.98 & 0.99 & 0.71 & 0.64 & 0.94 & 0.82 \\
PWCCA & 0.78 & —— & 0.95 & 1.00 & —— & 0.43 & 1.00 & 0.59 \\
PermutationProcrustes & 0.44 & 0.60 & 0.68 & 0.81 & 0.40 & 0.57 & 0.61 & 0.79 \\
ProcrustesSizeAndShapeDistance & 0.98 & 0.84 & 0.99 & 0.96 & 0.69 & 0.62 & 0.87 & 0.79 \\
RSA & 0.47 & 0.61 & 0.69 & 0.81 & 0.47 & 0.43 & 0.66 & 0.66 \\
RSMNormDifference & 1.00 & 1.00 & 1.00 & 1.00 & 0.86 & 0.59 & 0.94 & 0.83 \\
RankSimilarity & 0.57 & 0.48 & 0.73 & 0.65 & 0.57 & 0.36 & 0.78 & 0.63 \\
SecondOrderCosineSimilarity & 0.71 & —— & 0.72 & 1.00 & 0.73 & —— & 0.80 & 1.00 \\
SoftCorrelationMatch & 0.75 & 0.71 & 0.92 & 0.87 & 0.68 & 0.64 & 0.86 & 0.89 \\
UniformityDifference & 0.76 & 0.75 & 0.91 & 0.90 & 0.88 & 0.65 & 0.94 & 0.88 \\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 4 (Shortcut Affinity) on MNLI}
\label{tab:resi-nlp-test4}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccc|cccc}
\toprule
\multicolumn{1}{c|}{\textbf{Representation}} &
\multicolumn{4}{c|}{\textbf{CLS Token}} &
\multicolumn{4}{c}{\textbf{Mean-pooled Token}} \\
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{2}{c}{\textbf{AUPRC}} &
\multicolumn{2}{c|}{\textbf{Conformity Rate}} &
\multicolumn{2}{c}{\textbf{AUPRC}} &
\multicolumn{2}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
BERT & ALBERT & BERT & ALBERT & BERT & ALBERT & BERT & ALBERT \\
\midrule
CKA & 0.59 & 0.63 & \bf 0.88 & 0.67 & 0.55 & 0.56 & \bf 0.85 & 0.64 \\
CKA ($\delta=0.45$) & 0.58 & \bf 0.67 & 0.87 & \bf 0.81 & 0.55 & 0.56 & 0.84 & 0.66 \\
CKA ($\delta=0.2$) & 0.50 & 0.43 & 0.79 & 0.58 & 0.55 & 0.32 & 0.74 & 0.52 \\
kCKA ($k=100$) & 0.57 & 0.61 & 0.83 & 0.65 & 0.56 & \bf 0.58 & 0.82 & 0.66 \\
SVCCA & 0.42 & 0.60 & 0.78 & 0.62 & 0.49 & 0.52 & 0.80 & 0.61 \\
RTD & \bf 0.61 & 0.41 & 0.84 & 0.57 & \bf 0.64 & 0.39 & 0.84 & 0.53 \\
IMD & 0.53 & 0.30 & 0.82 & 0.45 & 0.52 & 0.29 & 0.81 & 0.48 \\
MKA ($k=100$) & 0.56 & 0.59 & 0.81 & 0.65 & 0.56 & 0.54 & 0.80 & \bf 0.67 \\
\midrule
\midrule
CKA (linear) & 0.59 & 0.63 & 0.88 & 0.65 & 0.53 & 0.50 & 0.84 & 0.61 \\
MKA ($k=15$) & 0.56 & 0.59 & 0.81 & 0.66 & 0.56 & 0.51 & 0.80 & 0.66 \\
MKA ($k=50$) & 0.56 & 0.59 & 0.81 & 0.66 & 0.56 & 0.54 & 0.81 & 0.67 \\
MKA ($k=200$) & 0.56 & 0.59 & 0.81 & 0.65 & 0.56 & 0.54 & 0.80 & 0.67 \\
\midrule
AlignedCosineSimilarity & 0.58 & 0.37 & 0.89 & 0.58 & 0.53 & 0.45 & 0.83 & 0.58 \\
ConcentricityDifference & 0.38 & 0.42 & 0.75 & 0.45 & 0.35 & 0.40 & 0.65 & 0.48 \\
DistanceCorrelation & 0.58 & 0.62 & 0.88 & 0.67 & 0.54 & 0.54 & 0.85 & 0.63 \\
EigenspaceOverlapScore & 0.57 & 0.45 & 0.85 & 0.58 & 0.39 & 0.55 & 0.67 & 0.64 \\
Gulp & 0.62 & 0.46 & 0.87 & 0.58 & 0.45 & 0.56 & 0.77 & 0.65 \\
HardCorrelationMatch & 0.55 & 0.35 & 0.82 & 0.60 & 0.32 & 0.41 & 0.75 & 0.61 \\
JaccardSimilarity & 0.56 & 0.58 & 0.81 & 0.63 & 0.56 & 0.59 & 0.82 & 0.70 \\
LinearRegression & 0.36 & 0.43 & 0.69 & 0.48 & 0.29 & 0.45 & 0.64 & 0.60 \\
MagnitudeDifference & 0.48 & 0.56 & 0.81 & 0.77 & 0.30 & 0.30 & 0.58 & 0.47 \\
OrthogonalAngularShapeMetricCentered & 0.60 & 0.48 & 0.90 & 0.65 & 0.57 & 0.49 & 0.87 & 0.64 \\
OrthogonalProcrustesCenteredAndNormalized & 0.60 & 0.48 & 0.90 & 0.65 & 0.57 & 0.49 & 0.87 & 0.64 \\
PWCCA & —— & 0.42 & 1.00 & 0.61 & 0.43 & —— & 0.74 & 1.00 \\
PermutationProcrustes & 0.52 & 0.54 & 0.82 & 0.63 & 0.35 & 0.54 & 0.60 & 0.58 \\
ProcrustesSizeAndShapeDistance & 0.54 & 0.54 & 0.87 & 0.62 & 0.53 & 0.51 & 0.84 & 0.57 \\
RSA & 0.58 & 0.59 & 0.87 & 0.64 & 0.47 & 0.52 & 0.79 & 0.61 \\
RSMNormDifference & 0.28 & 0.57 & 0.66 & 0.69 & 0.36 & 0.44 & 0.68 & 0.61 \\
RankSimilarity & 0.58 & 0.61 & 0.82 & 0.66 & 0.50 & 0.45 & 0.73 & 0.61 \\
SecondOrderCosineSimilarity & 0.59 & 0.56 & 0.82 & 0.60 & 0.58 & 0.51 & 0.83 & 0.64 \\
SoftCorrelationMatch & 0.55 & 0.36 & 0.82 & 0.60 & 0.33 & 0.41 & 0.75 & 0.62 \\
UniformityDifference & 0.38 & 0.32 & 0.68 & 0.49 & 0.33 & 0.44 & 0.68 & 0.52 \\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 5 (Augmentation) on MNLI}
\label{tab:resi-nlp-test5}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccc|cccc}
\toprule
\multicolumn{1}{c|}{\textbf{Representation}} &
\multicolumn{4}{c|}{\textbf{CLS Token}} &
\multicolumn{4}{c}{\textbf{Mean-pooled Token}} \\
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{2}{c}{\textbf{AUPRC}} &
\multicolumn{2}{c|}{\textbf{Conformity Rate}} &
\multicolumn{2}{c}{\textbf{AUPRC}} &
\multicolumn{2}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
BERT & ALBERT & BERT & ALBERT & BERT & ALBERT & BERT & ALBERT \\
\midrule
CKA & 0.44 & \bf 0.85 & \bf 0.84 & 0.84 & \bf 0.36 & \bf 0.74 & \bf 0.79 & \bf 0.78 \\
CKA ($\delta=0.45$) & 0.45 & \bf 0.85 & 0.80 & \bf 0.86 & 0.34 & 0.72 & 0.77 & \bf 0.78 \\
CKA ($\delta=0.2$) & 0.43 & 0.69 & 0.71 & 0.74 & 0.35 & 0.42 & 0.56 & 0.55 \\
kCKA ($k=100$) & 0.37 & 0.76 & 0.71 & 0.78 & 0.35 & 0.63 & 0.73 & 0.73 \\
SVCCA & 0.43 & 0.60 & 0.77 & 0.66 & 0.31 & 0.63 & 0.70 & 0.71 \\
RTD & \bf 0.53 & 0.71 & 0.82 & 0.77 & 0.28 & 0.41 & 0.66 & 0.53 \\
IMD & 0.43 & 0.37 & 0.76 & 0.48 & 0.20 & 0.30 & 0.51 & 0.46 \\
MKA ($k=100$) & 0.34 & 0.77 & 0.71 & 0.79 & 0.33 & 0.52 & 0.65 & 0.68 \\
\midrule
\midrule
CKA (linear) & 0.48 & 0.83 & 0.87 & 0.82 & 0.34 & 0.72 & 0.76 & 0.77 \\
MKA ($k=15$) & 0.34 & 0.77 & 0.72 & 0.79 & 0.32 & 0.52 & 0.64 & 0.68 \\
MKA ($k=50$) & 0.34 & 0.77 & 0.71 & 0.79 & 0.33 & 0.52 & 0.65 & 0.68 \\
MKA ($k=200$) & 0.34 & 0.77 & 0.70 & 0.79 & 0.33 & 0.52 & 0.65 & 0.68 \\
\midrule
AlignedCosineSimilarity & 0.35 & 0.77 & 0.80 & 0.80 & 0.28 & 0.55 & 0.63 & 0.68 \\
ConcentricityDifference & 0.28 & 0.55 & 0.61 & 0.67 & 0.27 & 0.44 & 0.65 & 0.52 \\
DistanceCorrelation & 0.45 & 0.84 & 0.85 & 0.86 & 0.35 & 0.73 & 0.78 & 0.77 \\
EigenspaceOverlapScore & 0.26 & 0.74 & 0.71 & 0.78 & 0.24 & 0.73 & 0.64 & 0.77 \\
Gulp & 0.27 & 0.73 & 0.68 & 0.79 & 0.25 & 0.73 & 0.65 & 0.77 \\
HardCorrelationMatch & 0.24 & 0.65 & 0.67 & 0.76 & 0.21 & 0.63 & 0.57 & 0.84 \\
JaccardSimilarity & 0.35 & 0.74 & 0.74 & 0.78 & 0.32 & 0.63 & 0.71 & 0.73 \\
LinearRegression & 0.33 & 0.48 & 0.74 & 0.71 & 0.34 & 0.61 & 0.64 & 0.76 \\
MagnitudeDifference & 0.16 & 0.60 & 0.45 & 0.79 & 0.28 & 0.68 & 0.65 & 0.86 \\
OrthogonalAngularShapeMetricCentered & 0.37 & 0.91 & 0.84 & 0.94 & 0.29 & 0.81 & 0.75 & 0.82 \\
OrthogonalProcrustesCenteredAndNormalized & 0.37 & 0.91 & 0.84 & 0.94 & 0.29 & 0.81 & 0.75 & 0.82 \\
PWCCA & 0.39 & 0.52 & 0.79 & 0.48 & —— & —— & —— & —— \\
PermutationProcrustes & 0.18 & 0.49 & 0.50 & 0.55 & 0.17 & 0.51 & 0.44 & 0.59 \\
ProcrustesSizeAndShapeDistance & 0.31 & 0.74 & 0.73 & 0.85 & 0.25 & 0.49 & 0.60 & 0.59 \\
RSA & 0.48 & 0.86 & 0.86 & 0.84 & 0.34 & 0.75 & 0.76 & 0.78 \\
RSMNormDifference & 0.36 & 0.85 & 0.67 & 0.84 & 0.24 & 0.42 & 0.58 & 0.52 \\
RankSimilarity & 0.33 & 0.73 & 0.71 & 0.77 & 0.35 & 0.50 & 0.60 & 0.66 \\
SecondOrderCosineSimilarity & 0.44 & 0.68 & 0.64 & 0.72 & 0.38 & 0.58 & 0.66 & 0.69 \\
SoftCorrelationMatch & 0.27 & 0.62 & 0.72 & 0.75 & 0.21 & 0.59 & 0.57 & 0.81 \\
UniformityDifference & 0.61 & 0.44 & 0.84 & 0.60 & 0.42 & 0.33 & 0.75 & 0.50 \\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 6 (Layer Monotonicity) on MNLI}
\label{tab:resi-nlp-test6}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccc|cccc}
\toprule
\multicolumn{1}{c|}{\textbf{Representation}} &
\multicolumn{4}{c|}{\textbf{CLS Token}} &
\multicolumn{4}{c}{\textbf{Mean-pooled Token}} \\
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{2}{c}{\textbf{Spearman}} &
\multicolumn{2}{c|}{\textbf{Conformity Rate}} &
\multicolumn{2}{c}{\textbf{Spearman}} &
\multicolumn{2}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
BERT & ALBERT & BERT & ALBERT & BERT & ALBERT & BERT & ALBERT \\
\midrule
CKA & \bf 0.99 & 0.98 & \bf 0.98 & 0.97 & \bf 1.00 & 0.99 & 0.99 & 0.99 \\
CKA ($\delta=0.45$) & \bf 0.99 & 0.98 & \bf 0.98 & 0.97 & \bf 1.00 & \bf 1.00 & 0.99 & \bf 1.00 \\
CKA ($\delta=0.2$) & 0.92 & 0.95 & 0.90 & 0.93 & 0.95 & \bf 1.00 & 0.96 & \bf 1.00 \\
kCKA ($k=100$) & 0.91 & 0.90 & 0.90 & 0.89 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
SVCCA & 0.91 & 0.77 & 0.91 & 0.85 & 0.78 & 0.83 & 0.85 & 0.87 \\
RTD & 0.42 & 0.43 & 0.76 & 0.75 & 0.95 & 0.89 & 0.94 & 0.88 \\
IMD & 0.44 & 0.79 & 0.68 & 0.85 & 0.71 & 0.75 & 0.74 & 0.89 \\
MKA ($k=100$) & \bf 0.99 & \bf 0.99 & 0.97 & \bf 0.99 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
\midrule
\midrule
CKA (linear) & 1.00 & 0.99 & 0.99 & 0.98 & 1.00 & 0.99 & 0.99 & 0.99 \\
MKA ($k=15$) & 0.97 & 0.99 & 0.95 & 0.98 & 1.00 & 1.00 & 1.00 & 1.00 \\
MKA ($k=50$) & 0.99 & 0.99 & 0.97 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 \\
MKA ($k=200$) & 0.99 & 0.99 & 0.98 & 0.98 & 1.00 & 1.00 & 1.00 & 1.00 \\
\midrule
AlignedCosineSimilarity & 1.00 & 0.95 & 1.00 & 0.95 & 1.00 & 1.00 & 1.00 & 0.99 \\
ConcentricityDifference & 0.99 & 0.87 & 0.99 & 0.90 & 0.50 & 0.34 & 0.62 & 0.62 \\
DistanceCorrelation & 0.97 & 0.99 & 0.96 & 0.99 & 1.00 & 0.99 & 0.99 & 0.99 \\
EigenspaceOverlapScore & 0.99 & 0.96 & 0.98 & 0.96 & 1.00 & 1.00 & 1.00 & 1.00 \\
Gulp & 0.89 & 0.91 & 0.90 & 0.91 & 1.00 & 0.94 & 1.00 & 0.94 \\
HardCorrelationMatch & 1.00 & 1.00 & 1.00 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 \\
JaccardSimilarity & 0.95 & 0.95 & 0.94 & 0.94 & 1.00 & 1.00 & 1.00 & 1.00 \\
LinearRegression & 0.57 & 0.66 & 0.82 & 0.83 & 0.40 & 0.40 & 0.71 & 0.75 \\
MagnitudeDifference & 0.52 & 0.90 & 0.64 & 0.93 & 0.66 & 0.60 & 0.81 & 0.88 \\
OrthogonalAngularShapeMetricCentered & 1.00 & 0.99 & 0.99 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 \\
OrthogonalProcrustesCenteredAndNormalized & 0.99 & 0.99 & 0.99 & 0.98 & 1.00 & 1.00 & 1.00 & 1.00 \\
PWCCA & —— & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 \\
PermutationProcrustes & 0.73 & 0.96 & 0.80 & 0.95 & 0.90 & 0.95 & 0.90 & 0.93 \\
ProcrustesSizeAndShapeDistance & 0.92 & 0.94 & 0.96 & 0.94 & 0.99 & 0.99 & 0.99 & 0.99 \\
RSA & 1.00 & 1.00 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 \\
RSMNormDifference & 0.84 & 0.89 & 0.94 & 0.90 & 0.85 & 0.88 & 0.91 & 0.94 \\
RankSimilarity & 0.89 & 0.92 & 0.92 & 0.93 & 0.51 & 0.64 & 0.79 & 0.87 \\
SecondOrderCosineSimilarity & 0.94 & 0.91 & 0.94 & 0.92 & 1.00 & 1.00 & 1.00 & 1.00 \\
SoftCorrelationMatch & 0.99 & 0.99 & 0.98 & 0.98 & 1.00 & 1.00 & 0.99 & 0.99 \\
UniformityDifference & 0.81 & 0.87 & 0.94 & 0.91 & 0.83 & 0.91 & 0.95 & 0.98 \\
\bottomrule
\end{tabular}%
}
\end{table}
\clearpage
\subsection{Graph Task}
\begin{table}[htbp]
\caption{Results of Test 1 (Correlation to Accuracy Difference) for the graph domain}
\label{tab:resi-graph-test1}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{10}{c}{\textbf{Spearman}} \\
%\midrule
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{4}{c}{\textbf{Cora}} &
\multicolumn{3}{|c|}{\textbf{Flickr}} &
\multicolumn{3}{c}{\textbf{OGBN-Arxiv}} \\
%\midrule
\multicolumn{1}{c|}{\textbf{Architecture}} &
GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT \\
\midrule
CKA & 0.04 & -0.15 & 0.03 & -0.01 & 0.50 & \bf 0.41 & -0.14 & -0.07 & 0.14 & \bf-0.03 \\
CKA ($\delta=0.45$) & \bf 0.09 & -0.22 & 0.00 & -0.06 & \bf 0.52 & 0.17 & -0.16 & -0.12 & 0.18 & -0.13 \\
CKA ($\delta=0.2$) & 0.01 & -0.29 & 0.07 & 0.03 & 0.43 & 0.14 & \bf 0.11 & -0.22 & 0.17 & -0.28 \\
kCKA ($k=100$) & 0.07 & -0.25 & -0.01 & \bf 0.11 & 0.42 & -0.22 & -0.28 &\bf -0.06 & \bf 0.23 & -0.16 \\
SVCCA & -0.03 & -0.12 & -0.16 & 0.08 & 0.01 & 0.01 & -0.18 & -0.27 & 0.09 & -0.10 \\
RTD & 0.16 & -0.26 & -0.02 & -0.30 & 0.24 & -0.02 & -0.16 & -0.32 & -0.07 & -0.27 \\
IMD & 0.03 & \bf 0.00 & -0.02 & -0.04 & -0.10 & 0.36 & -0.09 & -0.24 & -0.02 & -0.15 \\
MKA ($k=100$) & 0.01 & -0.18 & \bf 0.11 & -0.15 & 0.32 & -0.05 & -0.19 & -0.27 & 0.08 & -0.26 \\
\midrule
\midrule
CKA (linear) & 0.03 & -0.18 & 0.03 & 0.09 & 0.03 & 0.27 & -0.16 & -0.17 & 0.11 & -0.05 \\
MKA ($k=15$) & 0.00 & -0.21 & 0.13 & -0.13 & 0.32 & -0.04 & -0.21 & -0.31 & 0.05 & -0.26 \\
MKA ($k=50$) & 0.00 & -0.17 & 0.13 & -0.12 & 0.31 & -0.04 & -0.19 & -0.28 & 0.08 & -0.25 \\
MKA ($k=200$) & 0.02 & -0.16 & 0.11 & -0.12 & 0.32 & -0.06 & -0.18 & -0.26 & 0.09 & -0.28 \\
\midrule
AlignedCosineSimilarity & -0.02 & 0.13 & -0.32 & -0.04 & 0.35 & 0.24 & -0.07 & -0.08 & 0.17 & -0.17 \\
ConcentricityDifference & 0.13 & -0.25 & -0.22 & 0.13 & -0.08 & -0.29 & -0.07 & -0.07 & -0.13 & -0.12 \\
DistanceCorrelation & -0.03 & -0.18 & 0.03 & 0.13 & 0.41 & 0.42 & -0.19 & -0.10 & 0.15 & -0.06 \\
EigenspaceOverlapScore & -0.19 & 0.07 & -0.05 & -0.06 & 0.15 & -0.27 & 0.29 & -0.21 & 0.05 & -0.32 \\
Gulp & -0.20 & 0.07 & -0.12 & 0.12 & 0.26 & -0.27 & -0.27 & -0.05 & 0.06 & -0.34 \\
HardCorrelationMatch & -0.00 & -0.11 & -0.14 & 0.16 & 0.31 & 0.35 & 0.06 & 0.36 & 0.02 & 0.04 \\
JaccardSimilarity & 0.05 & -0.12 & -0.16 & 0.02 & 0.32 & 0.28 & -0.18 & -0.32 & -0.12 & -0.15 \\
LinearRegression & 0.07 & -0.22 & -0.13 & 0.05 & -0.03 & 0.17 & -0.18 & 0.07 & -0.01 & -0.19 \\
MagnitudeDifference & 0.10 & -0.13 & -0.21 & 0.13 & 0.02 & -0.17 & 0.14 & -0.18 & -0.20 & 0.11 \\
OrthogonalAngularShapeMetricCentered & 0.03 & -0.29 & -0.13 & 0.23 & 0.39 & 0.28 & -0.15 & -0.04 & 0.09 & -0.09 \\
OrthogonalProcrustesCenteredAndNormalized & 0.03 & -0.29 & -0.13 & 0.23 & 0.39 & 0.28 & -0.15 & -0.04 & 0.09 & -0.09 \\
PWCCA & -0.16 & 0.06 & -0.26 & -0.15 & —— & -0.05 & -0.16 & -0.12 & 0.06 & -0.30 \\
PermutationProcrustes & 0.05 & 0.19 & -0.28 & 0.34 & 0.20 & -0.19 & 0.15 & -0.09 & 0.03 & 0.43 \\
ProcrustesSizeAndShapeDistance & 0.04 & 0.01 & -0.21 & 0.33 & 0.02 & -0.06 & 0.11 & -0.17 & 0.07 & 0.43 \\
RSA & 0.06 & 0.04 & -0.31 & 0.20 & 0.53 & 0.32 & -0.08 & -0.07 & 0.25 & 0.32 \\
RSMNormDifference & -0.06 & 0.08 & -0.14 & 0.28 & -0.18 & -0.16 & 0.13 & -0.05 & -0.19 & 0.02 \\
RankSimilarity & 0.00 & -0.10 & 0.34 & 0.11 & 0.35 & 0.31 & -0.19 & -0.26 & 0.05 & -0.10 \\
SecondOrderCosineSimilarity & 0.02 & 0.04 & -0.12 & 0.11 & 0.54 & -0.19 & 0.01 & -0.47 & 0.22 & -0.19 \\
SoftCorrelationMatch & 0.07 & -0.05 & 0.02 & 0.12 & 0.30 & 0.33 & -0.07 & 0.35 & 0.12 & 0.12 \\
UniformityDifference & -0.06 & -0.05 & -0.08 & -0.06 & -0.18 & 0.03 & -0.18 & -0.19 & -0.20 & -0.25 \\
\bottomrule
\end{tabular}%
}
\end{table}
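For reference, the Spearman scores in Test 1 can be reproduced from a symmetric matrix of pairwise similarity scores and per-model test accuracies. The sketch below (Python; all names are illustrative, and the sign of the desirable correlation depends on whether a measure reports similarity or distance) is a minimal reading of the protocol, not the benchmark implementation:
\begin{verbatim}
import numpy as np
from scipy.stats import spearmanr

def test1_spearman(sim_matrix, accuracies):
    """sim_matrix[i, j]: similarity of models i and j (symmetric);
    accuracies[i]: test accuracy of model i."""
    n = len(accuracies)
    sims, gaps = [], []
    for i in range(n):
        for j in range(i + 1, n):
            sims.append(sim_matrix[i, j])
            gaps.append(abs(accuracies[i] - accuracies[j]))
    rho, _ = spearmanr(sims, gaps)
    return rho
\end{verbatim}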
\begin{table}[htbp]
\caption{Results of Test 2 (Correlation to Output Difference) for the graph domain}
\label{tab:graph-test2}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccccccccc|cccccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Type}} &
\multicolumn{20}{c}{\textbf{Grounding by Prediction}} \\
\multicolumn{1}{c|}{\textbf{Test}} &
\multicolumn{10}{c|}{\textbf{JSD Correlation}} &
\multicolumn{10}{c}{\textbf{Disagreement Correlation}} \\
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{10}{c|}{\textbf{Spearman}} &
\multicolumn{10}{c}{\textbf{Spearman}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{4}{c}{\textbf{Cora}} &
\multicolumn{3}{|c|}{\textbf{Flickr}} &
\multicolumn{3}{c|}{\textbf{OGBN-Arxiv}} &
\multicolumn{4}{c}{\textbf{Cora}} &
\multicolumn{3}{|c|}{\textbf{Flickr}} &
\multicolumn{3}{c}{\textbf{OGBN-Arxiv}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT & GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT \\
\midrule
CKA & 0.73 & 0.12 & 0.59 & -0.23 & 0.54 & \bf 0.35 & 0.12 & 0.18 & 0.19 & \bf 0.38 & 0.65 & -0.05 & \bf 0.44 & 0.01 & 0.26 & 0.32 & 0.02 & 0.13 & 0.14 & \bf 0.21 \\
CKA ($\delta=0.45$) & 0.73 & 0.30 & 0.52 & -0.08 & \bf 0.56 & 0.18 & 0.15 & 0.22 & 0.22 & 0.25 & 0.64 & -0.06 & 0.34 & 0.14 & \bf 0.29 & 0.15 & 0.01 & 0.21 & \bf 0.18 & 0.09 \\
CKA ($\delta=0.2$) & 0.61 & 0.49 & 0.52 & -0.04 & 0.41 & 0.25 & -0.42 & 0.16 & 0.24 & 0.11 & 0.51 & 0.17 & 0.41 & 0.03 & 0.14 & 0.22 & -0.43 & 0.15 & 0.17 & -0.01 \\
kCKA ($k=100$) & 0.73 & 0.46 & \bf 0.60 & \bf 0.07 & 0.44 & -0.19 & 0.08 & \bf 0.29 & 0.09 & 0.27 & 0.62 & 0.01 & 0.37 & \bf 0.18 & 0.15 & -0.15 & -0.05 & \bf 0.32 & 0.08 & 0.14 \\
SVCCA & 0.52 & 0.02 & 0.26 & -0.08 & -0.04 & 0.23 & 0.13 & 0.19 & 0.16 & -0.17 & 0.59 & 0.04 & 0.17 & -0.01 & -0.27 & 0.21 & \bf 0.03 & 0.08 & 0.11 & -0.15 \\
RTD & 0.52 & \bf 0.54 & 0.13 & -0.01 & 0.26 & -0.28 & 0.06 & 0.01 & 0.00 & 0.07 & 0.49 & \bf 0.24 & 0.07 & -0.19 & 0.05 & -0.22 & \bf 0.03 & 0.02 & -0.11 & -0.05 \\
IMD & -0.12 & -0.43 & -0.13 & -0.01 & -0.08 & 0.29 & 0.04 & -0.12 & -0.03 & -0.02 & -0.18 & -0.10 & -0.20 & -0.21 & -0.10 & \bf 0.33 & -0.02 & -0.14 & 0.01 & 0.01 \\
MKA ($k=100$) & \bf 0.77 & 0.53 & 0.48 & -0.06 & 0.32 & 0.33 & \bf 0.16 & 0.18 & \bf 0.30 & 0.11 & \bf 0.74 & 0.15 & 0.34 & -0.23 & 0.01 & \bf 0.33 & 0.02 & 0.18 & 0.16 & 0.00 \\
\midrule
\midrule
CKA (linear) & 0.71 & -0.03 & 0.53 & -0.22 & 0.03 & 0.58 & 0.17 & 0.12 & -0.02 & 0.38 & 0.65 & 0.00 & 0.45 & -0.02 & -0.21 & 0.53 & 0.06 & 0.03 & -0.04 & 0.23 \\
MKA ($k=15$) & 0.75 & 0.52 & 0.47 & -0.35 & 0.31 & 0.26 & 0.17 & 0.16 & 0.30 & 0.10 & 0.73 & 0.13 & 0.31 & -0.18 & 0.00 & 0.24 & 0.03 & 0.15 & 0.14 & 0.00 \\
MKA ($k=50$) & 0.76 & 0.53 & 0.48 & -0.20 & 0.32 & 0.31 & 0.17 & 0.17 & 0.29 & 0.12 & 0.73 & 0.14 & 0.33 & -0.20 & 0.01 & 0.30 & 0.03 & 0.17 & 0.15 & 0.01 \\
MKA ($k=200$) & 0.77 & 0.55 & 0.49 & -0.01 & 0.33 & 0.33 & 0.15 & 0.20 & 0.31 & 0.11 & 0.73 & 0.15 & 0.34 & -0.22 & 0.02 & 0.33 & 0.02 & 0.20 & 0.18 & -0.01 \\
\midrule
AlignedCosineSimilarity & 0.32 & 0.38 & 0.17 & -0.14 & 0.31 & 0.44 & -0.01 & -0.03 & 0.05 & 0.28 & 0.27 & 0.27 & -0.05 & 0.14 & 0.15 & 0.37 & -0.08 & -0.10 & 0.00 & 0.17 \\
ConcentricityDifference & 0.41 & 0.04 & 0.01 & 0.26 & -0.17 & -0.03 & 0.03 & -0.13 & 0.02 & -0.22 & 0.31 & -0.10 & 0.03 & 0.03 & -0.21 & -0.04 & 0.03 & -0.25 & 0.07 & -0.16 \\
DistanceCorrelation & 0.71 & 0.05 & 0.60 & -0.23 & 0.46 & 0.43 & 0.03 & 0.16 & 0.12 & 0.36 & 0.63 & -0.08 & 0.46 & 0.08 & 0.17 & 0.40 & -0.03 & 0.11 & 0.08 & 0.20 \\
EigenspaceOverlapScore & -0.50 & 0.22 & -0.04 & -0.14 & -0.03 & 0.38 & 0.11 & -0.17 & 0.11 & 0.37 & -0.46 & 0.02 & -0.09 & 0.21 & 0.02 & 0.33 & 0.23 & -0.25 & -0.02 & 0.12 \\
Gulp & -0.58 & 0.18 & -0.04 & 0.33 & 0.48 & 0.38 & 0.12 & 0.13 & 0.11 & 0.35 & -0.54 & -0.09 & -0.13 & 0.34 & 0.29 & 0.33 & -0.01 & 0.11 & -0.04 & 0.10 \\
HardCorrelationMatch & 0.75 & 0.16 & 0.52 & -0.07 & 0.53 & 0.50 & 0.09 & -0.05 & -0.28 & 0.46 & 0.66 & 0.10 & 0.26 & 0.33 & 0.40 & 0.46 & -0.09 & 0.02 & -0.24 & 0.24 \\
JaccardSimilarity & 0.78 & 0.46 & 0.38 & -0.01 & 0.33 & 0.42 & 0.11 & 0.20 & 0.09 & 0.37 & 0.68 & 0.33 & 0.12 & 0.01 & 0.04 & 0.42 & 0.00 & 0.15 & 0.01 & 0.22 \\
LinearRegression & 0.39 & 0.33 & 0.19 & -0.21 & 0.05 & 0.48 & 0.18 & -0.10 & -0.17 & 0.47 & 0.35 & 0.09 & -0.06 & 0.10 & 0.01 & 0.46 & 0.06 & -0.12 & -0.19 & 0.22 \\
MagnitudeDifference & 0.44 & 0.06 & -0.15 & 0.23 & 0.03 & 0.06 & -0.26 & -0.13 & -0.13 & 0.08 & 0.32 & -0.25 & -0.07 & 0.27 & 0.06 & 0.07 & -0.20 & -0.25 & -0.19 & 0.22 \\
OrthogonalAngularShapeMetricCentered & 0.73 & 0.27 & 0.28 & -0.10 & 0.43 & 0.63 & 0.13 & 0.04 & -0.15 & 0.44 & 0.66 & 0.16 & 0.05 & 0.17 & 0.19 & 0.57 & 0.03 & 0.02 & -0.16 & 0.27 \\
OrthogonalProcrustesCenteredAndNormalized & 0.73 & 0.27 & 0.28 & -0.10 & 0.43 & 0.63 & 0.13 & 0.04 & -0.15 & 0.44 & 0.66 & 0.16 & 0.05 & 0.17 & 0.19 & 0.57 & 0.03 & 0.02 & -0.16 & 0.27 \\
PWCCA & -0.20 & 0.27 & 0.02 & -0.23 & —— & 0.38 & 0.27 & -0.21 & -0.04 & 0.29 & -0.20 & 0.36 & -0.14 & 0.23 & —— & 0.32 & 0.16 & -0.24 & -0.08 & 0.03 \\
PermutationProcrustes & 0.68 & 0.10 & 0.13 & 0.26 & 0.29 & -0.10 & -0.42 & 0.34 & -0.55 & 0.06 & 0.65 & 0.33 & -0.14 & 0.38 & 0.18 & -0.10 & -0.27 & 0.25 & -0.41 & 0.22 \\
ProcrustesSizeAndShapeDistance & 0.69 & 0.08 & 0.05 & 0.27 & 0.02 & -0.18 & -0.38 & 0.36 & -0.50 & 0.13 & 0.62 & 0.24 & -0.17 & 0.38 & -0.13 & -0.15 & -0.27 & 0.26 & -0.38 & 0.30 \\
RSA & 0.46 & 0.13 & 0.20 & 0.23 & 0.52 & 0.63 & 0.14 & 0.20 & 0.14 & 0.46 & 0.47 & 0.33 & -0.02 & 0.38 & 0.35 & 0.59 & 0.06 & 0.17 & 0.15 & 0.45 \\
RSMNormDifference & 0.33 & -0.01 & -0.07 & 0.20 & -0.22 & -0.04 & -0.37 & -0.20 & 0.11 & 0.33 & 0.34 & 0.14 & -0.15 & 0.30 & -0.30 & 0.00 & -0.26 & -0.14 & 0.09 & 0.36 \\
RankSimilarity & 0.39 & 0.55 & 0.54 & -0.04 & 0.33 & 0.30 & 0.22 & 0.03 & 0.36 & 0.13 & 0.46 & 0.21 & 0.46 & 0.21 & 0.06 & 0.24 & 0.05 & 0.04 & 0.27 & 0.09 \\
SecondOrderCosineSimilarity & 0.81 & 0.54 & 0.53 & -0.14 & 0.47 & 0.15 & 0.07 & 0.11 & 0.34 & 0.11 & 0.69 & 0.25 & 0.30 & 0.00 & 0.30 & 0.14 & -0.10 & 0.04 & 0.27 & 0.06 \\
SoftCorrelationMatch & 0.80 & -0.01 & 0.21 & -0.06 & 0.53 & 0.53 & 0.23 & 0.02 & -0.28 & 0.56 & 0.73 & 0.06 & 0.07 & 0.31 & 0.39 & 0.49 & 0.02 & 0.03 & -0.26 & 0.35 \\
UniformityDifference & -0.11 & 0.27 & -0.11 & -0.22 & -0.32 & 0.02 & 0.21 & -0.34 & 0.12 & 0.12 & -0.13 & 0.16 & -0.15 & 0.08 & -0.34 & 0.04 & 0.18 & -0.33 & 0.10 & 0.06 \\
\bottomrule
\end{tabular}%
}
\end{table}
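The two output-difference notions of Test 2 admit a compact sketch, assuming row-wise softmax outputs of two models over the same test instances (function and variable names are illustrative):
\begin{verbatim}
import numpy as np
from scipy.spatial.distance import jensenshannon

def output_differences(probs_a, probs_b):
    """probs_*: (n_instances, n_classes) softmax outputs."""
    # SciPy's jensenshannon returns the JS distance (the square
    # root of the divergence), so square it before averaging.
    jsd = np.mean([jensenshannon(p, q, base=2) ** 2
                   for p, q in zip(probs_a, probs_b)])
    # Disagreement: share of instances with differing predictions.
    dis = np.mean(probs_a.argmax(1) != probs_b.argmax(1))
    return jsd, dis
\end{verbatim}
The similarity scores are then rank-correlated against each of these two quantities, analogously to Test 1.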
\begin{table}[htbp]
\caption{Results of Test 3 (Label Randomization) for the graph domain}
\label{tab:graph-test3}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccccccccc|cccccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{10}{c|}{\textbf{AUPRC}} &
\multicolumn{10}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{4}{c|}{\textbf{Cora}} &
\multicolumn{3}{c|}{\textbf{Flickr}} &
\multicolumn{3}{c|}{\textbf{OGBN-Arxiv}} &
\multicolumn{4}{c|}{\textbf{Cora}} &
\multicolumn{3}{c|}{\textbf{Flickr}} &
\multicolumn{3}{c}{\textbf{OGBN-Arxiv}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT & GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT \\
\midrule
CKA & 0.48 & 0.56 & 0.43 & 0.25 & 0.88 & 0.42 & 0.31 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.72 & 0.78 & 0.54 & 0.64 & 0.96 & 0.50 & 0.61 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
CKA ($\delta=0.45$) & 0.50 & 0.50 & 0.52 & 0.26 & \bf 0.92 & 0.45 & \bf 0.38 & \bf 1.00 & 0.96 & \bf 1.00 & 0.75 & 0.77 & 0.76 & \bf 0.65 & \bf 0.98 & 0.66 & \bf 0.69 & \bf 1.00 & 0.98 & \bf 1.00 \\
CKA ($\delta=0.2$) & \bf 0.74 & 0.72 & 0.73 & 0.21 & 0.78 & 0.42 & 0.23 & \bf 1.00 & 0.90 & 0.99 & \bf 0.93 & 0.88 & 0.86 & 0.52 & 0.93 & 0.50 & 0.57 & \bf 1.00 & 0.98 & \bf 1.00 \\
kCKA ($k=100$) & 0.43 & 0.42 & 0.42 & 0.24 & 0.74 & 0.43 & 0.37 & \bf 1.00 & 0.74 & 0.92 & 0.59 & 0.52 & 0.52 & 0.58 & 0.88 & 0.61 & 0.66 & \bf 1.00 & 0.88 & 0.98 \\
SVCCA & 0.37 & 0.31 & 0.42 & 0.19 & 0.33 & \bf 0.80 & 0.27 & \bf 1.00 & \bf 1.00 & 0.93 & 0.63 & 0.56 & 0.59 & 0.39 & 0.69 & \bf 0.91 & 0.54 & \bf 1.00 & \bf 1.00 & 0.96 \\
RTD & 0.58 & 0.84 & \bf 0.96 & 0.22 & 0.86 & 0.63 & 0.21 & 0.98 & 0.85 & 0.93 & 0.82 & 0.97 & \bf 0.99 & 0.51 & 0.97 & 0.84 & 0.47 & \bf 1.00 & 0.95 & 0.99 \\
IMD & 0.66 & \bf 0.98 & 0.83 & \bf 0.27 & 0.22 & 0.35 & 0.23 & \bf 1.00 & \bf 1.00 & 0.89 & 0.90 & \bf 0.99 & 0.96 & 0.53 & 0.47 & 0.70 & 0.54 & \bf 1.00 & \bf 1.00 & 0.97 \\
MKA ($k=100$) & 0.45 & 0.43 & 0.45 & 0.21 & 0.73 & 0.43 & 0.30 & 0.94 & 0.43 & 0.81 & 0.66 & 0.54 & 0.67 & 0.52 & 0.85 & 0.51 & 0.56 & 0.99 & 0.57 & 0.95 \\
\midrule
\midrule
CKA (linear) & 0.43 & 0.42 & 0.42 & 0.24 & 0.73 & 0.66 & 0.27 & 1.00 & 1.00 & 1.00 & 0.55 & 0.51 & 0.51 & 0.58 & 0.91 & 0.86 & 0.54 & 1.00 & 1.00 & 1.00 \\
MKA ($k=15$) & 0.44 & 0.42 & 0.44 & 0.24 & 0.73 & 0.43 & 0.31 & 0.94 & 0.43 & 0.80 & 0.63 & 0.51 & 0.62 & 0.55 & 0.84 & 0.58 & 0.57 & 0.99 & 0.54 & 0.94 \\
MKA ($k=50$) & 0.44 & 0.42 & 0.44 & 0.23 & 0.73 & 0.43 & 0.31 & 0.94 & 0.43 & 0.81 & 0.64 & 0.52 & 0.64 & 0.53 & 0.85 & 0.52 & 0.56 & 0.99 & 0.55 & 0.95 \\
MKA ($k=200$) & 0.46 & 0.43 & 0.46 & 0.20 & 0.73 & 0.43 & 0.31 & 0.94 & 0.44 & 0.81 & 0.67 & 0.57 & 0.69 & 0.51 & 0.85 & 0.51 & 0.56 & 0.99 & 0.60 & 0.95 \\
\midrule
AlignedCosineSimilarity & 0.50 & 0.48 & 0.43 & 0.24 & 0.84 & 0.42 & 0.29 & 0.98 & 0.70 & 0.93 & 0.71 & 0.66 & 0.60 & 0.60 & 0.94 & 0.50 & 0.58 & 1.00 & 0.67 & 0.97 \\
ConcentricityDifference & 0.29 & 0.39 & 0.39 & 0.20 & 0.37 & 0.57 & 0.21 & 0.96 & 1.00 & 1.00 & 0.64 & 0.78 & 0.74 & 0.46 & 0.72 & 0.85 & 0.48 & 0.99 & 1.00 & 1.00 \\
DistanceCorrelation & 0.43 & 0.44 & 0.42 & 0.25 & 0.86 & 0.43 & 0.22 & 1.00 & 1.00 & 1.00 & 0.60 & 0.63 & 0.52 & 0.66 & 0.95 & 0.56 & 0.52 & 1.00 & 1.00 & 1.00 \\
EigenspaceOverlapScore & 0.48 & 0.26 & 0.28 & 0.26 & 0.41 & 0.42 & 0.22 & 0.25 & 0.43 & 0.34 & 0.72 & 0.55 & 0.66 & 0.59 & 0.50 & 0.52 & 0.58 & 0.50 & 0.54 & 0.51 \\
Gulp & 0.48 & 0.26 & 0.28 & 0.24 & 0.20 & 0.42 & 0.23 & 0.29 & 0.43 & 0.30 & 0.74 & 0.56 & 0.66 & 0.56 & 0.50 & 0.51 & 0.57 & 0.51 & 0.55 & 0.56 \\
HardCorrelationMatch & 0.42 & 0.42 & 0.42 & 0.22 & 0.77 & 0.46 & 0.33 & 0.83 & 0.83 & 0.54 & 0.53 & 0.51 & 0.51 & 0.54 & 0.94 & 0.68 & 0.67 & 0.97 & 0.97 & 0.77 \\
JaccardSimilarity & 0.42 & 0.42 & 0.43 & 0.25 & 0.56 & 0.43 & 0.29 & 0.83 & 0.43 & 0.78 & 0.52 & 0.51 & 0.56 & 0.57 & 0.77 & 0.58 & 0.57 & 0.97 & 0.53 & 0.93 \\
LinearRegression & 0.49 & 0.43 & 0.46 & 0.24 & 0.22 & 0.45 & 0.23 & 0.45 & 0.68 & 0.47 & 0.72 & 0.58 & 0.69 & 0.54 & 0.48 & 0.66 & 0.52 & 0.64 & 0.81 & 0.63 \\
MagnitudeDifference & 0.24 & 0.24 & 0.37 & 0.27 & 0.66 & 0.72 & 0.18 & 0.55 & 0.49 & 0.34 & 0.56 & 0.60 & 0.74 & 0.61 & 0.89 & 0.93 & 0.47 & 0.83 & 0.86 & 0.75 \\
OrthogonalAngularShapeMetricCentered & 0.43 & 0.42 & 0.42 & 0.23 & 0.88 & 0.43 & 0.27 & 0.83 & 0.73 & 0.77 & 0.54 & 0.51 & 0.52 & 0.60 & 0.95 & 0.60 & 0.53 & 0.97 & 0.84 & 0.88 \\
OrthogonalProcrustesCenteredAndNormalized & 0.43 & 0.42 & 0.42 & 0.23 & 0.88 & 0.43 & 0.27 & 0.83 & 0.73 & 0.77 & 0.54 & 0.51 & 0.52 & 0.60 & 0.95 & 0.60 & 0.53 & 0.97 & 0.84 & 0.88 \\
PWCCA & 0.45 & 0.33 & 0.28 & 0.26 & —— & 0.44 & 0.24 & 0.24 & 0.44 & 0.36 & 0.67 & 0.52 & 0.64 & 0.58 & 1.00 & 0.64 & 0.53 & 0.44 & 0.61 & 0.56 \\
PermutationProcrustes & 0.45 & 0.39 & 0.44 & 0.27 & 0.77 & 0.90 & 0.19 & 0.68 & 0.72 & 0.93 & 0.66 & 0.71 & 0.61 & 0.52 & 0.94 & 0.97 & 0.50 & 0.84 & 0.88 & 0.98 \\
ProcrustesSizeAndShapeDistance & 0.46 & 0.45 & 0.43 & 0.27 & 0.79 & 0.62 & 0.19 & 0.92 & 0.98 & 1.00 & 0.70 & 0.68 & 0.60 & 0.51 & 0.93 & 0.88 & 0.51 & 0.98 & 1.00 & 1.00 \\
RSA & 0.47 & 0.44 & 0.43 & 0.23 & 0.74 & 0.42 & 0.33 & 0.96 & 0.43 & 0.49 & 0.71 & 0.61 & 0.54 & 0.57 & 0.89 & 0.52 & 0.64 & 0.99 & 0.58 & 0.63 \\
RSMNormDifference & 0.53 & 0.53 & 0.78 & 0.29 & 0.71 & 0.92 & 0.19 & 1.00 & 1.00 & 1.00 & 0.78 & 0.83 & 0.86 & 0.62 & 0.91 & 0.97 & 0.51 & 1.00 & 1.00 & 1.00 \\
RankSimilarity & 0.45 & 0.42 & 0.50 & 0.20 & 0.48 & 0.43 & 0.33 & 0.85 & 0.55 & 0.78 & 0.64 & 0.53 & 0.66 & 0.49 & 0.66 & 0.53 & 0.58 & 0.97 & 0.63 & 0.93 \\
SecondOrderCosineSimilarity & 0.56 & 0.73 & 0.73 & 0.22 & 0.61 & 0.42 & 0.37 & 0.99 & 0.96 & 0.95 & 0.80 & 0.87 & 0.86 & 0.52 & 0.82 & 0.50 & 0.66 & 1.00 & 0.99 & 0.99 \\
SoftCorrelationMatch & 0.43 & 0.42 & 0.42 & 0.23 & 0.60 & 0.45 & 0.33 & 0.83 & 0.82 & 0.55 & 0.53 & 0.51 & 0.50 & 0.55 & 0.85 & 0.67 & 0.65 & 0.97 & 0.96 & 0.71 \\
UniformityDifference & 0.29 & 0.50 & 0.32 & 0.24 & 0.53 & 0.90 & 0.40 & 0.53 & 0.54 & 0.33 & 0.65 & 0.81 & 0.67 & 0.58 & 0.84 & 0.96 & 0.75 & 0.78 & 0.81 & 0.66 \\
\bottomrule
\end{tabular}%
}
\end{table}
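Tests 3 through 5 share one evaluation recipe: models trained under the manipulated condition (randomized labels, shortcuts, augmentation) should be separable from the reference models by their similarity scores. Below is a minimal sketch of the two reported scores, under the assumptions that the conformity rate is the fraction of correctly ordered (within-group, across-group) pairs and that larger scores mean more similar:
\begin{verbatim}
import numpy as np
from sklearn.metrics import average_precision_score

def auprc_and_conformity(within_sims, across_sims):
    """within_sims: similarities among same-group models;
    across_sims: similarities across groups."""
    scores = np.concatenate([within_sims, across_sims])
    labels = np.concatenate([np.ones(len(within_sims)),
                             np.zeros(len(across_sims))])
    auprc = average_precision_score(labels, scores)
    conformity = np.mean([w > a for w in within_sims
                                for a in across_sims])
    return auprc, conformity
\end{verbatim}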
\begin{table}[htbp]
\caption{Results of Test 4 (Shortcut Affinity) for the graph domain}
\label{tab:graph-test4}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccccccccc|cccccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{10}{c|}{\textbf{AUPRC}} &
\multicolumn{10}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{4}{c|}{\textbf{Cora}} &
\multicolumn{3}{c|}{\textbf{Flickr}} &
\multicolumn{3}{c|}{\textbf{OGBN-Arxiv}} &
\multicolumn{4}{c|}{\textbf{Cora}} &
\multicolumn{3}{c|}{\textbf{Flickr}} &
\multicolumn{3}{c}{\textbf{OGBN-Arxiv}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT & GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT \\
\midrule
CKA & 0.67 & 0.82 & 0.80 & 0.42 & 0.41 & \bf 1.00 & 0.37 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.85 & 0.92 & 0.95 & 0.62 & 0.78 & \bf 1.00 & 0.75 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
CKA ($\delta=0.45$) & 0.73 & 0.81 & 0.82 & 0.42 & 0.75 & \bf 1.00 & 0.53 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.91 & 0.92 & 0.96 & 0.55 & 0.94 & \bf 1.00 & 0.80 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
CKA ($\delta=0.2$) & \bf 0.79 & 0.85 & 0.78 & 0.42 & 0.97 & \bf 1.00 & 0.29 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 0.95 & 0.96 & 0.93 & 0.55 & 0.99 & \bf 1.00 & 0.60 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
kCKA ($k=100$) & 0.77 & 0.86 & \bf 0.84 & 0.42 & \bf 1.00 & \bf 1.00 & \bf 0.55 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.94 & 0.95 & \bf 0.97 & 0.56 & \bf 1.00 & \bf 1.00 & \bf 0.84 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
SVCCA & 0.23 & 0.36 & 0.46 & 0.24 & 0.24 & 0.93 & 0.32 & \bf 1.00 & 0.97 & 0.83 & 0.46 & 0.60 & 0.69 & 0.53 & 0.57 & 0.97 & 0.66 & \bf 1.00 & 0.99 & 0.91 \\
RTD & 0.72 & \bf 0.89 & 0.73 & 0.34 & 0.78 & \bf 1.00 & 0.31 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.88 & 0.96 & 0.93 & \bf 0.65 & 0.91 & \bf 1.00 & 0.63 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
IMD & 0.69 & 0.80 & 0.60 & 0.23 & 0.75 & 0.97 & 0.36 & 0.61 & 0.93 & 0.92 & 0.92 & 0.96 & 0.89 & \bf 0.65 & 0.94 & 0.99 & 0.62 & 0.84 & 0.98 & 0.98 \\
MKA ($k=100$) & 0.76 & 0.87 & 0.83 & \bf 0.43 & \bf 1.00 & 0.99 & 0.54 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.92 & \bf 0.97 & 0.96 & 0.58 & \bf 1.00 & \bf 1.00 & 0.83 & \bf 1.00 & \bf 1.00 & \bf 1.00 \\
\midrule
\midrule
CKA (linear) & 0.61 & 0.78 & 0.78 & 0.34 & 0.28 & 1.00 & 0.33 & 1.00 & 0.98 & 1.00 & 0.75 & 0.90 & 0.94 & 0.63 & 0.57 & 1.00 & 0.65 & 1.00 & 1.00 & 1.00 \\
MKA ($k=15$) & 0.75 & 0.83 & 0.82 & 0.42 & 1.00 & 0.86 & 0.55 & 1.00 & 1.00 & 1.00 & 0.91 & 0.95 & 0.96 & 0.58 & 1.00 & 0.96 & 0.84 & 1.00 & 1.00 & 1.00 \\
MKA ($k=50$) & 0.76 & 0.87 & 0.83 & 0.43 & 1.00 & 0.95 & 0.54 & 1.00 & 1.00 & 1.00 & 0.92 & 0.97 & 0.96 & 0.59 & 1.00 & 0.99 & 0.84 & 1.00 & 1.00 & 1.00 \\
MKA ($k=200$) & 0.76 & 0.88 & 0.83 & 0.43 & 1.00 & 1.00 & 0.54 & 1.00 & 1.00 & 1.00 & 0.92 & 0.97 & 0.96 & 0.58 & 1.00 & 1.00 & 0.83 & 1.00 & 1.00 & 1.00 \\
\midrule
AlignedCosineSimilarity & 0.64 & 0.60 & 0.59 & 0.41 & 0.89 & 1.00 & 0.48 & 1.00 & 1.00 & 1.00 & 0.84 & 0.88 & 0.89 & 0.63 & 0.98 & 1.00 & 0.77 & 1.00 & 1.00 & 1.00 \\
ConcentricityDifference & 0.51 & 0.17 & 0.20 & 0.19 & 0.18 & 0.18 & 0.32 & 0.81 & 0.96 & 1.00 & 0.77 & 0.43 & 0.54 & 0.45 & 0.50 & 0.46 & 0.61 & 0.96 & 0.99 & 1.00 \\
DistanceCorrelation & 0.66 & 0.82 & 0.80 & 0.42 & 0.33 & 1.00 & 0.32 & 1.00 & 0.99 & 1.00 & 0.83 & 0.92 & 0.95 & 0.62 & 0.69 & 1.00 & 0.72 & 1.00 & 1.00 & 1.00 \\
EigenspaceOverlapScore & 0.30 & 0.50 & 0.20 & 0.37 & 0.46 & 0.43 & 0.54 & 1.00 & 0.72 & 0.97 & 0.71 & 0.74 & 0.46 & 0.50 & 0.87 & 0.60 & 0.81 & 1.00 & 0.83 & 0.99 \\
Gulp & 0.29 & 0.50 & 0.20 & 0.37 & 0.23 & 0.43 & 0.45 & 0.48 & 0.72 & 0.96 & 0.67 & 0.74 & 0.46 & 0.50 & 0.45 & 0.60 & 0.81 & 0.88 & 0.83 & 0.99 \\
HardCorrelationMatch & 0.28 & 0.35 & 0.29 & 0.35 & 0.55 & 1.00 & 0.52 & 0.80 & 0.72 & 0.83 & 0.65 & 0.77 & 0.70 & 0.69 & 0.83 & 1.00 & 0.81 & 0.96 & 0.83 & 0.97 \\
JaccardSimilarity & 0.73 & 0.78 & 0.87 & 0.42 & 1.00 & 0.83 & 0.54 & 1.00 & 1.00 & 1.00 & 0.91 & 0.93 & 0.97 & 0.53 & 1.00 & 0.96 & 0.84 & 1.00 & 1.00 & 1.00 \\
LinearRegression & 0.70 & 0.74 & 0.50 & 0.35 & 0.23 & 0.61 & 0.36 & 1.00 & 1.00 & 1.00 & 0.88 & 0.95 & 0.75 & 0.63 & 0.56 & 0.81 & 0.72 & 1.00 & 1.00 & 1.00 \\
MagnitudeDifference & 0.37 & 0.15 & 0.18 & 0.22 & 0.27 & 0.78 & 0.20 & 0.55 & 0.53 & 1.00 & 0.74 & 0.41 & 0.49 & 0.58 & 0.67 & 0.89 & 0.48 & 0.82 & 0.82 & 1.00 \\
OrthogonalAngularShapeMetricCentered & 0.63 & 0.79 & 0.62 & 0.35 & 0.58 & 1.00 & 0.31 & 1.00 & 1.00 & 1.00 & 0.79 & 0.91 & 0.90 & 0.63 & 0.80 & 1.00 & 0.72 & 1.00 & 1.00 & 1.00 \\
OrthogonalProcrustesCenteredAndNormalized & 0.63 & 0.79 & 0.62 & 0.35 & 0.58 & 1.00 & 0.31 & 1.00 & 1.00 & 1.00 & 0.79 & 0.91 & 0.90 & 0.63 & 0.80 & 1.00 & 0.72 & 1.00 & 1.00 & 1.00 \\
PWCCA & 0.33 & 0.47 & 0.22 & 0.37 & —— & 0.43 & 0.33 & 0.99 & 0.72 & 1.00 & 0.73 & 0.71 & 0.56 & 0.51 & 1.00 & 0.60 & 0.73 & 1.00 & 0.83 & 1.00 \\
PermutationProcrustes & 0.23 & 0.27 & 0.25 & 0.25 & 0.42 & 1.00 & 0.27 & 0.65 & 0.43 & 0.77 & 0.57 & 0.67 & 0.64 & 0.56 & 0.72 & 1.00 & 0.58 & 0.86 & 0.60 & 0.91 \\
ProcrustesSizeAndShapeDistance & 0.60 & 0.81 & 0.68 & 0.25 & 0.60 & 1.00 & 0.33 & 1.00 & 1.00 & 1.00 & 0.77 & 0.91 & 0.92 & 0.58 & 0.82 & 1.00 & 0.61 & 1.00 & 1.00 & 1.00 \\
RSA & 0.49 & 0.43 & 0.73 & 0.42 & 0.89 & 1.00 & 0.52 & 0.97 & 0.98 & 0.90 & 0.72 & 0.78 & 0.91 & 0.70 & 0.97 & 1.00 & 0.81 & 0.99 & 0.99 & 0.98 \\
RSMNormDifference & 0.46 & 0.50 & 0.69 & 0.37 & 0.48 & 0.92 & 0.36 & 1.00 & 1.00 & 1.00 & 0.70 & 0.76 & 0.91 & 0.64 & 0.75 & 0.97 & 0.59 & 1.00 & 1.00 & 1.00 \\
RankSimilarity & 0.72 & 0.64 & 0.85 & 0.42 & 1.00 & 0.77 & 0.54 & 1.00 & 1.00 & 1.00 & 0.86 & 0.89 & 0.96 & 0.56 & 1.00 & 0.92 & 0.83 & 1.00 & 1.00 & 1.00 \\
SecondOrderCosineSimilarity & 0.85 & 0.91 & 0.81 & 0.33 & 1.00 & 1.00 & 0.52 & 1.00 & 1.00 & 1.00 & 0.95 & 0.97 & 0.95 & 0.68 & 1.00 & 1.00 & 0.82 & 1.00 & 1.00 & 1.00 \\
SoftCorrelationMatch & 0.31 & 0.40 & 0.30 & 0.39 & 0.66 & 1.00 & 0.57 & 0.82 & 0.72 & 0.83 & 0.68 & 0.79 & 0.71 & 0.67 & 0.87 & 1.00 & 0.82 & 0.96 & 0.83 & 0.97 \\
UniformityDifference & 0.30 & 0.64 & 0.71 & 0.30 & 0.59 & 0.50 & 0.48 & 0.99 & 0.72 & 0.97 & 0.62 & 0.91 & 0.91 & 0.66 & 0.83 & 0.77 & 0.67 & 1.00 & 0.86 & 0.99 \\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 5 (Augmentation) for the graph domain}
\label{tab:graph-test5}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccccccc|cccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{8}{c|}{\textbf{AUPRC}} &
\multicolumn{8}{c}{\textbf{Conformity Rate}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{3}{c}{\textbf{Cora}} &
\multicolumn{3}{|c|}{\textbf{Flickr}} &
\multicolumn{2}{c|}{\textbf{OGBN-Arxiv}} &
\multicolumn{3}{c}{\textbf{Cora}} &
\multicolumn{3}{|c|}{\textbf{Flickr}} &
\multicolumn{2}{c}{\textbf{OGBN-Arxiv}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
GCN & SAGE & GAT & GCN & SAGE & GAT & GCN & SAGE & GCN & SAGE & GAT & GCN & SAGE & GAT & GCN & SAGE \\
\midrule
CKA & 0.13 & 0.76 & 0.91 & 0.64 & 0.80 & 0.57 & \bf 1.00 & \bf 1.00 & 0.37 & 0.92 & 0.97 & 0.85 & 0.94 & 0.77 & \bf 1.00 & \bf 1.00 \\
CKA ($\delta=0.45$) & 0.13 & 0.80 & \bf 0.98 & 0.69 & 0.94 & 0.58 & \bf 1.00 & \bf 1.00 & 0.38 & 0.93 & \bf 0.99 & 0.86 & 0.99 & 0.80 & \bf 1.00 & \bf 1.00\\
CKA ($\delta=0.2$) & 0.13 & \bf 0.93 & 0.93 & \bf 0.75 & 0.42 & 0.55 & \bf 1.00 & 0.99 & 0.38 & \bf 0.99 & 0.97 & 0.90 & 0.52 & \bf 0.84 & \bf 1.00 & \bf 1.00\\
kCKA ($k=100$) & 0.13 & 0.76 & 0.96 & 0.74 & 0.82 & 0.60 & \bf 1.00 & 0.96 & 0.37 & 0.92 & \bf 0.99 & 0.89 & 0.94 & 0.83 & \bf 1.00 & 0.99\\
SVCCA & \bf 0.14 & 0.29 & 0.34 & 0.64 & 0.67 & 0.49 & 0.89 & 0.81 & 0.39 & 0.55 & 0.58 & 0.84 & 0.86 & 0.68 & 0.89 & 0.85\\
RTD & \bf 0.14 & 0.81 & 0.95 & 0.72 & \bf 1.00 & 0.54 & \bf 1.00 & \bf 1.00 & \bf 0.42 & 0.95 & \bf 0.99 & \bf 0.91 & \bf 1.00 & 0.75 & \bf 1.00 & \bf 1.00\\
IMD & \bf 0.14 & 0.36 & 0.21 & 0.59 & 0.59 & 0.28 & \bf 1.00 & \bf 1.00 & \bf 0.42 & 0.65 & 0.48 & 0.87 & 0.87 & 0.53 & \bf 1.00 & \bf 1.00\\
MKA ($k=100$) & 0.13 & 0.84 & 0.93 & \bf 0.75 & 0.93 & \bf 0.59 & \bf 1.00 & \bf 1.00 & 0.35 & 0.95 & 0.98 & 0.89 & 0.98 & \bf 0.84 & \bf 1.00 & \bf 1.00\\
\midrule
\midrule
CKA (linear) & 0.12 & 0.73 & 0.91 & 0.57 & 0.75 & 0.51 & 1.00 & 1.00 & 0.33 & 0.92 & 0.97 & 0.83 & 0.91 & 0.74 & 1.00 & 1.00\\
MKA ($k=15$) & 0.13 & 0.82 & 0.92 & 0.75 & 0.93 & 0.57 & 1.00 & 1.00 & 0.35 & 0.94 & 0.98 & 0.89 & 0.97 & 0.84 & 1.00 & 1.00\\
MKA ($k=50$) & 0.13 & 0.83 & 0.93 & 0.75 & 0.93 & 0.59 & 1.00 & 1.00 & 0.36 & 0.95 & 0.98 & 0.89 & 0.97 & 0.84 & 1.00 & 1.00\\
MKA ($k=200$) & 0.13 & 0.84 & 0.95 & 0.74 & 0.93 & 0.60 & 1.00 & 1.00 & 0.37 & 0.95 & 0.98 & 0.89 & 0.98 & 0.84 & 1.00 & 1.00\\
\midrule
AlignedCosineSimilarity & 0.13 & 0.63 & 0.91 & 0.69 & 0.70 & 0.54 & 0.74 & 0.43 & 0.37 & 0.88 & 0.98 & 0.87 & 0.90 & 0.81 & 0.87 & 0.54\\
ConcentricityDifference & 0.13 & 0.50 & 0.62 & 0.41 & 0.35 & 0.53 & 0.43 & 0.53 & 0.35 & 0.78 & 0.87 & 0.78 & 0.68 & 0.74 & 0.76 & 0.80\\
DistanceCorrelation & 0.13 & 0.73 & 0.89 & 0.60 & 0.79 & 0.61 & 1.00 & 1.00 & 0.34 & 0.91 & 0.97 & 0.84 & 0.94 & 0.81 & 1.00 & 1.00\\
EigenspaceOverlapScore & 0.13 & 0.62 & 0.34 & 0.68 & 0.53 & 0.46 & 0.82 & 0.49 & 0.36 & 0.90 & 0.75 & 0.88 & 0.80 & 0.70 & 0.96 & 0.75\\
Gulp & 0.13 & 0.61 & 0.35 & 0.21 & 0.54 & 0.55 & 0.53 & 0.48 & 0.39 & 0.89 & 0.76 & 0.53 & 0.81 & 0.84 & 0.80 & 0.75\\
HardCorrelationMatch & 0.13 & 0.63 & 0.51 & 0.71 & 0.72 & 0.57 & 0.47 & 0.51 & 0.35 & 0.89 & 0.81 & 0.89 & 0.92 & 0.85 & 0.59 & 0.77\\
JaccardSimilarity & 0.13 & 0.80 & 0.95 & 0.74 & 0.88 & 0.58 & 1.00 & 0.99 & 0.36 & 0.95 & 0.98 & 0.89 & 0.97 & 0.84 & 1.00 & 1.00\\
LinearRegression & 0.13 & 0.85 & 0.87 & 0.25 & 0.81 & 0.34 & 0.72 & 0.72 & 0.38 & 0.96 & 0.95 & 0.53 & 0.93 & 0.69 & 0.83 & 0.83\\
MagnitudeDifference & 0.13 & 0.39 & 0.54 & 0.59 & 0.17 & 0.43 & 0.77 & 1.00 & 0.35 & 0.74 & 0.83 & 0.87 & 0.45 & 0.66 & 0.93 & 1.00\\
OrthogonalAngularShapeMetricCentered & 0.13 & 0.78 & 0.83 & 0.63 & 0.76 & 0.54 & 0.72 & 0.72 & 0.37 & 0.93 & 0.95 & 0.85 & 0.94 & 0.77 & 0.83 & 0.83\\
OrthogonalProcrustesCenteredAndNormalized & 0.13 & 0.78 & 0.83 & 0.63 & 0.76 & 0.54 & 0.72 & 0.72 & 0.36 & 0.93 & 0.95 & 0.85 & 0.94 & 0.77 & 0.83 & 0.83\\
PWCCA & 0.14 & 0.65 & 0.42 & —— & 0.57 & 0.51 & —— & 0.48 & 0.43 & 0.89 & 0.83 & 1.00 & 0.83 & 0.75 & 1.00 & 0.74\\
PermutationProcrustes & 0.13 & 0.61 & 0.42 & 0.65 & 0.69 & 0.40 & 0.88 & 0.73 & 0.35 & 0.89 & 0.51 & 0.87 & 0.90 & 0.62 & 0.97 & 0.87\\
ProcrustesSizeAndShapeDistance & 0.13 & 0.69 & 0.75 & 0.70 & 0.81 & 0.40 & 1.00 & 1.00 & 0.35 & 0.90 & 0.90 & 0.89 & 0.95 & 0.63 & 1.00 & 1.00\\
RSA & 0.13 & 0.57 & 0.78 & 0.68 & 0.72 & 0.59 & 0.75 & 0.49 & 0.38 & 0.84 & 0.94 & 0.86 & 0.92 & 0.84 & 0.89 & 0.78\\
RSMNormDifference & 0.13 & 0.79 & 1.00 & 0.64 & 0.93 & 0.40 & 1.00 & 1.00 & 0.35 & 0.93 & 1.00 & 0.87 & 0.97 & 0.62 & 1.00 & 1.00\\
RankSimilarity & 0.13 & 0.61 & 0.95 & 0.75 & 0.88 & 0.55 & 1.00 & 1.00 & 0.36 & 0.88 & 0.98 & 0.89 & 0.96 & 0.83 & 1.00 & 1.00\\
SecondOrderCosineSimilarity & 0.13 & 0.86 & 0.97 & 0.78 & 0.92 & 0.62 & 1.00 & 0.99 & 0.38 & 0.97 & 0.99 & 0.91 & 0.98 & 0.84 & 1.00 & 1.00\\
SoftCorrelationMatch & 0.13 & 0.70 & 0.50 & 0.73 & 0.58 & 0.55 & 0.43 & 0.46 & 0.34 & 0.92 & 0.80 & 0.87 & 0.85 & 0.84 & 0.51 & 0.71\\
UniformityDifference & 0.13 & 0.53 & 1.00 & 0.68 & 0.24 & 0.25 & 0.76 & 0.53 & 0.38 & 0.86 & 1.00 & 0.90 & 0.52 & 0.66 & 0.88 & 0.81\\
\bottomrule
\end{tabular}%
}
\end{table}
\begin{table}[htbp]
\caption{Results of Test 6 (Layer Monotonicity) for the graph domain}
\label{tab:graph-test6}
\centering
\small
\renewcommand{\arraystretch}{1.2}
\setlength{\tabcolsep}{4pt}
\resizebox{\textwidth}{!}{%
\begin{tabular}{l|cccccccccc|cccccccccc}
\toprule
\multicolumn{1}{c|}{\textbf{Evaluation}} &
\multicolumn{10}{c|}{\textbf{Conformity Rate}} &
\multicolumn{10}{c}{\textbf{Spearman}} \\
\multicolumn{1}{c|}{\textbf{Dataset}} &
\multicolumn{4}{c|}{\textbf{Cora}} &
\multicolumn{3}{c|}{\textbf{Flickr}} &
\multicolumn{3}{c|}{\textbf{OGBN-Arxiv}} &
\multicolumn{4}{c|}{\textbf{Cora}} &
\multicolumn{3}{c|}{\textbf{Flickr}} &
\multicolumn{3}{c}{\textbf{OGBN-Arxiv}} \\
\multicolumn{1}{c|}{\textbf{Architecture}} &
GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT & GCN & SAGE & GAT & PGNN & GCN & SAGE & GAT & GCN & SAGE & GAT \\
\midrule
CKA & 0.99 & \bf 1.00 & \bf 1.00 & 0.99 & 0.81 & \bf 0.99 & 0.63 & 0.88 & 0.70 & 0.90 & 0.99 & \bf 1.00 & \bf 1.00 & 0.99 & 0.73 & \bf 0.99 & 0.48 & 0.86 & 0.58 & 0.91 \\
CKA ($\delta=0.45$) & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.86 & \bf 0.99 & 0.80 & 0.84 & 0.80 & 0.93 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.79 & \bf 0.99 & 0.66 & 0.75 & 0.73 & 0.93 \\
CKA ($\delta=0.2$) & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.97 & \bf 0.99 & 0.85 & 0.91 & 0.96 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.98 & \bf 0.99 & 0.64 & 0.84 & 0.91 & \bf 1.00 \\
kCKA ($k=100$) & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.91 & 0.95 & 0.95 & 0.96 & 0.88 & 0.98 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.91 & 0.90 & 0.97 & 0.95 & 0.85 & 0.99 & \bf 1.00 \\
SVCCA & 0.41 & 0.78 & 0.61 & \bf 1.00 & 0.75 & 0.69 & 0.57 & 0.49 & 0.41 & 0.56 & 0.63 & 0.80 & 0.81 & \bf 1.00 & 0.53 & 0.44 & 0.47 & -0.17 & -0.10 & 0.33 \\
RTD & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.90 & 0.55 & \bf 0.99 & 0.61 & \bf 1.00 & 0.98 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.96 & 0.19 & 0.98 & 0.31 & \bf 1.00 & 0.99 & \bf 1.00 \\
IMD & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.97 & \bf 1.00 & 0.94 & \bf 0.97 & 0.85 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.97 & \bf 1.00 & 0.82 & \bf 0.97 & 0.55 & \bf 1.00 & \bf 1.00 \\
MKA ($k=100$) & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.94 & 0.99 & 0.94 & 0.96 & 0.96 & 0.99 & \bf 1.00 & \bf 1.00 & \bf 1.00 & \bf 1.00 & 0.95 & 0.98 & 0.95 & 0.94 & 0.94 & 0.99 & \bf 1.00 \\
\midrule
\midrule
CKA (linear) & 0.98 & 1.00 & 1.00 & 1.00 & 0.79 & 0.92 & 0.59 & 0.87 & 0.64 & 0.95 & 0.98 & 1.00 & 1.00 & 1.00 & 0.60 & 0.89 & 0.39 & 0.85 & 0.46 & 0.96 \\
MKA ($k=15$) & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 & 0.92 & 0.99 & 0.96 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 & 0.93 & 0.98 & 0.94 & 0.99 & 1.00 \\
MKA ($k=50$) & 1.00 & 1.00 & 1.00 & 0.99 & 0.98 & 0.93 & 0.97 & 0.96 & 0.99 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 & 0.98 & 0.94 & 0.95 & 0.94 & 0.99 & 1.00 \\
MKA ($k=200$) & 1.00 & 1.00 & 1.00 & 0.93 & 0.99 & 0.94 & 0.96 & 0.96 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.94 & 0.98 & 0.95 & 0.94 & 0.94 & 1.00 & 1.00 \\
\midrule
AlignedCosineSimilarity & 0.98 & 1.00 & 1.00 & 0.48 & 0.59 & 0.77 & 0.46 & 0.83 & 0.93 & 0.89 & 0.97 & 1.00 & 1.00 & 0.67 & 0.33 & 0.68 & 0.04 & 0.84 & 0.93 & 0.84 \\
ConcentricityDifference & 0.85 & 0.25 & 0.40 & 0.15 & 0.57 & 0.36 & 0.58 & 0.70 & 0.74 & 0.67 & 0.85 & 0.52 & 0.62 & 0.53 & 0.38 & -0.27 & 0.24 & 0.73 & 0.39 & 0.40 \\
DistanceCorrelation & 1.00 & 1.00 & 1.00 & 0.93 & 0.81 & 0.99 & 0.63 & 0.88 & 0.65 & 0.92 & 1.00 & 1.00 & 1.00 & 0.99 & 0.63 & 0.99 & 0.47 & 0.81 & 0.52 & 0.93 \\
EigenspaceOverlapScore & 1.00 & 1.00 & 1.00 & 0.99 & 1.00 & 1.00 & 0.92 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 & 1.00 & 1.00 & 0.92 & 1.00 & 1.00 & 1.00 \\
Gulp & 0.98 & 1.00 & 1.00 & 0.92 & 0.74 & 1.00 & 0.74 & 0.88 & 1.00 & 1.00 & 0.98 & 1.00 & 1.00 & 0.92 & 0.43 & 1.00 & 0.38 & 0.80 & 1.00 & 1.00 \\
HardCorrelationMatch & 0.83 & 0.98 & 0.54 & 0.80 & 0.81 & 0.84 & 0.68 & 0.86 & 0.72 & 1.00 & 0.91 & 0.99 & 0.76 & 0.84 & 0.63 & 0.80 & 0.21 & 0.83 & 0.62 & 1.00 \\
JaccardSimilarity & 1.00 & 1.00 & 1.00 & 1.00 & 0.94 & 0.96 & 0.97 & 0.96 & 0.98 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.95 & 0.97 & 0.96 & 0.97 & 0.99 & 1.00 \\
LinearRegression & 1.00 & 1.00 & 1.00 & 0.92 & 0.63 & 1.00 & 0.58 & 0.98 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.95 & 0.19 & 1.00 & 0.01 & 0.99 & 1.00 & 1.00 \\
MagnitudeDifference & 0.55 & 0.49 & 0.89 & 0.91 & 0.65 & 0.63 & 0.58 & 0.52 & 0.71 & 0.87 & 0.63 & 0.72 & 0.92 & 0.97 & 0.58 & 0.50 & 0.38 & 0.06 & 0.29 & 0.63 \\
OrthogonalAngularShapeMetricCentered & 1.00 & 1.00 & 1.00 & 0.91 & 0.80 & 0.99 & 0.66 & 0.95 & 0.99 & 0.99 & 1.00 & 1.00 & 1.00 & 0.97 & 0.62 & 0.98 & 0.45 & 0.97 & 0.99 & 0.99 \\
OrthogonalProcrustesCenteredAndNormalized & 1.00 & 1.00 & 1.00 & 0.91 & 0.80 & 0.99 & 0.66 & 0.95 & 0.99 & 0.99 & 1.00 & 1.00 & 1.00 & 0.97 & 0.62 & 0.98 & 0.45 & 0.97 & 0.99 & 0.99 \\
PWCCA & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.75 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 & 1.00 & 0.37 & 1.00 & 1.00 & 1.00 & ——\\
PermutationProcrustes & 0.92 & 1.00 & 1.00 & 1.00 & 0.68 & 0.72 & 0.75 & 0.69 & 0.87 & 1.00 & 0.91 & 1.00 & 1.00 & 1.00 & 0.60 & 0.68 & 0.63 & 0.24 & 0.88 & 1.00 \\
ProcrustesSizeAndShapeDistance & 0.99 & 1.00 & 1.00 & 0.91 & 0.75 & 1.00 & 0.78 & 0.93 & 0.96 & 1.00 & 0.99 & 1.00 & 1.00 & 0.97 & 0.66 & 1.00 & 0.62 & 0.83 & 0.85 & 1.00 \\
RSA & 0.84 & 0.91 & 1.00 & 0.61 & 0.70 & 0.98 & 0.70 & 0.81 & 0.97 & 0.94 & 0.90 & 0.97 & 1.00 & 0.74 & 0.58 & 0.99 & 0.44 & 0.52 & 0.97 & 0.94 \\
RSMNormDifference & 0.99 & 1.00 & 1.00 & 0.91 & 0.66 & 0.68 & 0.92 & 0.85 & 0.93 & 1.00 & 0.99 & 1.00 & 1.00 & 0.97 & 0.54 & 0.65 & 0.81 & 0.85 & 0.93 & 1.00 \\
RankSimilarity & 1.00 & 1.00 & 1.00 & 1.00 & 0.95 & 0.95 & 0.98 & 0.99 & 0.98 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.97 & 0.97 & 0.99 & 0.99 & 0.99 & 1.00 \\
SecondOrderCosineSimilarity & 1.00 & 1.00 & 1.00 & 0.90 & 0.91 & 0.96 & 0.96 & 0.95 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.90 & 0.78 & 0.96 & 0.88 & 0.97 & 1.00 & 1.00 \\
SoftCorrelationMatch & 0.96 & 0.99 & 0.50 & 0.70 & 0.89 & 0.88 & 0.64 & 0.91 & 0.73 & 1.00 & 0.95 & 0.99 & 0.78 & 0.80 & 0.79 & 0.89 & 0.11 & 0.91 & 0.62 & 1.00 \\
UniformityDifference & 0.67 & 0.67 & 0.59 & 0.58 & 0.94 & 0.92 & 0.94 & 0.60 & 0.41 & 0.83 & 0.70 & 0.70 & 0.69 & 0.70 & 0.82 & 0.89 & 0.82 & 0.33 & -0.45 & 0.68 \\
\bottomrule
\end{tabular}%
}
\end{table}
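Test 6 asks that similarity decay monotonically with the distance between layers of the same model. A sketch under illustrative assumptions (a per-model matrix of inter-layer similarities; the table reports Spearman values oriented so that larger is better, whereas the raw correlation of similarity against layer gap is expected to be negative):
\begin{verbatim}
import numpy as np
from itertools import combinations
from scipy.stats import spearmanr

def test6_monotonicity(layer_sims):
    """layer_sims[i, j]: similarity of layers i and j."""
    L = layer_sims.shape[0]
    pairs = list(combinations(range(L), 2))
    rho, _ = spearmanr([j - i for i, j in pairs],
                       [layer_sims[i, j] for i, j in pairs])
    # Conformity over triplets i < j < k: the nearer layer j should
    # be at least as similar to i as the farther layer k (assumed
    # reading of the conformity rate for this test).
    conf = np.mean([layer_sims[i, j] >= layer_sims[i, k]
                    for i, j, k in combinations(range(L), 3)])
    return rho, conf
\end{verbatim}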
\end{document}