# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Defines the fairlearn dashboard class."""

import copy
import warnings

import numpy as np
import pandas as pd
from IPython.display import display
from scipy.sparse import issparse

from fairlearn.metrics import (
    group_accuracy_score, group_precision_score, group_recall_score,
    group_zero_one_loss, group_max_error, group_mean_absolute_error,
    group_mean_squared_error, group_median_absolute_error,
    group_specificity_score, group_miss_rate, group_fallout_rate,
    group_selection_rate, group_balanced_root_mean_squared_error,
    group_mean_overprediction, group_r2_score, group_mean_underprediction,
    group_mean_prediction, group_roc_auc_score, group_root_mean_squared_error)

from ._fairlearn_widget import FairlearnWidget


class FairlearnDashboard(object):
    """The dashboard class, which wraps the dashboard widget component.

    :param sensitive_features: A matrix of feature vector examples
        (# examples x # features); these can be from the initial dataset
        or held out from training.
    :type sensitive_features: numpy.ndarray or list[][] or pandas.DataFrame or pandas.Series
    :param y_true: The true labels or values for the provided dataset.
    :type y_true: numpy.ndarray or list[]
    :param y_pred: Array of output predictions from the models to be evaluated.
        Can be a single array of predictions, a 2D list over multiple models,
        or a dictionary of named model predictions.
    :type y_pred: numpy.ndarray or list[][] or list[] or dict {string: list[]}
    :param sensitive_feature_names: Feature names of ``sensitive_features``.
    :type sensitive_feature_names: numpy.ndarray or list[]
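
    :Example: an illustrative call; ``A_test``, ``X_test``, ``y_test`` and
        the fitted estimator ``lr`` are placeholder names, not part of this
        API.

    .. code-block:: python

        FairlearnDashboard(
            sensitive_features=A_test,
            sensitive_feature_names=['BinaryGender', 'Race'],
            y_true=y_test,
            y_pred={'lr': lr.predict(X_test)})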
"""
def __init__(
self, *,
sensitive_features,
y_true, y_pred,
sensitive_feature_names=None):
"""Initialize the fairlearn Dashboard."""
self._widget_instance = FairlearnWidget()
        if sensitive_features is None or y_true is None or y_pred is None:
            raise ValueError(
                "sensitive_features, y_true and y_pred are all required arguments")
# The following mappings should match those in the GroupMetricSet
# Issue 269 has been opened to track the work for unifying the two
self._metric_methods = {
"accuracy_score": {
"model_type": ["classification"],
"function": group_accuracy_score
},
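            # group_roc_auc_score is reused here: for hard 0/1 predictions,
            # ROC AUC reduces to (TPR + TNR) / 2, i.e. balanced accuracy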
"balanced_accuracy_score": {
"model_type": ["classification"],
"function": group_roc_auc_score
},
"precision_score": {
"model_type": ["classification"],
"function": group_precision_score
},
"recall_score": {
"model_type": ["classification"],
"function": group_recall_score
},
"zero_one_loss": {
"model_type": [],
"function": group_zero_one_loss
},
"specificity_score": {
"model_type": [],
"function": group_specificity_score
},
"miss_rate": {
"model_type": [],
"function": group_miss_rate
},
"fallout_rate": {
"model_type": [],
"function": group_fallout_rate
},
"false_positive_over_total": {
"model_type": [],
"function": group_fallout_rate
},
"false_negative_over_total": {
"model_type": [],
"function": group_miss_rate
},
"selection_rate": {
"model_type": [],
"function": group_selection_rate
},
"auc": {
"model_type": ["probability"],
"function": group_roc_auc_score
},
"root_mean_squared_error": {
"model_type": ["regression", "probability"],
"function": group_root_mean_squared_error
},
"balanced_root_mean_squared_error": {
"model_type": ["probability"],
"function": group_balanced_root_mean_squared_error
},
"mean_squared_error": {
"model_type": ["regression", "probability"],
"function": group_mean_squared_error
},
"mean_absolute_error": {
"model_type": ["regression", "probability"],
"function": group_mean_absolute_error
},
"r2_score": {
"model_type": ["regression"],
"function": group_r2_score
},
"max_error": {
"model_type": [],
"function": group_max_error
},
"median_absolute_error": {
"model_type": [],
"function": group_median_absolute_error
},
"overprediction": {
"model_type": [],
"function": group_mean_overprediction
},
"underprediction": {
"model_type": [],
"function": group_mean_underprediction
},
"average": {
"model_type": [],
"function": group_mean_prediction
}
}
        classification_methods = [name for name, info in self._metric_methods.items()
                                  if "classification" in info["model_type"]]
        regression_methods = [name for name, info in self._metric_methods.items()
                              if "regression" in info["model_type"]]
        probability_methods = [name for name, info in self._metric_methods.items()
                               if "probability" in info["model_type"]]
dataset = self._sanitize_data_shape(sensitive_features)
model_names = None
if isinstance(y_pred, dict):
model_names = []
self._y_pred = []
for k, v in y_pred.items():
model_names.append(k)
self._y_pred.append(self._convert_to_list(v))
else:
self._y_pred = self._convert_to_list(y_pred)
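            # A single 1D vector of predictions is treated as a single model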
if len(np.shape(self._y_pred)) == 1:
self._y_pred = [self._y_pred]
self._y_true = self._convert_to_list(y_true)
if np.shape(self._y_true)[0] != np.shape(self._y_pred)[1]:
raise ValueError("Predicted y does not match true y shape")
if np.shape(self._y_true)[0] != np.shape(dataset)[0]:
raise ValueError("Sensitive features shape does not match true y shape")
dataArg = {
"true_y": self._y_true,
"predicted_ys": self._y_pred,
"dataset": dataset,
"classification_methods": classification_methods,
"regression_methods": regression_methods,
"probability_methods": probability_methods,
"model_names": model_names
}
        if sensitive_feature_names is not None:
            sensitive_feature_names = self._convert_to_list(sensitive_feature_names)
            if np.shape(dataset)[1] != np.shape(sensitive_feature_names)[0]:
                # Raising would abort construction; warn and ignore the names instead
                warnings.warn("Feature names shape does not match dataset, ignoring")
            else:
                dataArg["features"] = sensitive_feature_names
self._widget_instance.value = dataArg
self._widget_instance.observe(self._on_request, names="request")
display(self._widget_instance)

    def _on_request(self, change):
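        """Compute the metrics the dashboard front end has requested.

        Each entry of ``change.new`` carries a metric key, a model index and
        a bin vector; the response maps the request id to the overall metric
        value and the per-group values.
        """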
try:
new = change.new
response = copy.deepcopy(self._widget_instance.response)
            for request_id in new:
                try:
                    if request_id not in response:
                        data = new[request_id]
                        metric_function = self._metric_methods[data["metricKey"]]["function"]
                        bin_vector = data["binVector"]
                        prediction = metric_function(
                            self._y_true,
                            self._y_pred[data["modelIndex"]],
                            bin_vector)
                        response[request_id] = {
                            "global": prediction.overall,
                            "bins": prediction.by_group
                        }
                except Exception as ex:
                    # Store the message rather than the exception object so
                    # the response stays serializable for the widget transport
                    response[request_id] = {
                        "error": str(ex),
                        "global": 0,
                        "bins": []}
self._widget_instance.response = response
        except Exception as ex:
            raise ValueError("Error while making request") from ex

    def _show(self):
display(self._widget_instance)

    def _sanitize_data_shape(self, dataset):
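        """Return the dataset as a 2D list.

        A 1D input such as ``[1, 2]`` is wrapped to ``[[1], [2]]`` so that
        each example becomes a single-column row.
        """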
result = self._convert_to_list(dataset)
        # The dataset should be 2D; if not, wrap each entry in its own row
        if len(np.shape(result)) == 2:
            return result
        return [[x] for x in result]

    def _convert_to_list(self, array):
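        """Convert sparse matrices, DataFrames, Series and ndarrays to plain lists."""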
if issparse(array):
if array.shape[1] > 1000:
raise ValueError("Exceeds maximum number of features for visualization (1000)")
return array.toarray().tolist()
        if isinstance(array, (pd.DataFrame, pd.Series)):
            return array.values.tolist()
        if isinstance(array, np.ndarray):
            return array.tolist()
        return array