# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""A variety of extra metrics useful for assessing fairness.
These are metrics which are not part of `scikit-learn`.
"""
import sklearn.metrics as skm
from ._metrics_engine import make_group_metric
from ._balanced_root_mean_squared_error import balanced_root_mean_squared_error # noqa: F401
from ._mean_predictions import mean_prediction, mean_overprediction, mean_underprediction # noqa: F401,E501
from ._selection_rate import selection_rate # noqa: F401,E501


def specificity_score(y_true, y_pred, sample_weight=None):
    r"""Calculate the specificity score (also called the True Negative Rate).

    At the present time, this routine only supports binary
    classifiers with labels :math:`\in \{0, 1\}`.

    The calculation uses the :py:func:`sklearn.metrics.confusion_matrix` routine.
    """
    cm = skm.confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    # Taken from
    # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
    # This restricts us to binary classification
    tn, fp, _, _ = cm.ravel()
    return tn / (tn + fp)
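
# A minimal usage sketch for specificity_score (illustrative values, not from
# the library's tests): with y_true = [0, 0, 0, 1] and y_pred = [0, 1, 0, 1]
# the confusion matrix has two true negatives and one false positive, so the
# score is 2 / (2 + 1):
#
#     >>> specificity_score([0, 0, 0, 1], [0, 1, 0, 1])
#     0.6666666666666666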


def miss_rate(y_true, y_pred, sample_weight=None):
    r"""Calculate the miss rate (also called the False Negative Rate).

    At the present time, this routine only supports binary
    classifiers with labels :math:`\in \{0, 1\}`.

    By definition, this is the complement of the True Positive
    Rate, so this routine uses the
    :py:func:`sklearn.metrics.recall_score` routine.
    """
    # aka False Negative Rate
    tpr = skm.recall_score(y_true, y_pred, sample_weight=sample_weight)
    # FNR == 1 - TPR
    return 1 - tpr
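
# A minimal usage sketch for miss_rate (illustrative values): with
# y_true = [1, 1, 1, 0] and y_pred = [1, 0, 1, 0] there are two true
# positives and one false negative, so the TPR is 2/3 and the miss
# rate is 1 - 2/3:
#
#     >>> miss_rate([1, 1, 1, 0], [1, 0, 1, 0])
#     0.33333333333333337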


def fallout_rate(y_true, y_pred, sample_weight=None):
    r"""Calculate the fallout rate (also called the False Positive Rate).

    At the present time, this routine only supports binary
    classifiers with labels :math:`\in \{0, 1\}`.

    By definition, this is the complement of the
    Specificity, and so uses :py:func:`specificity_score` in its
    calculation.
    """
    # aka False Positive Rate
    # Since we use specificity, also restricted to binary classification
    return 1 - specificity_score(y_true, y_pred, sample_weight=sample_weight)
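
# A minimal usage sketch for fallout_rate (illustrative values): on the same
# data as the specificity_score example above, the specificity is 2/3, so the
# fallout rate is 1 - 2/3:
#
#     >>> fallout_rate([0, 0, 0, 1], [0, 1, 0, 1])
#     0.33333333333333337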


# =============================================================
# Group metrics

# Classification metrics
group_fallout_rate = make_group_metric(fallout_rate)
"""A grouped metric for the :py:func:`fallout_rate` routine.
"""

group_miss_rate = make_group_metric(miss_rate)
"""A grouped metric for the :py:func:`miss_rate` routine.
"""

group_specificity_score = make_group_metric(specificity_score)
"""A grouped metric for the :py:func:`specificity_score` routine.
"""

# Regression metrics
group_balanced_root_mean_squared_error = make_group_metric(
    balanced_root_mean_squared_error)
"""A grouped wrapper around the :py:func:`balanced_root_mean_squared_error` routine.
"""

group_mean_prediction = make_group_metric(mean_prediction)
"""A grouped wrapper around the :py:func:`mean_prediction` routine.
"""

group_mean_overprediction = make_group_metric(mean_overprediction)
"""A grouped wrapper around the :py:func:`mean_overprediction` routine.
"""

group_mean_underprediction = make_group_metric(mean_underprediction)
"""A grouped wrapper around the :py:func:`mean_underprediction` routine.
"""