# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import pytest

from fairlearn.postprocessing import ThresholdOptimizer
from fairlearn.reductions import (DemographicParity, EqualizedOdds,
                                  ExponentiatedGradient, GridSearch)

try:
    from tempeh.execution.azureml.workspace import get_workspace
except ImportError:
    raise Exception("fairlearn performance tests require tempeh and "
                    "azureml-sdk to be installed.")

from environment_setup import build_package


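# Mitigation algorithms are identified by their class names.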
THRESHOLD_OPTIMIZER = ThresholdOptimizer.__name__
EXPONENTIATED_GRADIENT = ExponentiatedGradient.__name__
GRID_SEARCH = GridSearch.__name__

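# Resources measured by the performance tests.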
MEMORY = "memory"
TIME = "time"

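# Datasets the tests run against.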
ADULT_UCI = 'adult_uci'
COMPAS = 'compas'

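# Identifiers for the unmitigated predictor models.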
RBM_SVM = 'rbm_svm'
DECISION_TREE_CLASSIFIER = 'decision_tree_classifier'

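# The performance test grid covers the cross product of these lists.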
DATASETS = [ADULT_UCI, COMPAS]
PREDICTORS = [RBM_SVM, DECISION_TREE_CLASSIFIER]
MITIGATORS = [THRESHOLD_OPTIMIZER, EXPONENTIATED_GRADIENT, GRID_SEARCH]


class PerfTestConfiguration:
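    """One performance test case: a dataset, predictor, mitigator, and disparity metric."""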
    def __init__(self, dataset, predictor, mitigator, disparity_metric):
        self.dataset = dataset
        self.predictor = predictor
        self.mitigator = mitigator
        self.disparity_metric = disparity_metric

    def __repr__(self):
        return "[dataset: {}, predictor: {}, mitigator: {}, disparity_metric: {}]" \
               .format(self.dataset, self.predictor, self.mitigator, self.disparity_metric)


def get_all_perf_test_configurations():
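    """Build the list of all performance test configurations.

    Every combination of dataset, predictor, and mitigator is paired with the
    appropriate disparity metrics: ThresholdOptimizer takes its constraints as
    strings, while the reductions take the Moment classes EqualizedOdds and
    DemographicParity, identified here by class name.
    """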
    perf_test_configurations = []
    for dataset in DATASETS:
        for predictor in PREDICTORS:
            for mitigator in MITIGATORS:
                if mitigator == THRESHOLD_OPTIMIZER:
                    disparity_metrics = ["equalized_odds", "demographic_parity"]
                elif mitigator in (EXPONENTIATED_GRADIENT, GRID_SEARCH):
                    disparity_metrics = [EqualizedOdds.__name__, DemographicParity.__name__]
                else:
                    raise Exception("Unknown mitigator {}".format(mitigator))

                for disparity_metric in disparity_metrics:
                    perf_test_configurations.append(
                        PerfTestConfiguration(dataset, predictor, mitigator, disparity_metric))

    return perf_test_configurations


@pytest.fixture(scope="session")
def workspace():
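    """Provide the AzureML workspace used to run the performance tests."""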
    return get_workspace()


@pytest.fixture(scope="session")
def wheel_file():
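    """Build the fairlearn package once per session and return the result of build_package."""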
    return build_package()