File size: 4,833 Bytes
8766bc5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import os
from copy import deepcopy

import numpy as np
import pytest
from gradio_client import media_data

import gradio.interpretation
from gradio import Interface
from gradio.processing_utils import decode_base64_to_image

os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"


def max_word_len(text: str) -> int:
    """Return the length of the longest space-delimited word in *text*.

    Splits on a single space (not arbitrary whitespace) so runs of spaces
    yield empty "words" of length 0, matching the original behavior.
    An empty string yields 0 (`"".split(" ")` is `[""]`).
    """
    # Generator avoids materializing a throwaway list inside max().
    return max(len(word) for word in text.split(" "))


class TestDefault:
    @pytest.mark.asyncio
    async def test_default_text(self):
        """Default interpretation scores tokens of a textbox input."""
        iface = Interface(
            max_word_len, "textbox", "label", interpretation="default"
        )
        results = await iface.interpret(["quickest brown fox"])
        scores = results[0]["interpretation"]
        # First word ("quickest") should receive a positive score.
        assert scores[0][1] > 0
        # Last word ("fox") should receive a zero score.
        assert scores[-1][1] == 0


class TestShapley:
    @pytest.mark.asyncio
    async def test_shapley_text(self):
        """Shapley interpretation scores tokens of a textbox input."""
        iface = Interface(
            max_word_len, "textbox", "label", interpretation="shapley"
        )
        results = await iface.interpret(["quickest brown fox"])
        first_token = results[0]["interpretation"][0]
        # First word ("quickest") should receive a positive score.
        assert first_token[1] > 0


class TestCustom:
    @pytest.mark.asyncio
    async def test_custom_text(self):
        """A custom interpretation function can score text per character."""

        def score_each_char(text):
            # Assign every character a score of 1.
            return [(char, 1) for char in text]

        iface = Interface(
            max_word_len, "textbox", "label", interpretation=score_each_char
        )
        results = await iface.interpret(["quickest brown fox"])
        first_char = results[0]["interpretation"][0]
        # Every character was given a score of 1, so the first one is 1.
        assert first_char[1] == 1

    @pytest.mark.asyncio
    async def test_custom_img(self):
        """A custom interpretation function can return per-pixel scores."""

        def max_pixel_value(img):
            return img.max()

        def pixels_as_scores(img):
            # Use the raw pixel values themselves as the scores.
            return img.tolist()

        iface = Interface(
            max_pixel_value, "image", "label", interpretation=pixels_as_scores
        )
        results = await iface.interpret([deepcopy(media_data.BASE64_IMAGE)])
        actual = results[0]["interpretation"]
        decoded = decode_base64_to_image(deepcopy(media_data.BASE64_IMAGE))
        expected = np.asarray(decoded.convert("RGB")).tolist()
        assert actual == expected


class TestHelperMethods:
    def test_diff(self):
        """diff() returns numeric difference for numbers, 0/1 for labels."""
        assert gradio.interpretation.diff(13, "2") == 11
        # Different string labels differ by 1; identical ones by 0.
        assert gradio.interpretation.diff("cat", "dog") == 1
        assert gradio.interpretation.diff("cat", "cat") == 0

    def test_quantify_difference_with_number(self):
        iface = Interface(lambda text: text, ["textbox"], ["number"])
        result = gradio.interpretation.quantify_difference_in_label(iface, [4], [6])
        assert result == -2

    def test_quantify_difference_with_label(self):
        iface = Interface(lambda text: len(text), ["textbox"], ["label"])
        # Numeric-looking label strings are compared by value.
        assert (
            gradio.interpretation.quantify_difference_in_label(iface, ["3"], ["10"])
            == -7
        )
        assert (
            gradio.interpretation.quantify_difference_in_label(iface, ["0"], ["100"])
            == -100
        )

    def test_quantify_difference_with_confidences(self):
        iface = Interface(lambda text: len(text), ["textbox"], ["label"])
        high_cat = {"cat": 0.9, "dog": 0.1}
        mid_cat = {"cat": 0.6, "dog": 0.4}
        low_cat = {"cat": 0.1, "dog": 0.6}
        first = gradio.interpretation.quantify_difference_in_label(
            iface, [high_cat], [mid_cat]
        )
        assert pytest.approx(first) == 0.3
        second = gradio.interpretation.quantify_difference_in_label(
            iface, [high_cat], [low_cat]
        )
        assert pytest.approx(second) == 0.8

    def test_get_regression_value(self):
        iface = Interface(lambda text: text, ["textbox"], ["label"])
        baseline = {"cat": 0.9, "dog": 0.1}
        with_nan = {"cat": float("nan"), "dog": 0.4}
        shifted = {"cat": 0.1, "dog": 0.6}
        # A NaN confidence collapses the difference to 0.
        assert (
            gradio.interpretation.get_regression_or_classification_value(
                iface, [baseline], [with_nan]
            )
            == 0
        )
        value = gradio.interpretation.get_regression_or_classification_value(
            iface, [baseline], [shifted]
        )
        assert pytest.approx(value) == 0.1

    def test_get_classification_value(self):
        iface = Interface(lambda text: text, ["textbox"], ["label"])
        # Differing labels -> 1; identical labels -> 0.
        assert (
            gradio.interpretation.get_regression_or_classification_value(
                iface, ["cat"], ["test"]
            )
            == 1
        )
        assert (
            gradio.interpretation.get_regression_or_classification_value(
                iface, ["test"], ["test"]
            )
            == 0
        )