File size: 2,882 Bytes
2b06d1d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_1percent_close(source, target):
    """Return True when ``source`` is within 1% of ``target`` (relative to ``target``)."""
    relative_error = abs(source - target) / target
    return relative_error < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """Run ``datasets-cli test --save_infos`` on a loading script and check the
    generated dataset infos (README.md) against expected values.

    Byte sizes may vary slightly across environments (e.g. compression), so
    ``num_bytes`` fields are only required to be within 1% of the expected value;
    everything else must match exactly.
    """
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    # --save_infos writes the dataset infos into the dataset's README.md
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    # Compare only the fields that end up in the README YAML header.
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # Sizes are environment-dependent: allow up to 1% relative deviation.
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # BUG FIX: this was a bare comparison expression (`result == expected`),
            # which evaluates and discards the result — the check was silently a no-op.
            assert result == expected, f"mismatch for dataset info field {key!r}"