| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| import inspect |
| import os |
| import sys |
| import unittest |
| from dataclasses import dataclass |
|
|
| import torch |
|
|
| from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs |
| from accelerate.state import AcceleratorState |
| from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu |
| from accelerate.utils import KwargsHandler |
|
|
|
|
@dataclass
class MockClass(KwargsHandler):
    """Minimal ``KwargsHandler`` subclass used to exercise ``to_kwargs()``.

    Every field carries a default so ``to_kwargs()`` can report only the
    values that were explicitly overridden by the caller.
    """

    # int field with default 0
    a: int = 0
    # bool field with default False
    b: bool = False
    # float field with default 3.0
    c: float = 3.0
|
|
|
|
class DataLoaderTester(unittest.TestCase):
    """Tests that kwargs handlers propagate their values into `Accelerator`."""

    def test_kwargs_handler(self):
        """`to_kwargs()` must return only fields that differ from their defaults."""
        # No overrides -> empty dict, not a dict full of defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        """Values from `GradScalerKwargs` must reach the scaler built by `Accelerator`."""
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        # Reset the shared singleton state so this Accelerator is built fresh,
        # unaffected by any Accelerator created in an earlier test.
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        scaler = accelerator.scaler

        # The kwargs passed through the handler took effect.
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # The kwargs left untouched kept torch's GradScaler defaults.
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        """Re-launch this very file under torch.distributed; the `__main__`
        block at the bottom of the file then verifies that
        `DistributedDataParallelKwargs` values reach the DDP wrapper."""
        distributed_args = f"""
            -m torch.distributed.launch
            --nproc_per_node={torch.cuda.device_count()}
            --use_env
            {inspect.getfile(self.__class__)}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
|
|
|
|
if __name__ == "__main__":
    # Executed under torch.distributed by `test_ddp_kwargs`: build an
    # Accelerator with a DDP kwargs handler and check the wrapped model.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = accelerator.prepare(torch.nn.Linear(100, 200))

    errors = []

    # Values supplied through the kwargs handler must reach the DDP wrapper.
    observed_bucket_cap = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap != 15:
        errors.append(f"Kwargs badly passed, should have `15` but found {observed_bucket_cap}.\n")
    if model.find_unused_parameters is not True:
        errors.append(f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n")

    # Everything not overridden must keep torch's DDP defaults.
    if model.dim != 0:
        errors.append(f"Default value not respected, should have `0` but found {model.dim}.\n")
    if model.broadcast_buffers is not True:
        errors.append(f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n")
    if model.gradient_as_bucket_view is not False:
        errors.append(f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n")

    # Raise everything at once so a single run reports all mismatches.
    if errors:
        raise ValueError("".join(errors))
|
|