| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| import os |
| import pickle |
| import unittest |
| from collections import UserDict, namedtuple |
|
|
| import torch |
|
|
| from accelerate.test_utils.testing import require_cuda |
| from accelerate.test_utils.training import RegressionModel |
| from accelerate.utils import ( |
| convert_outputs_to_fp32, |
| extract_model_from_parallel, |
| find_device, |
| patch_environment, |
| send_to_device, |
| ) |
|
|
|
|
# Fixture type used to check that send_to_device preserves namedtuple classes.
ExampleNamedTuple = namedtuple("ExampleNamedTuple", ["a", "b", "c"])
|
|
|
|
class UtilsTester(unittest.TestCase):
    """Tests for device placement, environment patching, model-unwrap pickling,
    and device discovery helpers from ``accelerate.utils``."""

    def test_send_to_device(self):
        # Reference tensor and the best available device for the round trip.
        sample = torch.randn(5, 2)
        target = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

        # A bare tensor comes back equal after moving to the device and back.
        on_device = send_to_device(sample, target)
        self.assertTrue(torch.equal(on_device.cpu(), sample))

        # Tuples keep their type; nested lists and plain scalars survive untouched.
        as_tuple = send_to_device((sample, [sample, sample], 1), target)
        self.assertIsInstance(as_tuple, tuple)
        self.assertTrue(torch.equal(as_tuple[0].cpu(), sample))
        self.assertIsInstance(as_tuple[1], list)
        for element in as_tuple[1]:
            self.assertTrue(torch.equal(element.cpu(), sample))
        self.assertEqual(as_tuple[2], 1)

        # Plain dicts keep their keys and nested value structure.
        as_dict = send_to_device({"a": sample, "b": [sample, sample], "c": 1}, target)
        self.assertIsInstance(as_dict, dict)
        self.assertTrue(torch.equal(as_dict["a"].cpu(), sample))
        self.assertIsInstance(as_dict["b"], list)
        for element in as_dict["b"]:
            self.assertTrue(torch.equal(element.cpu(), sample))
        self.assertEqual(as_dict["c"], 1)

        # Named tuples are rebuilt as the same namedtuple class, not a bare tuple.
        as_named = send_to_device(ExampleNamedTuple(a=sample, b=[sample, sample], c=1), target)
        self.assertIsInstance(as_named, ExampleNamedTuple)
        self.assertTrue(torch.equal(as_named.a.cpu(), sample))
        self.assertIsInstance(as_named.b, list)
        for element in as_named.b:
            self.assertTrue(torch.equal(element.cpu(), sample))
        self.assertEqual(as_named.c, 1)

        # UserDict subclasses are preserved rather than downcast to dict.
        as_user_dict = send_to_device(UserDict({"a": sample, "b": [sample, sample], "c": 1}), target)
        self.assertIsInstance(as_user_dict, UserDict)
        self.assertTrue(torch.equal(as_user_dict["a"].cpu(), sample))
        self.assertIsInstance(as_user_dict["b"], list)
        for element in as_user_dict["b"]:
            self.assertTrue(torch.equal(element.cpu(), sample))
        self.assertEqual(as_user_dict["c"], 1)

    def test_patch_environment(self):
        # Inside the context the variables are visible (upper-cased, stringified)...
        with patch_environment(aa=1, BB=2):
            self.assertEqual(os.environ.get("AA"), "1")
            self.assertEqual(os.environ.get("BB"), "2")

        # ...and they are removed again once the context exits.
        self.assertNotIn("AA", os.environ)
        self.assertNotIn("BB", os.environ)

    def test_can_undo_convert_outputs(self):
        # A model whose forward was wrapped by convert_outputs_to_fp32 must be
        # picklable again after extract_model_from_parallel undoes the wrap.
        model = RegressionModel()
        model._original_forward = model.forward
        model.forward = convert_outputs_to_fp32(model.forward)
        model = extract_model_from_parallel(model)
        _ = pickle.dumps(model)

    @require_cuda
    def test_can_undo_fp16_conversion(self):
        # Same pickling guarantee when an fp16 autocast wrapper sits under the
        # fp32-output wrapper (exercised only when CUDA is present).
        model = RegressionModel()
        model._original_forward = model.forward
        model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)
        model.forward = convert_outputs_to_fp32(model.forward)
        model = extract_model_from_parallel(model)
        _ = pickle.dumps(model)

    def test_find_device(self):
        # find_device reports the device of a tensor nested in a container,
        # and None when the container holds no tensor at all.
        self.assertEqual(find_device([1, "a", torch.tensor([1, 2, 3])]), torch.device("cpu"))
        self.assertEqual(find_device({"a": 1, "b": torch.tensor([1, 2, 3])}), torch.device("cpu"))
        self.assertIsNone(find_device([1, "a"]))
|
|