import logging
import os
import random
import tempfile
import unittest

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)

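
# These tests cover `Accelerator.save_state` / `Accelerator.load_state`: round-tripping
# model parameters, optimizer state, LR-scheduler state, and RNG state through a
# checkpoint, plus automatic checkpoint naming and rotation via `ProjectConfiguration`.
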
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)

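
# The fixtures in this module form a tiny linear-regression setup: `dummy_dataloaders`
# samples y = a * x + b plus Gaussian noise (std 0.1), and `DummyModel` fits scalar
# parameters `a` and `b`, so model state is trivially comparable across checkpoints.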
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # One quick pass over the training data
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Advance Python's RNG so tests can verify it is checkpointed
        if scheduler is not None:
            scheduler.step()
    return rands

class DummyModel(nn.Module):
    "Simple model to fit y = a * x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b

class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial state
            accelerator.save_state()

            # Save a second state; `total_limit=1` should keep only the most recent checkpoint
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial state
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states were restored
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial state
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states were restored
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        # Raw tensors lack `state_dict`/`load_state_dict`, so only items 0 and 1 are invalid
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial state
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure the scheduler state was restored
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
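

# Optional convenience entrypoint so this module can be run directly
# (e.g. `python <path-to-this-file>`); pytest/unittest discovery does not need it.
if __name__ == "__main__":
    unittest.main()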