# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import tempfile
import unittest

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        # Noisy samples from y = a*x + b, which DummyModel should be able to fit
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # `total_limit=1` keeps at most one checkpoint around: older ones are
            # pruned on each subsequent `save_state()`
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # `iteration=1` starts the automatic naming counter at 1, so the next
            # `save_state()` writes `checkpoint_1` instead of clobbering `checkpoint_0`
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            # Raw tensors (indices 0 and 1) have no `state_dict`/`load_state_dict`,
            # so they should be flagged as invalid; the model and optimizer
            # (indices 2 and 3) are checkpointable and should not be
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
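

# Optional convenience entry point so the module can be run directly with
# `python`; pytest/unittest discovery does not require it
if __name__ == "__main__":
    unittest.main()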