#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """ |
Training the Whisper model for sequence-to-sequence speech recognition via teacher-student distillation.
| """ |
| |
# You can also adapt this script for your own distillation tasks. Pointers for this are left as comments.
| import logging |
| import os |
| import re |
| import shutil |
| import sys |
| import time |
| from dataclasses import dataclass, field |
| from functools import partial |
| from pathlib import Path |
| from typing import Any, Dict, List, Optional, Union |
|
|
| import datasets |
| import evaluate |
| import numpy as np |
| import torch |
| import torch.nn as nn |
| import transformers |
| from accelerate import Accelerator |
| from accelerate.logging import get_logger |
| from accelerate.utils import set_seed |
| from datasets import ( |
| DatasetDict, |
| IterableDataset, |
| IterableDatasetDict, |
| concatenate_datasets, |
| interleave_datasets, |
| load_dataset, |
| ) |
| from huggingface_hub import create_repo, get_full_repo_name, upload_folder |
| from torch.utils.data import DataLoader |
| from tqdm import tqdm |
| from transformers import ( |
| AddedToken, |
| HfArgumentParser, |
| Seq2SeqTrainingArguments, |
| WhisperConfig, |
| WhisperFeatureExtractor, |
| WhisperForConditionalGeneration, |
| WhisperProcessor, |
| WhisperTokenizerFast, |
| get_scheduler |
| ) |
| from transformers.modeling_outputs import BaseModelOutput |
| from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer |
| from transformers.utils import check_min_version |
| from transformers.utils.versions import require_version |
|
|
|
|
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
| check_min_version("4.34.0.dev0") |
|
|
| require_version("datasets>=2.14.6", "To fix: `pip install --upgrade datasets`") |
|
|
| logger = get_logger(__name__) |
|
|
|
|
| @dataclass |
| class ModelArguments: |
| """ |
| Arguments pertaining to which model/config/tokenizer we are going to distill from. |
| """ |
|
|
| model_name_or_path: str = field( |
| metadata={"help": "Path to pretrained Whisper model or model identifier from huggingface.co/models"} |
| ) |
| teacher_model_name_or_path: str = field( |
| metadata={"help": "Path to pretrained teacher model or model identifier from huggingface.co/models"} |
| ) |
| config_name: Optional[str] = field( |
| default=None, |
| metadata={"help": "Pretrained config name or path if not the same as model_name"}, |
| ) |
| tokenizer_name: Optional[str] = field( |
| default=None, |
| metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}, |
| ) |
| feature_extractor_name: Optional[str] = field( |
| default=None, |
| metadata={"help": "feature extractor name or path if not the same as model_name"}, |
| ) |
| cache_dir: Optional[str] = field( |
| default=None, |
| metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, |
| ) |
| use_fast_tokenizer: bool = field( |
| default=True, |
| metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, |
| ) |
| model_revision: str = field( |
| default="main", |
| metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, |
| ) |
| subfolder: str = field( |
| default="", |
| metadata={ |
| "help": "In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can" |
| "specify the folder name here." |
| }, |
| ) |
| token: str = field( |
| default=None, |
| metadata={ |
| "help": ( |
| "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " |
| "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." |
| ) |
| }, |
| ) |
| attn_implementation: Optional[str] = field( |
| default=None, |
| metadata={ |
| "help": ( |
| "Which attention implementation to use in the encoder and decoder attention layers. Can be one of:\n" |
| "1. `eager` or `None`: default Transformers attention implementation.\n" |
| "2. `sdpa`: Flash Attention through PyTorch SDPA. Requires `torch>=2.1`. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs, (T4, RTX 2080).\n" |
| "3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100)." |
| ) |
| }, |
| ) |
|
|
| def __post_init__(self): |
| if self.attn_implementation not in [None, "eager", "sdpa", "flash_attention_2"]: |
| raise ValueError( |
| f"Got `--attn_implementation={self.attn_implementation}`, which is an invalid attention type. Should be one of:\n" |
| "1. `eager` or `None`: default Transformers attention implementation.\n" |
| "2. `sdpa`: Flash Attention through PyTorch SDPA. Requires `torch>=2.1`. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs, (T4, RTX 2080).\n" |
| "3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100)." |
| ) |
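# Note (illustrative, not part of the original CLI docs): `--attn_implementation sdpa`
# selects PyTorch's scaled-dot-product attention and needs `torch>=2.1`, while
# `--attn_implementation flash_attention_2` additionally requires the `flash-attn`
# package to be installed; leaving the flag unset falls back to the default eager implementation.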
|
|
|
|
| @dataclass |
| class DataTrainingArguments: |
| """ |
| Arguments pertaining to what data we are going to input our model for training and eval. |
| """ |
|
|
| train_dataset_name: str = field( |
| default=None, |
| metadata={ |
| "help": "The name of the training dataset to use (via the datasets library). Load and combine " |
| "multiple datasets by separating dataset ids by a '+' symbol. For example, to load LibriSpeech " |
| "and Common Voice, set `train_dataset_name='librispeech_asr+common_voice'`." |
| }, |
| ) |
| train_dataset_config_name: Optional[str] = field( |
| default=None, |
| metadata={ |
| "help": "The configuration name of the training dataset to use (via the datasets library). Load and combine " |
| "multiple datasets by separating dataset configs by a '+' symbol. Note that the order of the configs should " |
| "match the order of the datasets." |
| }, |
| ) |
| train_dataset_samples: str = field( |
| default=None, |
| metadata={ |
| "help": "Number of samples in each dataset when loading multiple datasets with streaming mode. " |
| "Not required when using one dataset or non-streaming mode. The sample values provide the sampling " |
| "probability for each dataset. Setting them equal to the number of sample values ensures that every " |
| "sample from every dataset is used once per epoch." |
| }, |
| ) |
| eval_dataset_name: str = field( |
| default=None, |
| metadata={ |
| "help": "The name of the evaluation dataset to use (via the datasets library). Defaults to the training " |
| "dataset name if unspecified. Load multiple evaluation datasets by separating dataset " |
| "ids by a '+' symbol." |
| }, |
| ) |
| eval_dataset_config_name: Optional[str] = field( |
| default=None, |
| metadata={ |
| "help": "The configuration name of the evaluation dataset to use (via the datasets library). Defaults to the " |
| "training dataset config name if unspecified." |
| }, |
| ) |
| dataset_cache_dir: Optional[str] = field( |
| default=None, |
| metadata={"help": "Path to cache directory for saving and loading datasets"}, |
| ) |
| overwrite_cache: bool = field( |
| default=False, |
| metadata={"help": "Overwrite the cached training and evaluation sets"}, |
| ) |
| preprocessing_num_workers: Optional[int] = field( |
| default=None, |
| metadata={"help": "The number of processes to use for the preprocessing if using non-streaming mode."}, |
| ) |
| preprocessing_batch_size: Optional[int] = field( |
| default=256, |
| metadata={"help": "Number of examples per batch provided to the `prepare_dataset` function."}, |
| ) |
| max_train_samples: Optional[int] = field( |
| default=None, |
| metadata={ |
| "help": ( |
| "For debugging purposes or quicker training, truncate the number of training examples to this value if set." |
| ) |
| }, |
| ) |
| max_eval_samples: Optional[int] = field( |
| default=None, |
| metadata={ |
| "help": ( |
| "For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set." |
| ) |
| }, |
| ) |
| audio_column_name: str = field( |
| default="audio", |
| metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, |
| ) |
| text_column_name: str = field( |
| default=None, |
| metadata={"help": "The name of the dataset column containing the text data in the training set."}, |
| ) |
| eval_text_column_name: str = field( |
| default="text", |
| metadata={"help": ("The name of the dataset column containing the text data in the evaluation set.")}, |
| ) |
| max_duration_in_seconds: float = field( |
| default=30.0, |
| metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}, |
| ) |
| min_duration_in_seconds: float = field( |
| default=0.0, |
| metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}, |
| ) |
| max_label_length: int = field( |
| default=448, |
| metadata={"help": "Truncate transcriptions that are longer `max_label_length` tokens."}, |
| ) |
| pad_target_to_multiple_of: Optional[int] = field( |
| default=None, |
| metadata={ |
| "help": ( |
| "If set will pad the target sequence to a multiple of the provided" |
| " value. This is important to avoid triggering recompilations on TPU." |
| " If unspecified, will default to padding the targets to max length." |
| ) |
| }, |
| ) |
| preprocessing_only: bool = field( |
| default=False, |
| metadata={ |
| "help": ( |
| "Whether to only do data preprocessing and skip training. This is" |
| " especially useful when data preprocessing errors out in distributed" |
| " training due to timeout. In this case, one should run the" |
| " preprocessing in a non-distributed setup with" |
| " `preprocessing_only=True` so that the cached datasets can" |
| " consequently be loaded in distributed training" |
| ) |
| }, |
| ) |
| train_split_name: str = field( |
| default="train", |
| metadata={ |
| "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" |
| }, |
| ) |
| eval_split_name: str = field( |
| default="validation", |
| metadata={ |
| "help": ( |
| "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'" |
| ) |
| }, |
| ) |
| streaming: bool = field( |
| default=True, |
| metadata={"help": "Whether to use Datasets' streaming mode to load and pre-process the data."}, |
| ) |
| wer_threshold: float = field( |
| default=None, |
| metadata={ |
| "help": "Filter training data with Whisper transcriptions that have greater than `wer_threshold` " |
| "WER with the normalised transcriptions. This only takes effect if training on pseudo-labels targets." |
| "If `--use_pseudo_labels=False`, then no WER filtering is performed, since we train directly on the text" |
| "transcriptions." |
| }, |
| ) |
| use_pseudo_labels: bool = field( |
| default=True, |
| metadata={ |
| "help": "Whether or not to use pseudo-label transcriptions as the targets. If True, the pseudo-labels " |
| "must be in the dataset column `whisper_transcript` from the previous pseudo-labelling step. This is " |
| "not currently yet configurable." |
| }, |
| ) |
| timestamp_probability: float = field( |
| default=0.2, metadata={"help": "Probability for training on timestamped tokens if the data contains it."} |
| ) |
| condition_on_prev_probability: float = field( |
| default=0.2, metadata={"help": "Probability for conditioning on the previous text example."} |
| ) |
| return_timestamps: bool = field( |
| default=False, metadata={"help": "Whether or not to predict timestamps in the generation step."} |
| ) |
| language: str = field( |
| default=None, |
| metadata={ |
| "help": ( |
| "Language for multilingual distillation. This argument should be set for multilingual distillation " |
| "only. For English speech recognition, it should be left as `None`." |
| ) |
| }, |
| ) |
| task: str = field( |
| default="transcribe", |
| metadata={ |
| "help": "Task, either `transcribe` for speech recognition or `translate` for speech translation." |
| "This argument should be set for multilingual distillation only. For English speech recognition, it should be left as `None`." |
| }, |
| ) |
| wandb_project: str = field( |
| default="distil-whisper", |
| metadata={"help": "The name of the wandb project."}, |
| ) |
| wandb_name: str = field( |
| default=None, |
| metadata={"help": "The name of the wandb run."}, |
| ) |
| wandb_dir: str = field( |
| default="./wandb", |
| metadata={"help": "The dir where wandb metadata will be stored."}, |
| ) |
|
|
|
|
| @dataclass |
| class DistillationTrainingArguments(Seq2SeqTrainingArguments): |
| freeze_encoder: Optional[bool] = field( |
| default=False, |
| metadata={ |
| "help": ( |
| "Whether to freeze the entire encoder model. Only recommended when the entire encoder has been " |
| "copied from the teacher model." |
| ) |
| }, |
| ) |
| freeze_decoder: Optional[bool] = field( |
| default=False, |
| metadata={ |
| "help": ( |
| "Whether to freeze the entire decoder model. Note that the decoder input embeddings are **not** frozen, since they are tied to the LM head." |
| ) |
| }, |
| ) |
| freeze_embed_positions: Optional[bool] = field( |
| default=False, |
| metadata={"help": "Whether to freeze the decoder embedding positions."}, |
| ) |
| temperature: Optional[float] = field( |
| default=2.0, metadata={"help": "Temperature to anneal the logits when computing the softmax."} |
| ) |
| kl_weight: Optional[float] = field( |
| default=1.0, |
| metadata={ |
| "help": ( |
| "Weighting assigned to the MSE loss in the KD formulation. MSE loss is " |
| "computed between the teacher-student hidden states and attentions." |
| ) |
| }, |
| ) |
| dtype: Optional[str] = field( |
| default="float32", |
| metadata={ |
| "help": ( |
| "The data type (dtype) in which to run training. One of `float32` (full-precision), " |
| "`float16` or `bfloat16` (both half-precision)." |
| ) |
| }, |
| ) |
|
|
|
|
| @dataclass |
| class DataCollatorSpeechSeq2SeqWithPadding: |
| """ |
| Data collator that will dynamically pad the inputs received. |
| Args: |
        processor ([`WhisperProcessor`])
            The processor used for processing the data.
| decoder_start_token_id (:obj: `int`) |
| The start-of-sequence token id of the decoder. |
| decoder_prev_token_id (:obj: `int`) |
| The start-of-prompt token id of the decoder |
        input_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`'max_length'`):
            Select a strategy to pad the returned input sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
              different lengths).
| target_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): |
| Select a strategy to pad the returned target sequences (according to the model's padding side and padding index). |
| See above for details. |
| max_target_length (:obj:`int`, `optional`): |
| Maximum length of the ``labels`` of the returned list and optionally padding length (see above). |
| """ |
|
|
| processor: Any |
| decoder_start_token_id: int |
| decoder_prev_token_id: int |
| input_padding: Union[bool, str] = "max_length" |
| target_padding: Union[bool, str] = "max_length" |
| max_target_length: Optional[int] = None |
|
|
| def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: |
        # dataloader returns a list of features which we convert to a dict
| input_features = {"input_features": [feature["input_features"] for feature in features]} |
| label_features = {"input_ids": [feature["labels"] for feature in features]} |
|
|
        # reformat list to dict and set to pytorch format
| batch = self.processor.feature_extractor.pad( |
| input_features, |
| padding=self.input_padding, |
| return_tensors="pt", |
| ) |
|
|
| labels_batch = self.processor.tokenizer.pad( |
| label_features, |
| max_length=self.max_target_length, |
| padding=self.target_padding, |
| return_tensors="pt", |
| ) |
|
|
        # shift labels to the right to get decoder input ids
| labels = labels_batch["input_ids"] |
| decoder_input_ids = labels[:, :-1] |
| labels = labels[:, 1:] |
| labels_mask = labels_batch.attention_mask[:, 1:] |
|
|
        # replace padding with -100 to ignore correctly when computing the loss
| labels = labels.masked_fill(labels_mask.ne(1), -100) |
|
|
        # replace initial prompt tokens with -100 to ignore correctly when computing the loss
| bos_index = torch.argmax((labels == self.decoder_start_token_id).long(), dim=1) |
| bos_index = torch.where(bos_index > 0, bos_index + 1, bos_index) |
| prompt_mask = torch.arange(labels.shape[1]) < bos_index[:, None] |
| labels = torch.where(prompt_mask, -100, labels) |
|
|
| batch["labels"] = labels |
| batch["decoder_input_ids"] = decoder_input_ids |
|
|
| return batch |
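# Illustrative sketch of the collator's label handling (token names are placeholders):
# given padded label ids [<prev>, p1, p2, <sot>, t1, t2, <eot>, <pad>], the collator returns
#   decoder_input_ids = [<prev>, p1, p2, <sot>, t1, t2, <eot>]
#   labels            = [-100, -100, -100, t1, t2, <eot>, -100]
# i.e. labels are the inputs shifted left by one, with padding and everything up to and
# including the first <sot> (the prompt prefix) masked to -100 so it is ignored by the loss.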
|
|
|
|
| def log_metric( |
| accelerator, |
| metrics: Dict, |
| train_time: float, |
| step: int, |
| epoch: int, |
| learning_rate: float = None, |
| prefix: str = "train", |
| ): |
| """Helper function to log all training/evaluation metrics with the correct prefixes and styling.""" |
| log_metrics = {} |
| for k, v in metrics.items(): |
| log_metrics[f"{prefix}/{k}"] = v |
| log_metrics[f"{prefix}/time"] = train_time |
| log_metrics[f"{prefix}/epoch"] = epoch |
| if learning_rate is not None: |
| log_metrics[f"{prefix}/learning_rate"] = learning_rate |
| accelerator.log(log_metrics, step=step) |
|
|
|
|
| def log_pred( |
| accelerator, |
| pred_str: List[str], |
| label_str: List[str], |
| norm_pred_str: List[str], |
| norm_label_str: List[str], |
| step: int, |
| prefix: str = "eval", |
| num_lines: int = 200000, |
| ): |
| """Helper function to log target/predicted transcriptions to weights and biases (wandb).""" |
| if accelerator.is_main_process: |
| wandb_tracker = accelerator.get_tracker("wandb") |
        # pretty name for the current step: step 50000 -> step 50k
| cur_step_pretty = f"{int(step // 1000)}k" if step > 1000 else step |
| prefix_pretty = prefix.replace("/", "-") |
|
|
        # convert str data to a wandb compatible format
| str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] |
| |
| wandb_tracker.log_table( |
| table_name=f"predictions/{prefix_pretty}-step-{cur_step_pretty}", |
| columns=["Target", "Pred", "Norm Target", "Norm Pred"], |
| data=str_data[:num_lines], |
| step=step, |
| ) |
|
|
        # log incorrect normalised predictions
| str_data = np.asarray(str_data) |
| str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]] |
| |
| wandb_tracker.log_table( |
| table_name=f"incorrect_predictions/{prefix_pretty}-step-{cur_step_pretty}", |
| columns=["Target", "Pred", "Norm Target", "Norm Pred"], |
| data=str_data_incorrect[:num_lines], |
| step=step, |
| ) |
|
|
|
|
| def convert_dataset_str_to_list( |
| dataset_names, |
| dataset_config_names, |
| splits=None, |
| text_column_names=None, |
| dataset_samples=None, |
| default_split="train", |
| ) -> List[Dict]: |
| """ |
| Given three lists of dataset names, configs and splits, this function groups the corresponding |
| names/configs/splits. Each dataset is assigned a unique dictionary with these metadata values, and the |
| function returns a list of dictionaries, one for each dataset. |
| """ |
| if isinstance(dataset_names, str): |
| dataset_names = dataset_names.split("+") |
| dataset_config_names = dataset_config_names.split("+") if dataset_config_names is not None else None |
| splits = splits.split("+") if splits is not None else None |
| text_column_names = text_column_names.split("+") if text_column_names is not None else None |
| dataset_samples = dataset_samples.split("+") if dataset_samples is not None else None |
|
|
    # basic checks to ensure the number of datasets, configs, splits, text columns and samples match
| if dataset_config_names is not None and len(dataset_names) != len(dataset_config_names): |
| raise ValueError( |
| f"Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and" |
| f" {len(dataset_config_names)} configs." |
| ) |
|
|
| if splits is not None and len(splits) != len(dataset_names): |
| raise ValueError( |
| f"Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits." |
| ) |
|
|
| if text_column_names is not None and len(text_column_names) != len(dataset_names): |
| raise ValueError( |
| f"Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and" |
| f" {len(text_column_names)} text column names." |
| ) |
|
|
| if dataset_samples is not None: |
| if len(dataset_samples) != len(dataset_names): |
| raise ValueError( |
| f"Ensure one sample is passed for each dataset, got {len(dataset_names)} datasets and " |
| f"{len(dataset_samples)} samples." |
| ) |
| dataset_samples = [float(ds_sample) for ds_sample in dataset_samples] |
| else: |
| dataset_samples = [None] * len(dataset_names) |
|
|
| dataset_config_names = ( |
| dataset_config_names if dataset_config_names is not None else ["default" for _ in range(len(dataset_names))] |
| ) |
| text_column_names = ( |
| text_column_names if text_column_names is not None else ["text" for _ in range(len(dataset_names))] |
| ) |
| splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] |
|
|
| dataset_names_dict = [] |
| for i, ds_name in enumerate(dataset_names): |
| dataset_names_dict.append( |
| { |
| "name": ds_name, |
| "config": dataset_config_names[i], |
| "split": splits[i], |
| "text_column_name": text_column_names[i], |
| "samples": dataset_samples[i], |
| } |
| ) |
| return dataset_names_dict |
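# Illustrative example (not executed): the '+'-separated syntax handled above,
#   convert_dataset_str_to_list("librispeech_asr+common_voice", "clean+en", splits="train+train")
# returns one metadata dict per dataset:
#   [{"name": "librispeech_asr", "config": "clean", "split": "train", "text_column_name": "text", "samples": None},
#    {"name": "common_voice", "config": "en", "split": "train", "text_column_name": "text", "samples": None}]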
|
|
|
|
| def load_multiple_datasets( |
| dataset_names: Union[List, str], |
| dataset_config_names: Union[List, str], |
| splits: Optional[Union[List, str]] = None, |
| text_column_names: Optional[List] = None, |
| sampling_rate: Optional[int] = 16000, |
| stopping_strategy: Optional[str] = "first_exhausted", |
| dataset_samples: Optional[Union[List, np.array]] = None, |
| streaming: Optional[bool] = True, |
| seed: Optional[int] = None, |
| accelerator: Optional[Accelerator] = None, |
    use_pseudo_labels: Optional[bool] = None,
| **kwargs, |
| ) -> IterableDataset: |
| dataset_names_dict = convert_dataset_str_to_list( |
| dataset_names, dataset_config_names, splits, text_column_names, dataset_samples |
| ) |
|
|
| if dataset_samples is not None: |
| dataset_samples = [ds_dict["samples"] for ds_dict in dataset_names_dict] |
| probabilities = np.array(dataset_samples) / np.sum(dataset_samples) |
| else: |
| probabilities = None |
|
|
| all_datasets = [] |
| |
| for dataset_dict in tqdm( |
| dataset_names_dict, |
| desc="Combining datasets...", |
| disable=not accelerator.is_local_main_process if accelerator is not None else False, |
| ): |
| dataset = load_dataset( |
| dataset_dict["name"], |
| dataset_dict["config"], |
| split=dataset_dict["split"], |
| streaming=streaming, |
| **kwargs, |
| ) |
| |
| dataset = dataset.cast_column("audio", datasets.features.Audio(sampling_rate)) |
| dataset_features = dataset.features.keys() |
| columns_to_keep = {"audio", "text"} |
|
|
| if dataset_dict["text_column_name"] not in dataset_features: |
| raise ValueError( |
| f"Text column name {dataset_dict['text_column_name']} not found in dataset" |
| f" '{dataset_dict['name']}'. Make sure to set `--text_column_name` to the" |
| f" correct text column - one of {', '.join(dataset_features)}." |
| ) |
|
|
| |
| if dataset_dict["text_column_name"] != "text": |
| dataset = dataset.rename_column(dataset_dict["text_column_name"], "text") |
|
|
| if use_pseudo_labels: |
| if "whisper_transcript" not in dataset_features: |
| raise ValueError( |
| f"Pseudo-label column `whisper_transcript` not found in dataset {dataset_dict['name']}. Ensure" |
| "pseudo-labels are present in the dataset under this column name, or train directly on the text " |
| "labels by setting `--use_pseudo_labels=False` and defining the appropriate `--text_column_name`." |
| ) |
| columns_to_keep.add("whisper_transcript") |
|
|
| if "condition_on_prev" in dataset_features: |
| columns_to_keep.add("condition_on_prev") |
|
|
| dataset_features = dataset.features.keys() |
| dataset = dataset.remove_columns(set(dataset_features - columns_to_keep)) |
| all_datasets.append(dataset) |
|
|
| if len(all_datasets) == 1: |
        # we have a single dataset so just return it as-is
| return all_datasets[0] |
|
|
| if streaming: |
| interleaved_dataset = interleave_datasets( |
| all_datasets, |
| stopping_strategy=stopping_strategy, |
| probabilities=probabilities, |
| seed=seed, |
| ) |
| else: |
| interleaved_dataset = concatenate_datasets(all_datasets) |
|
|
| return interleaved_dataset |
|
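# Illustrative usage sketch (dataset ids and sample counts are hypothetical): interleave two
# streamed datasets in proportion to their sizes, so that one pass covers each sample once:
#   train_set = load_multiple_datasets(
#       "librispeech_asr+common_voice", "all+en", splits="train+train",
#       dataset_samples="281241+1000000", streaming=True, seed=42,
#   )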
|
|
|
| def sorted_checkpoints(output_dir=None, checkpoint_prefix="checkpoint") -> List[str]: |
| """Helper function to sort saved checkpoints from oldest to newest.""" |
| ordering_and_checkpoint_path = [] |
|
|
| glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)] |
|
|
| for path in glob_checkpoints: |
| regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) |
| if regex_match is not None and regex_match.groups() is not None: |
| ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) |
|
|
| checkpoints_sorted = sorted(ordering_and_checkpoint_path) |
| checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] |
| return checkpoints_sorted |
|
|
|
|
| def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint") -> None: |
| """Helper function to delete old checkpoints.""" |
| if save_total_limit is None or save_total_limit <= 0: |
| return |
    # check if we should delete older checkpoint(s)
| checkpoints_sorted = sorted_checkpoints(output_dir=output_dir, checkpoint_prefix=checkpoint_prefix) |
| if len(checkpoints_sorted) <= save_total_limit: |
| return |
|
|
| number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) |
| checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] |
| for checkpoint in checkpoints_to_be_deleted: |
| logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") |
| shutil.rmtree(checkpoint, ignore_errors=True) |
|
|
|
|
| _RE_CHECKPOINT = re.compile(r"^checkpoint-(\d+)-epoch-(\d+)$") |
|
|
|
|
| def get_last_checkpoint(folder): |
| content = os.listdir(folder) |
| checkpoints = [ |
| path |
| for path in content |
| if _RE_CHECKPOINT.search(path) is not None and os.path.isdir(os.path.join(folder, path)) |
| ] |
| if len(checkpoints) == 0: |
| return |
| return os.path.join(folder, max(checkpoints, key=lambda x: int(_RE_CHECKPOINT.search(x).groups()[0]))) |
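# Illustrative example (not executed): for a folder containing the directories
# `checkpoint-500-epoch-0` and `checkpoint-1000-epoch-1`, `get_last_checkpoint`
# returns the path to `checkpoint-1000-epoch-1`, the checkpoint with the highest step count.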
|
|
|
|
| def get_parameter_names(model, forbidden_layer_types, forbidden_module=None): |
| """ |
| Returns the names of the model parameters that are not inside a forbidden layer or forbidden module. |
| Can be used to get a subset of parameter names for decay masks, or to exclude parameters from an optimiser |
| (e.g. if the module is frozen). |
| """ |
| result = [] |
| for name, child in model.named_children(): |
| result += [ |
| f"{name}.{n}" |
| for n in get_parameter_names(child, forbidden_layer_types, forbidden_module) |
| if not ( |
| isinstance(child, tuple(forbidden_layer_types)) |
| or (child in tuple(forbidden_module) if forbidden_module is not None else False) |
| ) |
| ] |
    # Add model-specific parameters (defined with nn.Parameter) since they are not in any child module
| result += list(model._parameters.keys()) |
| return result |
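# Illustrative example (not executed): to build a weight-decay mask that skips LayerNorm,
#   decay_parameters = get_parameter_names(model, [nn.LayerNorm])
# returns all parameter names except those belonging to LayerNorm layers; biases are
# then filtered from this list separately (see the optimizer set-up in `main`).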
|
|
|
|
| def main(): |
    # 1. Parse input arguments
    # See all possible arguments in src/transformers/training_args.py, or by passing the --help flag to this script
| parser = HfArgumentParser((ModelArguments, DataTrainingArguments, DistillationTrainingArguments)) |
|
|
| if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): |
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
| model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) |
| else: |
| model_args, data_args, training_args = parser.parse_args_into_dataclasses() |
|
|
    # 2. Initialize the accelerator
    # We will let the accelerator handle device placement for us in this example
    # We simply have to specify the training precision and any trackers being used
    # The teacher model is cast directly to this dtype, since its parameters are
    # never updated during distillation
| if training_args.dtype == "float16": |
| mixed_precision = "fp16" |
| teacher_dtype = torch.float16 |
| elif training_args.dtype == "bfloat16": |
| mixed_precision = "bf16" |
| teacher_dtype = torch.bfloat16 |
| else: |
| mixed_precision = "no" |
| teacher_dtype = torch.float32 |
|
|
| accelerator = Accelerator( |
| gradient_accumulation_steps=training_args.gradient_accumulation_steps, |
| mixed_precision=mixed_precision, |
| log_with=training_args.report_to, |
| project_dir=training_args.output_dir, |
| ) |
|
|
| accelerator.init_trackers( |
| project_name=data_args.wandb_project, |
| init_kwargs={ |
| "wandb": {"name": data_args.wandb_name, |
| "dir": data_args.wandb_dir} |
| } |
|
|
| ) |
|
|
    # 3. Set-up basic logging
    # Create one log on every process with the configuration for debugging
| logging.basicConfig( |
| format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
| datefmt="%m/%d/%Y %H:%M:%S", |
| level=logging.INFO, |
| ) |
    # Log a short summary on each process
| logger.warning( |
| f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " |
| f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" |
| ) |
|
|
    # Set the verbosity to info of the Transformers logger (on main process only)
| if accelerator.is_local_main_process: |
| datasets.utils.logging.set_verbosity_warning() |
| transformers.utils.logging.set_verbosity_info() |
| else: |
| datasets.utils.logging.set_verbosity_error() |
| transformers.utils.logging.set_verbosity_error() |
| logger.info("Training/evaluation parameters %s", training_args) |
|
|
    # 4. Detecting last checkpoint and eventually continue from last checkpoint
| last_checkpoint = None |
| if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: |
| last_checkpoint = get_last_checkpoint(training_args.output_dir) |
| if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: |
| raise ValueError( |
| f"Output directory ({training_args.output_dir}) already exists and is not empty. " |
| "Use --overwrite_output_dir to overcome." |
| ) |
| elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: |
| logger.info( |
| f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " |
| "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." |
| ) |
|
|
    # 5. Handle the repository creation
| if accelerator.is_main_process: |
| if training_args.push_to_hub: |
| if training_args.hub_model_id is None: |
| repo_name = get_full_repo_name( |
| Path(training_args.output_dir).absolute().name, |
| token=training_args.hub_token, |
| ) |
| else: |
| repo_name = training_args.hub_model_id |
| create_repo(repo_name, exist_ok=True, token=training_args.hub_token) |
|
|
            # ensure the output directory exists and that wandb logs are not pushed to the Hub
            os.makedirs(training_args.output_dir, exist_ok=True)
            with open(os.path.join(training_args.output_dir, ".gitignore"), "a+") as gitignore:
                gitignore.seek(0)
                if "wandb" not in gitignore.read():
                    gitignore.write("wandb\n")
| elif training_args.output_dir is not None: |
| os.makedirs(training_args.output_dir, exist_ok=True) |
| accelerator.wait_for_everyone() |
|
|
    # 6. Load dataset - either streaming or non-streaming (offline)
| raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() |
|
|
    # set seed for determinism
| set_seed(training_args.seed) |
|
|
| if training_args.do_train: |
| raw_datasets["train"] = load_multiple_datasets( |
| data_args.train_dataset_name, |
| data_args.train_dataset_config_name, |
| splits=data_args.train_split_name, |
| text_column_names=data_args.text_column_name, |
| use_pseudo_labels=data_args.use_pseudo_labels, |
| streaming=data_args.streaming, |
| dataset_samples=data_args.train_dataset_samples, |
| seed=training_args.seed, |
| accelerator=accelerator, |
| cache_dir=data_args.dataset_cache_dir, |
| token=model_args.token, |
| ) |
| raw_datasets_train_features = list(raw_datasets["train"].features.keys()) |
|
|
| if training_args.do_eval: |
| dataset_names_dict = convert_dataset_str_to_list( |
| data_args.eval_dataset_name if data_args.eval_dataset_name else data_args.train_dataset_name, |
| ( |
| data_args.eval_dataset_config_name |
| if data_args.eval_dataset_config_name |
| else data_args.train_dataset_config_name |
| ), |
| splits=data_args.eval_split_name, |
| text_column_names=data_args.eval_text_column_name, |
| ) |
| all_eval_splits = [] |
| if len(dataset_names_dict) == 1: |
            # load a single eval set
| dataset_dict = dataset_names_dict[0] |
| all_eval_splits.append("eval") |
| raw_datasets["eval"] = load_dataset( |
| dataset_dict["name"], |
| dataset_dict["config"], |
| split=dataset_dict["split"], |
| cache_dir=data_args.dataset_cache_dir, |
| token=model_args.token, |
| streaming=data_args.streaming, |
| ) |
| if data_args.eval_text_column_name != "text": |
| raw_datasets["eval"] = raw_datasets["eval"].rename_column(data_args.eval_text_column_name, "text") |
| else: |
            # load multiple eval sets
| for dataset_dict in dataset_names_dict: |
| if dataset_dict["name"] == "esb/diagnostic-dataset": |
                    # for the ESB diagnostic dataset, the dataset name is effectively the config
| pretty_name = f"{dataset_dict['config']}-diagnostic/{dataset_dict['split']}" |
| else: |
| pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" |
| all_eval_splits.append(pretty_name) |
| raw_datasets[pretty_name] = load_dataset( |
| dataset_dict["name"], |
| dataset_dict["config"], |
| split=dataset_dict["split"], |
| cache_dir=data_args.dataset_cache_dir, |
| token=model_args.token, |
| streaming=data_args.streaming, |
| ) |
                # make column names consistent (text, audio)
| if dataset_dict["text_column_name"] != "text": |
| raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column( |
| dataset_dict["text_column_name"], "text" |
| ) |
| raw_datasets[pretty_name] = raw_datasets[pretty_name].remove_columns( |
| set(raw_datasets[pretty_name].features.keys()) - {"audio", "text"} |
| ) |
|
|
| if not training_args.do_train and not training_args.do_eval: |
| raise ValueError( |
| "Cannot not train and not do evaluation. At least one of training or evaluation has to be performed." |
| ) |
|
|
    # 7. Load pretrained model, tokenizer, and feature extractor
| config = WhisperConfig.from_pretrained( |
| (model_args.config_name if model_args.config_name else model_args.model_name_or_path), |
| cache_dir=model_args.cache_dir, |
| revision=model_args.model_revision, |
| token=model_args.token, |
| ) |
| feature_extractor = WhisperFeatureExtractor.from_pretrained( |
| (model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path), |
| cache_dir=model_args.cache_dir, |
| revision=model_args.model_revision, |
| token=model_args.token, |
| ) |
| tokenizer = WhisperTokenizerFast.from_pretrained( |
| (model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), |
| cache_dir=model_args.cache_dir, |
| use_fast=model_args.use_fast_tokenizer, |
| revision=model_args.model_revision, |
| token=model_args.token, |
| ) |
|
|
    # override timestamp tokens until tokenizer issues are fixed in transformers
| timestamps = [AddedToken("<|%.2f|>" % (i * 0.02), lstrip=False, rstrip=False) for i in range(1500 + 1)] |
| tokenizer.add_tokens(timestamps) |
|
|
    # The teacher model can safely be cast to the dtype of training since we don't
    # update the params
| teacher_model = WhisperForConditionalGeneration.from_pretrained( |
| model_args.teacher_model_name_or_path, |
| cache_dir=model_args.cache_dir, |
| token=model_args.token, |
| low_cpu_mem_usage=True, |
| torch_dtype=teacher_dtype, |
| attn_implementation=model_args.attn_implementation, |
| ) |
|
|
| student_model = WhisperForConditionalGeneration.from_pretrained( |
| model_args.model_name_or_path, |
| config=config, |
| cache_dir=model_args.cache_dir, |
| revision=model_args.model_revision, |
| subfolder=model_args.subfolder, |
| token=model_args.token, |
| low_cpu_mem_usage=True, |
| attn_implementation=model_args.attn_implementation, |
| ) |
|
|
| if student_model.config.decoder_start_token_id is None or teacher_model.config.decoder_start_token_id is None: |
| raise ValueError( |
| f"Make sure that `config.decoder_start_token_id` is correctly defined for both the " |
| f"student and teacher model. Got {student_model.config.decoder_start_token_id} for the " |
| f"student and {teacher_model.config.decoder_start_token_id} for the teacher." |
| ) |
|
|
    # enable gradient checkpointing if necessary
| if training_args.gradient_checkpointing: |
| student_model.gradient_checkpointing_enable() |
|
|
| def set_trainable_parameters(module, requires_grad=False): |
| for param in module.parameters(): |
| param.requires_grad = requires_grad |
| module._requires_grad = requires_grad |
|
|
    # freeze student encoder if necessary
| if training_args.freeze_encoder: |
| set_trainable_parameters(student_model.model.encoder, requires_grad=False) |
| student_model.model.encoder.gradient_checkpointing = False |
    # freeze student decoder if necessary
| if training_args.freeze_decoder: |
| set_trainable_parameters(student_model.model.decoder, requires_grad=False) |
| student_model.model.decoder.gradient_checkpointing = False |
        # un-freeze the LM head (and consequently the tied input embeddings), which would otherwise be frozen with the decoder
| set_trainable_parameters(student_model.proj_out, requires_grad=True) |
| |
|
|
| if training_args.freeze_embed_positions: |
        # freeze the decoder positional embeddings
| set_trainable_parameters(student_model.model.decoder.embed_positions, requires_grad=False) |
| if student_model.model.decoder.gradient_checkpointing: |
| logger.info( |
| "Disabling gradient checkpointing in the decoder since it's incompatible with `freeze_embed_positions`." |
| ) |
| |
| logger.info( |
| f"Number of trainable parameters: {sum(p.numel() for p in student_model.parameters() if p.requires_grad):.3e}" |
| ) |
|
|
| share_hidden_states = training_args.freeze_encoder and student_model.config.d_model == teacher_model.config.d_model |
| if share_hidden_states: |
        # tie the teacher encoder to the (frozen) student encoder, so the encoder forward pass only has to be run once
| teacher_model.model.encoder = student_model.model.encoder |
|
|
| if hasattr(teacher_model.generation_config, "is_multilingual") and teacher_model.generation_config.is_multilingual: |
        # We need to set the language and task ids for previously multilingual checkpoints
| is_multilingual = True |
| tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task, predict_timestamps=False) |
| student_model.generation_config.update( |
| **{ |
| "language": data_args.language, |
| "task": data_args.task, |
| } |
| ) |
| elif data_args.language is not None: |
| raise ValueError( |
| "Setting language token for an English-only checkpoint is not permitted. The language argument should " |
| "only be set for multilingual checkpoints." |
| ) |
| else: |
| is_multilingual = False |
|
|
    # 8. Save the pre-processed tokenizer/feature extractor/configs to the output dir
| if accelerator.is_main_process: |
| feature_extractor.save_pretrained(training_args.output_dir) |
| tokenizer.save_pretrained(training_args.output_dir) |
| |
| config.save_pretrained(training_args.output_dir) |
| student_model.generation_config.save_pretrained(training_args.output_dir) |
|
|
| accelerator.wait_for_everyone() |
| processor = WhisperProcessor.from_pretrained(training_args.output_dir) |
|
|
    # 9. Resample speech dataset: `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
| sampling_rate = feature_extractor.sampling_rate |
| raw_datasets = raw_datasets.cast_column( |
| data_args.audio_column_name, |
| datasets.features.Audio(sampling_rate=sampling_rate), |
| ) |
|
|
    # 10. Preprocessing the datasets: we need to read the audio files as arrays and tokenize the targets.
    # Derive the pre-processing constants from the data/training args.
| max_input_length = int(data_args.max_duration_in_seconds * sampling_rate) |
| min_input_length = int(data_args.min_duration_in_seconds * sampling_rate) |
| max_label_length = ( |
| data_args.max_label_length if data_args.max_label_length is not None else student_model.config.max_length |
| ) |
|
|
| timestamp_probability = data_args.timestamp_probability |
| condition_on_prev_probability = data_args.condition_on_prev_probability |
| return_timestamps = data_args.return_timestamps if timestamp_probability > 0 else False |
|
|
| timestamp_ids = tokenizer.timestamp_ids() |
| timestamp_begin = tokenizer.all_special_ids[-1] |
| timestamp_position = 3 if is_multilingual else 1 |
|
|
| decoder_start_token_id = student_model.config.decoder_start_token_id |
| decoder_prev_token_id = tokenizer.all_special_ids[-3] |
| prompt_cutoff_length = max_label_length // 2 |
|
|
| num_workers = data_args.preprocessing_num_workers |
| dataloader_num_workers = training_args.dataloader_num_workers |
| prefetch_factor = training_args.dataloader_prefetch_factor |
|
|
| metric = evaluate.load("wer") |
| normalizer = ( |
| BasicTextNormalizer() |
| if data_args.language is not None |
| else EnglishTextNormalizer(tokenizer.english_spelling_normalizer) |
| ) |
| wer_threshold = data_args.wer_threshold |
| use_pseudo_labels = data_args.use_pseudo_labels |
| train_text_column_name = "whisper_transcript" if use_pseudo_labels else "text" |
|
|
    # 10.1: filter based on the maximum number of training/evaluation samples
| if training_args.do_train and data_args.max_train_samples is not None: |
| raw_datasets["train"] = ( |
| raw_datasets["train"].take(data_args.max_train_samples) |
| if data_args.streaming |
| else raw_datasets["train"].select(range(data_args.max_train_samples)) |
| ) |
|
|
| if training_args.do_eval and data_args.max_eval_samples is not None: |
| for eval_split in all_eval_splits: |
| raw_datasets[eval_split] = ( |
| raw_datasets[eval_split].take(data_args.max_eval_samples) |
| if data_args.streaming |
| else raw_datasets[eval_split].select(range(data_args.max_eval_samples)) |
| ) |
|
|
    # 10.2: filter based on the WER of the Whisper pseudo-labels (train only)
| def is_wer_in_range(ground_truth, whisper_transcript): |
| norm_ground_truth = normalizer(ground_truth) |
| if whisper_transcript is not None and whisper_transcript.upper() == whisper_transcript: |
            # filter entirely upper-case transcriptions: these are erroneous generations from large-v3
| return False |
| elif len(norm_ground_truth) > 0 and whisper_transcript is not None: |
| norm_whisper_transcript = normalizer(whisper_transcript) |
| wer = 100 * metric.compute(predictions=[norm_whisper_transcript], references=[norm_ground_truth]) |
| return wer < wer_threshold |
| else: |
            # filter automatically since we can't know the WER
| return False |
|
|
| filter_by_wer_threshold = partial( |
| raw_datasets["train"].filter, |
| function=is_wer_in_range, |
| input_columns=["text", "whisper_transcript"], |
| ) |
|
|
| if wer_threshold is not None and use_pseudo_labels: |
| with accelerator.main_process_first(): |
| raw_datasets["train"] = ( |
| filter_by_wer_threshold(num_proc=num_workers, desc="filtering train dataset by wer") |
| if not data_args.streaming |
| else filter_by_wer_threshold() |
| ) |
|
|
    # 10.3: pre-process training/evaluation datasets
| def prepare_train_dataset(batch): |
| """ |
| Pre-process the raw dataset in a three stage process: |
| 1. Convert the audio arrays to log-mel spectrogram inputs |
| 2. Possibly filter the timestamp tokens from the token ids (depending on the timestamp probability) |
| 3. Possibly add prompt tokens if conditioning on previous text (depending on the conditioning probability) |
| """ |
        # process audio input
| audio = [sample["array"] for sample in batch["audio"]] |
| inputs = feature_extractor(audio, sampling_rate=sampling_rate) |
| batch["input_features"] = inputs.input_features |
| batch["input_length"] = [len(sample) for sample in audio] |
|
|
        # process text targets - for training these are the Whisper-generated pseudo-labels
| input_str_batched = batch[train_text_column_name] |
| condition_on_prev_batched = batch.get("condition_on_prev", len(input_str_batched) * [None]) |
|
|
| all_token_ids = [] |
| all_token_ids_unprompted = [] |
| for prev_ids, input_str in zip(condition_on_prev_batched, input_str_batched): |
| token_ids = tokenizer(input_str, add_special_tokens=not use_pseudo_labels).input_ids |
|
|
            # check whether we have timestamps in the labels and filter them if required
| has_timestamps = len(set(token_ids) & set(timestamp_ids)) > 0 |
| if has_timestamps: |
                # sample from binomial distribution to get probability of training on timestamps
| predict_timestamps = bool(np.random.binomial(1, timestamp_probability)) |
| if not predict_timestamps: |
                    # filter timestamps and insert the <|notimestamps|> task token
| token_ids = [token for token in token_ids if token < timestamp_begin] |
| token_ids.insert(timestamp_position, timestamp_begin) |
|
|
| all_token_ids_unprompted.append(token_ids) |
            # decide whether to condition on the previous text
| condition_on_prev = bool(np.random.binomial(1, condition_on_prev_probability)) |
| if not condition_on_prev: |
| prev_ids = None |
| elif "condition_on_prev" not in batch and len(all_token_ids_unprompted) > 1: |
                # prompt ids are the penultimate token ids in the batch
| prev_ids = all_token_ids_unprompted[-2] |
|
|
| if prev_ids is not None: |
| if has_timestamps and not predict_timestamps: |
                    # filter timestamp ids from the prompt when not predicting timestamps
| prev_ids = [token for token in prev_ids if token < timestamp_begin] |
|
|
                # check that the length of the prompt does not exceed more than half the max label length (224)
| if len(prev_ids) > prompt_cutoff_length: |
| prev_ids = prev_ids[-prompt_cutoff_length + 1 :] |
| prev_ids = [decoder_prev_token_id] + prev_ids |
|
|
                # and that the total length of the labels does not exceed the max label length (448)
| if len(prev_ids + token_ids) > max_label_length: |
| trim_length = len(prev_ids + token_ids) - max_label_length + 1 |
| prev_ids = prev_ids[trim_length:] |
| prev_ids = [decoder_prev_token_id] + prev_ids |
|
|
| token_ids = prev_ids + token_ids |
|
|
| all_token_ids.append(token_ids) |
|
|
| batch["labels"] = all_token_ids |
| return batch |
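    # Illustrative sketch of the prompt construction above (token names are placeholders):
    # with conditioning active, target "<sot> t1 t2 <eot>" and previous ids [p1, p2] combine to
    #   [<prev>, p1, p2, <sot>, t1, t2, <eot>]
    # with the prompt trimmed from the left whenever the combined length would exceed max_label_length.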
|
|
| def prepare_eval_dataset(batch): |
        # process audio input
| sample = batch["audio"] |
| inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) |
| batch["input_features"] = inputs.input_features[0] |
| batch["input_length"] = len(sample["array"]) |
|
|
        # process targets - for evaluation these are the ground-truth transcriptions
| input_str = batch["text"] |
| batch["labels"] = tokenizer(input_str).input_ids |
| return batch |
|
|
| vectorized_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() |
| if training_args.do_train: |
        # with streaming mode we can only have 1 worker, whereas with non-streaming
        # we can use `num_workers` (which is faster)
        # We gate the pre-processing function accordingly
| map_fn_train = partial( |
| raw_datasets["train"].map, |
| function=prepare_train_dataset, |
| remove_columns=raw_datasets_train_features, |
| batched=True, |
| batch_size=data_args.preprocessing_batch_size, |
| ) |
| with accelerator.main_process_first(): |
| vectorized_datasets["train"] = ( |
| map_fn_train(num_proc=num_workers, desc="preprocess train dataset") |
| if not data_args.streaming |
| else map_fn_train() |
| ) |
| if training_args.do_eval: |
| for eval_split in all_eval_splits: |
| raw_datasets_eval_features = list(raw_datasets[eval_split].features.keys()) |
| map_fn_eval = partial( |
| raw_datasets[eval_split].map, function=prepare_eval_dataset, remove_columns=raw_datasets_eval_features |
| ) |
| with accelerator.main_process_first(): |
| vectorized_datasets[eval_split] = ( |
| map_fn_eval(num_proc=num_workers, desc="preprocess eval dataset") |
| if not data_args.streaming |
| else map_fn_eval() |
| ) |
|
|
    # filter training data with inputs longer than max_input_length or shorter than min_input_length
| def is_audio_in_length_range(length): |
| return min_input_length < length < max_input_length |
|
|
| filter_by_audio_fn = partial( |
| vectorized_datasets.filter, function=is_audio_in_length_range, input_columns=["input_length"] |
| ) |
| with accelerator.main_process_first(): |
| vectorized_datasets = ( |
| filter_by_audio_fn(num_proc=num_workers, desc="filtering train dataset by audio length") |
| if not data_args.streaming |
| else filter_by_audio_fn() |
| ) |
|
|
    # filter training data with labels longer than max_label_length
| def is_labels_in_length_range(labels): |
| return 0 < len(labels) <= max_label_length |
|
|
| filter_by_labels_fn = partial( |
| vectorized_datasets.filter, function=is_labels_in_length_range, input_columns=["labels"] |
| ) |
| with accelerator.main_process_first(): |
| vectorized_datasets = ( |
| filter_by_labels_fn(num_proc=num_workers, desc="filtering train dataset") |
| if not data_args.streaming |
| else filter_by_labels_fn() |
| ) |
|
|
    # Pre-processing complete!
    # For large datasets it is advised to run the preprocessing on a
    # single machine first with `--preprocessing_only` since there will most likely
    # be a timeout when running the script in distributed mode.
    # In a second step, `--preprocessing_only` can then be set to `False` to load the
    # cached dataset
| if data_args.preprocessing_only: |
| if data_args.streaming: |
| raise ValueError( |
| "When using streaming mode, dataset pre-processing is performed on the fly, hence there is no notion" |
| "of a cached pre-processed dataset. Remove the argument `--preprocessing_only` to run pre-processing " |
| "on the fly with streaming mode." |
| ) |
| cache = {k: v.cache_files for k, v in vectorized_datasets.items()} |
| logger.info(f"Data preprocessing finished. Files cached at {cache}.") |
| return |
|
|
    # 11. Define Evaluation Metrics
| def compute_metrics(preds, labels): |
        # replace padded labels by the padding token
| for idx in range(len(labels)): |
| labels[idx][labels[idx] == -100] = tokenizer.pad_token_id |
|
|
| pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps) |
        # we do not want to group tokens when computing the metrics
| label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) |
| wer_ortho = 100 * metric.compute(predictions=pred_str, references=label_str) |
|
|
        # normalize everything and re-compute the WER
| norm_pred_str = [normalizer(pred) for pred in pred_str] |
| norm_label_str = [normalizer(label) for label in label_str] |
        # for logging, we need the pred/labels to match the norm_pred/norm_labels, so discard any filtered samples here
| pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] |
| label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] |
        # filter out any samples where the normalised references are empty
| norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] |
| norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] |
|
|
| wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) |
| return {"wer": wer, "wer_ortho": wer_ortho}, pred_str, label_str, norm_pred_str, norm_label_str |
|
|
    # 12. Define Training Schedule
    # Store some constants
| per_device_train_batch_size = int(training_args.per_device_train_batch_size) |
| train_batch_size = per_device_train_batch_size * accelerator.num_processes |
| gradient_accumulation_steps = int(training_args.gradient_accumulation_steps) |
| per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) |
|
|
| if not data_args.streaming and training_args.max_steps < 0: |
| num_epochs = int(training_args.num_train_epochs) |
| steps_per_epoch = len(vectorized_datasets["train"]) // (train_batch_size * gradient_accumulation_steps) |
| total_train_steps = steps_per_epoch * num_epochs |
| elif training_args.max_steps > 0: |
| logger.info("max_steps is given, it will override any value given in num_train_epochs") |
| total_train_steps = int(training_args.max_steps) |
| if not data_args.streaming: |
| steps_per_epoch = len(vectorized_datasets["train"]) // (train_batch_size * gradient_accumulation_steps) |
| num_epochs = int(np.ceil(total_train_steps / steps_per_epoch)) |
| else: |
            # Setting a very large number of epochs so we go as many times as necessary over the iterator
| num_epochs = sys.maxsize |
| steps_per_epoch = total_train_steps |
| else: |
| raise ValueError("max_steps must be specified when training with a streaming (iterable) dataset") |
|
|
| if training_args.eval_steps is None: |
| logger.info( |
| f"eval_steps is not set, evaluating at the end of {'each epoch' if not data_args.streaming else 'training'}" |
| ) |
| eval_steps = steps_per_epoch |
| else: |
| eval_steps = training_args.eval_steps |
|
|
    # 13. Define optimizer, LR scheduler, collator
    # Exclude frozen modules from the optimizer, and LayerNorm/bias parameters from weight decay
| forbidden_module = [ |
| module |
| for module, flag in [ |
| (student_model.model.encoder, training_args.freeze_encoder), |
| (student_model.model.decoder, training_args.freeze_decoder) |
| ] |
| if flag |
| ] or None |
|
|
| decay_parameters = get_parameter_names( |
| student_model, |
| [nn.LayerNorm], |
| forbidden_module=forbidden_module, |
| ) |
| decay_parameters = [name for name in decay_parameters if "bias" not in name] |
| optimizer_grouped_parameters = [ |
| { |
| "params": [param for name, param in student_model.named_parameters() if name in decay_parameters], |
| "weight_decay": training_args.weight_decay, |
| }, |
| { |
| "params": [param for name, param in student_model.named_parameters() if name not in decay_parameters], |
| "weight_decay": 0.0, |
| }, |
| ] |
| optimizer = torch.optim.AdamW( |
| params=optimizer_grouped_parameters, |
| lr=training_args.learning_rate, |
| betas=(training_args.adam_beta1, training_args.adam_beta2), |
| eps=training_args.adam_epsilon, |
| ) |
|
|
    # LR scheduler gets stepped by `num_processes` each time -> account for this in the warmup / total steps
| lr_scheduler = get_scheduler( |
| name=training_args.lr_scheduler_type, |
| optimizer=optimizer, |
| num_warmup_steps=training_args.warmup_steps * accelerator.num_processes, |
| num_training_steps=total_train_steps * accelerator.num_processes, |
| ) |
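    # Sketch of the scaling above: accelerate steps the scheduler once per process per
    # optimizer step, so with e.g. 8 processes and 500 intended warm-up steps we pass
    # 500 * 8 = 4000 to the scheduler to recover 500 effective warm-up steps.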
|
|
| data_collator = DataCollatorSpeechSeq2SeqWithPadding( |
| processor=processor, |
| decoder_start_token_id=decoder_start_token_id, |
| decoder_prev_token_id=decoder_prev_token_id, |
| input_padding="longest", |
| target_padding="max_length", |
| max_target_length=max_label_length, |
| ) |
|
|
    # 14. Define generation arguments - we need to do this before we wrap the models in DDP
    # so that we can still access the configs
| num_beams = ( |
| training_args.generation_num_beams |
| if training_args.generation_num_beams is not None |
| else getattr(student_model.generation_config, "num_beams", 1) |
| ) |
|
|
| gen_kwargs = { |
| "max_length": max_label_length, |
| "num_beams": num_beams, |
| "return_timestamps": return_timestamps, |
| } |
| if is_multilingual: |
        # forcing the language and task tokens helps multilingual models in their generations
| gen_kwargs.update( |
| { |
| "language": data_args.language, |
| "task": data_args.task, |
| } |
| ) |
|
|
    # 15. Prepare everything with accelerate
| student_model, teacher_model, optimizer, lr_scheduler = accelerator.prepare( |
| student_model, teacher_model, optimizer, lr_scheduler |
| ) |
|
|
| def kl_divergence(target_distribution, log_predicted_distribution, labels): |
| kl_loss = nn.KLDivLoss(reduction="none") |
| divergence = kl_loss(log_predicted_distribution, target_distribution) |
        # ignore padded tokens from divergence, i.e. where labels are not set to -100
| padding_mask = labels >= 0 |
| padding_mask = padding_mask.unsqueeze(-1) |
| divergence = divergence * padding_mask |
        # take the average over the mini-batch
| divergence = divergence.sum() / padding_mask.sum() |
| return divergence |
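    # Sketch of the distillation objective used below (Distil-Whisper formulation): with
    # temperature T, student logits z_s and teacher logits z_t,
    #   kl_loss = T^2 * KL( softmax(z_t / T) || log_softmax(z_s / T) )
    #   loss    = 0.8 * ce_loss + kl_weight * kl_loss
    # the T^2 factor compensates for the 1/T^2 gradient scaling introduced by the soft targets.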
|
|
    # Define gradient update step fn
| def train_step( |
| batch, |
| temperature=2.0, |
| ): |
| student_model.train() |
| teacher_model.eval() |
|
|
| student_outputs = student_model(**batch) |
| with torch.no_grad(): |
| if share_hidden_states: |
                # if the student and teacher share the same frozen encoder then we don't have to recompute the
                # encoder hidden-states for the teacher model, we can just re-use from the student
| encoder_outputs = BaseModelOutput(student_outputs.encoder_last_hidden_state.to(dtype=teacher_dtype)) |
| teacher_outputs = teacher_model(encoder_outputs=encoder_outputs, labels=batch["labels"]) |
| else: |
                # do the full forward pass for the teacher model (encoder + decoder)
| teacher_outputs = teacher_model(**batch) |
|
|
        # CE (data) loss
| ce_loss = student_outputs.loss |
        # rescale distribution by temperature to ensure gradients scale correctly
| teacher_distribution = nn.functional.softmax(teacher_outputs.logits / temperature, dim=-1) |
        # log softmax of student predictions for numerical stability
| student_distribution = nn.functional.log_softmax(student_outputs.logits / temperature, dim=-1) |
        # KL-divergence loss (scaled by temperature)
| kl_loss = kl_divergence(teacher_distribution, student_distribution, batch["labels"]) * temperature**2 |
|
|
        # use Distil-Whisper formulation (fix weight of CE loss and tune KL weight)
| loss = 0.8 * ce_loss + training_args.kl_weight * kl_loss |
| metrics = {"loss": loss, "ce_loss": ce_loss, "kl_loss": kl_loss} |
| return loss, metrics |
|
|
    # Define eval fn
| def eval_step(batch): |
| student_model.eval() |
| teacher_model.eval() |
|
|
| with torch.no_grad(): |
| student_outputs = student_model(**batch) |
| if share_hidden_states: |
| encoder_outputs = BaseModelOutput(student_outputs.encoder_last_hidden_state.to(dtype=teacher_dtype)) |
| teacher_outputs = teacher_model(encoder_outputs=encoder_outputs, labels=batch["labels"]) |
| else: |
| teacher_outputs = teacher_model(**batch) |
|
|
        # CE (data) loss
| ce_loss = student_outputs.loss |
|
|
        # log softmax / softmax for numerical stability
| student_distribution = nn.functional.log_softmax(student_outputs.logits, dim=-1) |
| teacher_distribution = nn.functional.softmax(teacher_outputs.logits, dim=-1) |
        # temperature is always 1 for eval
| kl_loss = kl_divergence(teacher_distribution, student_distribution, batch["labels"]) |
|
|
        # use Distil-Whisper formulation (fix weight of CE loss and tune KL weight)
| loss = 0.8 * ce_loss + training_args.kl_weight * kl_loss |
| metrics = {"loss": loss, "ce_loss": ce_loss, "kl_loss": kl_loss} |
| return metrics |
|
|
| def generate_step(batch): |
| student_model.eval() |
| output_ids = accelerator.unwrap_model(student_model).generate(batch["input_features"], **gen_kwargs) |
| output_ids = accelerator.pad_across_processes(output_ids, dim=1, pad_index=tokenizer.pad_token_id) |
| return output_ids |
|
|
| logger.info("***** Running training *****") |
| logger.info(f" Num examples = {total_train_steps * train_batch_size * gradient_accumulation_steps}") |
| if not data_args.streaming: |
| logger.info(f" Num epochs = {num_epochs}") |
| logger.info(" Instantaneous batch size per device =" f" {training_args.per_device_train_batch_size}") |
| logger.info(" Gradient accumulation steps =" f" {gradient_accumulation_steps}") |
| logger.info( |
| f" Total train batch size (w. parallel & distributed) = {train_batch_size * gradient_accumulation_steps}" |
| ) |
| logger.info(f" Total optimization steps = {total_train_steps}") |
|
|
    # ======================== Training ================================
| train_time = 0 |
| train_start = time.time() |
| steps_trained_progress_bar = tqdm( |
| range(total_train_steps), desc="Train steps ... ", position=0, disable=not accelerator.is_local_main_process |
| ) |
| continue_training = True |
| epochs_trained = 0 |
| cur_step = 0 |
|
|
| checkpoint = None |
| if training_args.resume_from_checkpoint is not None: |
| checkpoint = training_args.resume_from_checkpoint |
| elif last_checkpoint is not None: |
| checkpoint = last_checkpoint |
|
|
| if checkpoint is not None: |
| accelerator.load_state(checkpoint) |
        # Find num steps and epoch from saved state string pattern
| pattern = r"checkpoint-(\d+)-epoch-(\d+)" |
| match = re.search(pattern, checkpoint) |
| cur_step = int(match.group(1)) |
| epochs_trained = int(match.group(2)) |
|
|
| logger.info(" Continuing training from checkpoint, will skip to saved global_step") |
| logger.info(f" Continuing training from epoch {epochs_trained}") |
| logger.info(f" Continuing training from global step {cur_step}") |
|
|
| steps_trained_progress_bar.update(cur_step) |
|
|
| for epoch in range(0, epochs_trained): |
| vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(training_args.seed) |
|
|
| if not data_args.streaming and training_args.max_steps < 0: |
            # we know exactly the number of steps per epoch, so can skip through the required number of batches
| resume_step = (cur_step - epochs_trained * steps_per_epoch) * gradient_accumulation_steps |
| else: |
            # Currently we don't know how many steps we've taken in the current epoch
            # So we just shuffle the dataset one extra time and start from a fresh epoch
            # This is "good enough" for our purposes but not fully correct
| resume_step = None |
| vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(training_args.seed) |
| else: |
| resume_step = None |
|
|
| for epoch in range(epochs_trained, num_epochs): |
| vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(training_args.seed) |
| train_dataloader = DataLoader( |
| vectorized_datasets["train"], |
| collate_fn=data_collator, |
| batch_size=per_device_train_batch_size, |
| num_workers=dataloader_num_workers, |
| prefetch_factor=prefetch_factor, |
| pin_memory=training_args.dataloader_pin_memory, |
| ) |
| train_dataloader = accelerator.prepare(train_dataloader) |
| if hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDataset): |
| train_dataloader.dataset.set_epoch(epoch) |
|
|
| if resume_step is not None: |
            # Skip the batches we have already seen when resuming from a checkpoint
| train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) |
| resume_step = None |
|
|
| for batch in train_dataloader: |
| with accelerator.accumulate(student_model): |
| loss, train_metric = train_step(batch, temperature=training_args.temperature) |
| accelerator.backward(loss) |
| if accelerator.sync_gradients: |
| accelerator.clip_grad_norm_(student_model.parameters(), training_args.max_grad_norm) |
| optimizer.step() |
| lr_scheduler.step() |
| optimizer.zero_grad() |
|
|
            # Check if the accelerator has performed an optimization step behind the scenes
| if accelerator.sync_gradients: |
| steps_trained_progress_bar.update(1) |
| cur_step += 1 |
|
|
| if cur_step % training_args.logging_steps == 0: |
| steps_trained_progress_bar.write( |
| f"Step... ({cur_step} / {total_train_steps} | Loss:" |
| f" {train_metric['loss']}, Learning Rate:" |
| f" {lr_scheduler.get_last_lr()[0]})" |
| ) |
| log_metric( |
| accelerator, |
| metrics=train_metric, |
| learning_rate=lr_scheduler.get_last_lr()[0], |
| train_time=train_time + time.time() - train_start, |
| step=cur_step, |
| epoch=epoch, |
| prefix="train", |
| ) |
|
|
                # save checkpoint and weights after each save_steps and at the end of training
| if (cur_step % training_args.save_steps == 0) or cur_step == total_train_steps: |
| intermediate_dir = os.path.join(training_args.output_dir, f"checkpoint-{cur_step}-epoch-{epoch}") |
| accelerator.save_state(output_dir=intermediate_dir) |
| feature_extractor.save_pretrained(intermediate_dir) |
| tokenizer.save_pretrained(intermediate_dir) |
| config.save_pretrained(intermediate_dir) |
| |
| accelerator.unwrap_model(student_model).generation_config.save_pretrained(intermediate_dir) |
|
|
| accelerator.wait_for_everyone() |
| if accelerator.is_main_process: |
| rotate_checkpoints(training_args.save_total_limit, output_dir=training_args.output_dir) |
|
|
| if training_args.push_to_hub: |
| upload_folder( |
| folder_path=training_args.output_dir, |
| repo_id=repo_name, |
| repo_type="model", |
| commit_message=f"Saving train state of step {cur_step}", |
| ) |
|
|
| if training_args.do_eval and (cur_step % eval_steps == 0 or cur_step == total_train_steps): |
| train_time += time.time() - train_start |
| student_model.eval() |
                    # ======================== Evaluating ==============================
| for eval_split in all_eval_splits: |
| eval_metrics = [] |
| eval_preds = [] |
| eval_labels = [] |
| eval_start = time.time() |
|
|
| validation_dataloader = DataLoader( |
| vectorized_datasets[eval_split], |
| collate_fn=data_collator, |
| batch_size=per_device_eval_batch_size, |
| drop_last=False, |
| num_workers=dataloader_num_workers, |
| prefetch_factor=prefetch_factor, |
| pin_memory=training_args.dataloader_pin_memory, |
| ) |
| validation_dataloader = accelerator.prepare(validation_dataloader) |
|
|
| for batch in tqdm( |
| validation_dataloader, |
| desc=f"Evaluating {eval_split}...", |
| position=2, |
| disable=not accelerator.is_local_main_process, |
| ): |
| |
| eval_metric = eval_step(batch) |
| eval_metric = accelerator.gather_for_metrics(eval_metric) |
| eval_metrics.append(eval_metric) |
|
|
                            # generation step
| if training_args.predict_with_generate: |
| generated_ids = generate_step(batch) |
| |
| generated_ids, labels = accelerator.gather_for_metrics( |
| (generated_ids, batch["labels"]) |
| ) |
| eval_preds.extend(generated_ids) |
| eval_labels.extend(labels) |
|
|
| eval_time = time.time() - eval_start |
| |
| eval_metrics = { |
| key: torch.mean(torch.stack([d[key] for d in eval_metrics])) for key in eval_metrics[0] |
| } |
|
|
                        # compute WER metric from the generated predictions
| wer_desc = "" |
| if training_args.predict_with_generate: |
| wer_metric, pred_str, label_str, norm_pred_str, norm_label_str = compute_metrics( |
| eval_preds, eval_labels |
| ) |
| eval_metrics.update(wer_metric) |
| wer_desc = " ".join([f"Eval {key}: {value} |" for key, value in wer_metric.items()]) |
| log_pred( |
| accelerator, |
| pred_str, |
| label_str, |
| norm_pred_str, |
| norm_label_str, |
| step=cur_step, |
| prefix=eval_split, |
| ) |
|
|
| |
| steps_trained_progress_bar.write( |
| f"Eval results for step ({cur_step} / {total_train_steps} | Eval Loss: {eval_metrics['loss']} |" |
| f" {wer_desc})" |
| ) |
|
|
| log_metric( |
| accelerator, |
| metrics=eval_metrics, |
| train_time=eval_time, |
| step=cur_step, |
| epoch=epoch, |
| prefix=eval_split, |
| ) |
|
|
| |
| train_start = time.time() |
|
|
| |
| if cur_step == total_train_steps: |
|
|
                    # un-wrap student model for save
| student_model = accelerator.unwrap_model(student_model) |
| student_model.save_pretrained(training_args.output_dir) |
|
|
| if training_args.push_to_hub: |
| upload_folder( |
| folder_path=training_args.output_dir, |
                            repo_id=repo_name,
| repo_type="model", |
| commit_message=f"Saving final weights of step {cur_step}", |
| ) |
|
|
| continue_training = False |
| break |
|
|
| if not continue_training: |
| break |
|
|
| accelerator.end_training() |
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|