| """ |
| Pseudo-labelling audio data using the Whisper model in preparation for distillation. |
| """ |
| import csv |
|
|
| |
| import logging |
| import os |
| import string |
| import sys |
| import time |
| from dataclasses import dataclass, field |
| from datetime import timedelta |
| from pathlib import Path |
| from typing import Any, Dict, List, Optional, Union |
|
|
| import datasets |
| import evaluate |
| import numpy as np |
| import torch |
| import transformers |
| from accelerate import Accelerator, InitProcessGroupKwargs |
| from accelerate.logging import get_logger |
| from datasets import ( |
| DatasetDict, |
| IterableDatasetDict, |
| load_dataset, |
| ) |
| from huggingface_hub import HfFolder, Repository, create_repo, get_full_repo_name |
| from torch.utils.data import DataLoader |
| from tqdm import tqdm |
| from transformers import ( |
| HfArgumentParser, |
| Seq2SeqTrainingArguments, |
| WhisperConfig, |
| WhisperFeatureExtractor, |
| WhisperForConditionalGeneration, |
| WhisperProcessor, |
| WhisperTokenizerFast, |
| ) |
| from transformers.models.whisper.english_normalizer import EnglishTextNormalizer |
| from transformers.utils import check_min_version |
| from transformers.utils.versions import require_version |
|
|
|
|
| |
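# Will error if the minimal version of Transformers is not installed. Remove at your own risk.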
check_min_version("4.34.0.dev0")

require_version("datasets>=2.14.6", "To fix: `pip install --upgrade datasets`")

logger = get_logger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to distill from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained Whisper model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None,
        metadata={"help": "Pretrained config name or path, if not the same as model_name."},
    )
    tokenizer_name: Optional[str] = field(
        default=None,
        metadata={"help": "Pretrained tokenizer name or path, if not the same as model_name."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None,
        metadata={"help": "Feature extractor name or path, if not the same as model_name."},
    )
    processor_name: Optional[str] = field(
        default=None,
        metadata={"help": "Processor name or path, if not the same as model_name."},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    subfolder: str = field(
        default="",
        metadata={
            "help": "In case the relevant files are located inside a subfolder of the model repo on huggingface.co, "
            "you can specify the folder name here."
        },
    )
    token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    dtype: Optional[str] = field(
        default="float32",
        metadata={
            "help": (
                "The data type (dtype) in which to load the model weights. One of `float32` (full-precision), "
                "`float16` or `bfloat16` (both half-precision)."
            )
        },
    )
    attn_type: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Which attention implementation to use in the encoder and decoder attention layers. Can be one of:\n"
                "1. `None`: default Transformers attention implementation.\n"
                "2. `flash_attn`: Flash Attention through PyTorch SDPA. Requires `torch>=2.0` and `optimum` to be "
                "installed. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs "
                "(T4, RTX 2080).\n"
                "3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package "
                "https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware "
                "(Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100)."
            )
        },
    )
    compile_encoder: Optional[bool] = field(
        default=True,
        metadata={
            "help": "Whether or not to enable torch compile in the encoder module. Requires `torch>=2.0` to be installed."
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the dataset to use (via the datasets library)."},
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    dataset_cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Path to cache directory for saving and loading datasets."},
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets."},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'."},
    )
    text_column_name: str = field(
        default="text",
        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'."},
    )
    id_column_name: str = field(
        default="id",
        metadata={"help": "The name of the dataset column containing the id data. Defaults to 'id'."},
    )
    max_label_length: int = field(
        default=128,
        metadata={"help": "Truncate transcriptions that are longer than `max_label_length` tokens."},
    )
    preprocessing_only: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to only do data preprocessing and skip training. This is"
                " especially useful when data preprocessing errors out in distributed"
                " training due to timeout. In this case, one should run the"
                " preprocessing in a non-distributed setup with"
                " `preprocessing_only=True` so that the cached datasets can"
                " subsequently be loaded in distributed training."
            )
        },
    )
    data_split_name: str = field(
        default="train+validation+test",
        metadata={
            "help": (
                "The name of the dataset splits to use (via the datasets library)."
                " Defaults to 'train+validation+test'. Multiple splits can be passed"
                " by separating them with the '+' character, e.g. 'train+validation'"
                " will pseudo-label both the 'train' and 'validation' splits"
                " sequentially."
            )
        },
    )
    wandb_project: str = field(
        default="distil-whisper",
        metadata={"help": "The name of the wandb project."},
    )
    streaming: bool = field(
        default=False,
        metadata={"help": "Whether to use the dataset's streaming mode to load and pre-process the data."},
    )
    max_samples_per_split: Optional[int] = field(
        default=None,
        metadata={"help": "For debugging purposes, truncate the number of examples per split to this value if set."},
    )
    return_timestamps: bool = field(
        default=False,
        metadata={
            "help": "Whether to return the timestamps with the text. This enables the Whisper timestamps logits processor."
        },
    )
    language: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Language for multilingual distillation. This argument should be set for multilingual distillation "
                "only. For English speech recognition, it should be left as `None`."
            )
        },
    )
    task: str = field(
        default="transcribe",
        metadata={
            "help": "Task, either `transcribe` for speech recognition or `translate` for speech translation. "
            "This argument should be set for multilingual distillation only. For English speech recognition, the "
            "default of `transcribe` can be used."
        },
    )
    decode_token_ids: bool = field(
        default=True,
        metadata={"help": "Whether or not to decode the predicted token ids to text transcriptions."},
    )
    private_dataset: bool = field(
        default=False,
        metadata={"help": "Whether or not to create a private dataset for the pseudo-labelled data."},
    )


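# Example invocation (illustrative only -- the script name and argument values are
# hypothetical; any `Seq2SeqTrainingArguments` flags can be appended):
#
#   accelerate launch run_pseudo_labelling.py \
#     --model_name_or_path "openai/whisper-large-v2" \
#     --dataset_name "mozilla-foundation/common_voice_13_0" \
#     --dataset_config_name "en" \
#     --data_split_name "train+validation" \
#     --output_dir "./common-voice-pseudo-labelled" \
#     --per_device_eval_batch_size 16 \
#     --dtype "bfloat16"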
def shift_tokens_right(label_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    """
    Shift label ids one token to the right.
    """
    shifted_label_ids = np.zeros_like(label_ids)
    shifted_label_ids[:, 1:] = label_ids[:, :-1]
    shifted_label_ids[:, 0] = decoder_start_token_id

    return shifted_label_ids


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor ([`WhisperProcessor`])
            The processor used for processing the data.
        decoder_start_token_id (:obj:`int`)
            The start-of-sequence token id of the decoder.
        decoder_prev_token_id (:obj:`int`)
            The start-of-prompt token id of the decoder.
        input_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned input sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
              different lengths).
        target_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned target sequences (according to the model's padding side and padding index).
            See above for details.
        max_target_length (:obj:`int`, `optional`):
            Maximum length of the ``labels`` of the returned list and optionally padding length (see above).
    """

    processor: Any
    decoder_start_token_id: int
    decoder_prev_token_id: int
    input_padding: Union[bool, str] = "max_length"
    target_padding: Union[bool, str] = "max_length"
    max_target_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        model_input_name = self.processor.model_input_names[0]

        input_features = {model_input_name: [feature[model_input_name] for feature in features]}
        label_features = {"input_ids": [feature["labels"] for feature in features]}
        file_ids = {"input_ids": [feature["file_id"] for feature in features]}

        batch = self.processor.feature_extractor.pad(
            input_features,
            padding=self.input_padding,
            return_tensors="pt",
        )

        labels_batch = self.processor.tokenizer.pad(
            label_features,
            max_length=self.max_target_length,
            padding=self.target_padding,
            return_tensors="pt",
        )

        file_ids_batch = self.processor.tokenizer.pad(
            file_ids,
            max_length=self.max_target_length,
            padding=self.target_padding,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore correctly when computing the loss
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        # if a bos token was prepended in the tokenization step, cut it here, since it is
        # appended again later anyway. Note: `.tolist()` is needed so the set comparison
        # is over Python ints rather than tensor elements (which hash by identity and
        # would make the subset check silently fail).
        if set(torch.unique(labels[:, 0]).tolist()).issubset({self.decoder_start_token_id, self.decoder_prev_token_id}):
            labels = labels[:, 1:]

        # mask any prompt tokens that precede the bos token with -100, so that they are
        # ignored when computing the loss
        bos_index = torch.argmax((labels == self.decoder_start_token_id).long(), dim=1)
        prompt_mask = torch.arange(labels.shape[1]) < bos_index[:, None]
        labels = torch.where(prompt_mask, -100, labels)

        batch["labels"] = labels
        batch["file_ids"] = file_ids_batch["input_ids"]

        return batch


def log_metric(
    accelerator,
    metrics: Dict,
    train_time: float,
    prefix: str = "eval",
):
    """Helper function to log all evaluation metrics with the correct prefixes and styling."""
    log_metrics = {}
    for k, v in metrics.items():
        log_metrics[f"{prefix}/{k}"] = v
    log_metrics[f"{prefix}/time"] = train_time
    accelerator.log(log_metrics)


def log_pred(
    accelerator,
    pred_str: List[str],
    label_str: List[str],
    norm_pred_str: List[str],
    norm_label_str: List[str],
    prefix: str = "eval",
    num_lines: int = 200000,
):
    """Helper function to log target/predicted transcriptions to weights and biases (wandb)."""
    if accelerator.is_main_process:
        wandb_tracker = accelerator.get_tracker("wandb")
        # pretty name for the split
        prefix = prefix.replace("/", "-")

        # convert str data to a wandb compatible format
        str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))]
        # log as a table with the appropriate headers
        wandb_tracker.log_table(
            table_name=f"{prefix}/all_predictions",
            columns=["Target", "Pred", "Norm Target", "Norm Pred"],
            data=str_data[:num_lines],
        )

        # log the incorrect normalised predictions only
        str_data = np.asarray(str_data)
        str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]]
        # log as a table with the appropriate headers
        wandb_tracker.log_table(
            table_name=f"{prefix}/incorrect_predictions",
            columns=["Target", "Pred", "Norm Target", "Norm Pred"],
            data=str_data_incorrect[:num_lines],
        )


def main():
    # 1. Parse input arguments
    # We keep distinct sets of args, for a cleaner separation of model/data/training related args
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
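
    # 2. Initialize the accelerator
    # We let the accelerator handle device placement in this script; we simply specify
    # the inference precision and any experiment trackers being used.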
    if model_args.dtype == "float16":
        mixed_precision = "fp16"
        torch_dtype = torch.float16
    elif model_args.dtype == "bfloat16":
        mixed_precision = "bf16"
        torch_dtype = torch.bfloat16
    else:
        mixed_precision = "no"
        torch_dtype = torch.float32
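    # e.g. `--dtype bfloat16` loads the weights in bf16 and runs the accelerator in bf16
    # mixed precision, while the default `--dtype float32` disables mixed precision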

    kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=7200))

    accelerator = Accelerator(
        gradient_accumulation_steps=training_args.gradient_accumulation_steps,
        mixed_precision=mixed_precision,
        log_with=training_args.report_to,
        project_dir=training_args.output_dir,
        kwargs_handlers=[kwargs],
    )

    accelerator.init_trackers(project_name=data_args.wandb_project)

    # 3. Set up basic logging
    # Create one log on every process with the configuration for debugging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Log a short summary on each process
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )

    # Set the verbosity of the Transformers and Datasets loggers (info on the main process only)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    logger.info("Training/evaluation parameters %s", training_args)

    # 4. Load the dataset splits
    raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
    token = model_args.token if model_args.token is not None else HfFolder().get_token()

    data_splits = data_args.data_split_name.split("+")
    for split in data_splits:
        if data_args.streaming:
            raw_datasets[split] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=split,
                cache_dir=data_args.dataset_cache_dir,
                token=token,
                streaming=True,
            )
        else:
            raw_datasets[split] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=split,
                cache_dir=data_args.dataset_cache_dir,
                token=token,
                streaming=False,
                num_proc=data_args.preprocessing_num_workers,
            )

    if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
        raise ValueError(
            f"--audio_column_name '{data_args.audio_column_name}' not found in dataset"
            f" '{data_args.dataset_name}'. Make sure to set `--audio_column_name` to"
            " the correct audio column - one of"
            f" {', '.join(next(iter(raw_datasets.values())).column_names)}."
        )

    if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names:
        raise ValueError(
            f"--text_column_name '{data_args.text_column_name}' not found in dataset"
            f" '{data_args.dataset_name}'. Make sure to set `--text_column_name` to the"
            " correct text column - one of"
            f" {', '.join(next(iter(raw_datasets.values())).column_names)}."
        )

    # 5. Load the pretrained model, tokenizer, and feature extractor
    config = WhisperConfig.from_pretrained(
        (model_args.config_name if model_args.config_name else model_args.model_name_or_path),
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=token,
    )
    feature_extractor = WhisperFeatureExtractor.from_pretrained(
        (model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path),
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=token,
    )
    tokenizer = WhisperTokenizerFast.from_pretrained(
        (model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path),
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        token=token,
    )
    processor = WhisperProcessor.from_pretrained(
        (model_args.processor_name if model_args.processor_name else model_args.model_name_or_path),
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=token,
    )
    model = WhisperForConditionalGeneration.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        subfolder=model_args.subfolder,
        token=token,
        low_cpu_mem_usage=True,
        torch_dtype=torch_dtype,
        use_flash_attention_2=model_args.attn_type == "flash_attn_2",
    )

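    # Note: `use_flash_attention_2=True` above loads Flash Attention 2 kernels when
    # `--attn_type flash_attn_2` is passed (requires the `flash-attn` package); the
    # `flash_attn` option below instead uses PyTorch SDPA via BetterTransformer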
    if model_args.attn_type == "flash_attn":
        model = model.to_bettertransformer()
    elif model_args.attn_type not in [None, "flash_attn", "flash_attn_2"]:
        raise ValueError(
            f"Argument `attn_type` is set to {model_args.attn_type}. Should be one of:\n"
            "1. `None`: default Transformers attention implementation.\n"
            "2. `flash_attn`: Flash Attention through PyTorch SDPA. Requires `torch>=2.0` and `optimum` to be "
            "installed. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs "
            "(T4, RTX 2080).\n"
            "3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package "
            "https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware "
            "(Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100)."
        )

    if model_args.compile_encoder:
        model.model.encoder.forward = torch.compile(
            model.model.encoder.forward, mode="reduce-overhead", fullgraph=True
        )

    model.eval()

    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    return_timestamps = data_args.return_timestamps
    if hasattr(model.generation_config, "is_multilingual") and model.generation_config.is_multilingual:
        # We need to set the language and task ids for multilingual checkpoints
        tokenizer.set_prefix_tokens(
            language=data_args.language, task=data_args.task, predict_timestamps=return_timestamps
        )
    elif data_args.language is not None:
        raise ValueError(
            "Setting language token for an English-only checkpoint is not permitted. The language argument should "
            "only be set for multilingual checkpoints."
        )

    # 6. Resample the speech dataset: `datasets` takes care of automatically loading and
    # resampling the audio to the feature extractor's sampling rate (16kHz for Whisper)
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name,
        datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate),
    )

    # 7. Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    max_label_length = (
        data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length
    )
    audio_column_name = data_args.audio_column_name
    num_workers = data_args.preprocessing_num_workers
    dataloader_num_workers = training_args.dataloader_num_workers
    text_column_name = data_args.text_column_name
    model_input_name = feature_extractor.model_input_names[0]
    id_column_name = data_args.id_column_name
    normalizer = EnglishTextNormalizer(tokenizer.english_spelling_normalizer)
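    # the Whisper English normalizer lower-cases, expands contractions and strips
    # punctuation, e.g. "He's SURE!" -> roughly "he is sure"
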
    if data_args.max_samples_per_split is not None:
        for split in data_splits:
            raw_datasets[split] = (
                raw_datasets[split].take(data_args.max_samples_per_split)
                if data_args.streaming
                else raw_datasets[split].select(range(data_args.max_samples_per_split))
            )

    def prepare_dataset(batch):
        # process the audio input
        sample = batch[audio_column_name]
        inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
        batch[model_input_name] = inputs.get(model_input_name)[0]

        # process the text targets
        input_str = batch[text_column_name]
        batch["labels"] = tokenizer(input_str, max_length=max_label_length, truncation=True).input_ids

        # record the id of the sample as token ids
        batch["file_id"] = tokenizer(batch[id_column_name], add_special_tokens=False).input_ids
        return batch

    raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
    if data_args.streaming:
        vectorized_datasets = raw_datasets.map(prepare_dataset, remove_columns=raw_datasets_features)
    else:
        vectorized_datasets = raw_datasets.map(
            prepare_dataset,
            remove_columns=raw_datasets_features,
            num_proc=num_workers,
            desc="preprocess dataset",
        )
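    # after the mapping, each example only retains the `input_features`, `labels` and
    # `file_id` columns produced by `prepare_dataset` (the raw columns are removed)
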
    # if `preprocessing_only` is set, we are done once the cached datasets have been written
    if data_args.preprocessing_only:
        cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
        logger.info(f"Data preprocessing finished. Files cached at {cache}.")
        return

    if data_args.streaming and dataloader_num_workers > 0:
        logger.warning(
            "Using multiple dataloader num workers with streaming mode will result in different shards of "
            "data being transcribed in parallel. This is not advised if you want to preserve the order of the "
            "audio-text data."
        )

    # 8. Handle the repository creation
    output_dir = training_args.output_dir
    if training_args.push_to_hub:
        if training_args.hub_model_id is None:
            repo_name = get_full_repo_name(
                Path(output_dir).absolute().name,
                token=token,
            )
        else:
            repo_name = training_args.hub_model_id
        create_repo(repo_name, exist_ok=True, token=token, repo_type="dataset", private=data_args.private_dataset)
        repo = Repository(
            output_dir,
            clone_from=repo_name,
            token=token,
            repo_type="dataset",
        )
        # ensure the generated csv files are tracked with git-lfs
        with open(os.path.join(output_dir, ".gitattributes"), "r+") as f:
            git_lfs_extensions = f.read()
            if "*.csv" not in git_lfs_extensions:
                f.write("*.csv filter=lfs diff=lfs merge=lfs -text")
    else:
        # this is where we'll save our transcriptions
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

    # 9. Define the evaluation metric (word error rate)
    metric = evaluate.load("wer")
    # we exclude the apostrophe, which is standardised by the normalizer
    all_punctuation = list(string.punctuation.replace("'", ""))
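    # e.g. spacing out punctuation turns "hello, world!" into "hello ,  world ! ", so
    # each punctuation mark is scored as its own word by the orthographic WER below
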
    def compute_metrics(preds, labels, file_ids):
        # replace the padded labels by the padding token so that they can be decoded
        for idx in range(len(labels)):
            labels[idx][labels[idx] == -100] = tokenizer.pad_token_id

        pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps)
        # we do not want to group tokens when computing the metrics
        label_str = tokenizer.batch_decode(labels, skip_special_tokens=True)

        # space punctuation for the orthographic WER (c.f. ESB paper https://arxiv.org/abs/2210.13352).
        # Note: each string must have *all* punctuation marks spaced out, so the
        # replacements are applied cumulatively per string, rather than building one
        # list entry per punctuation mark.
        def space_punctuation(text):
            for punctuation in all_punctuation:
                text = text.replace(punctuation, f" {punctuation} ")
            return text

        spaced_pred_str = [space_punctuation(pred) for pred in pred_str]
        spaced_label_str = [space_punctuation(label) for label in label_str]
        wer_ortho = 100 * metric.compute(predictions=spaced_pred_str, references=spaced_label_str)

        # normalize everything and re-compute the WER
        norm_pred_str = [normalizer(pred) for pred in pred_str]
        norm_label_str = [normalizer(label) for label in label_str]
        # for logging, we need the pred/labels to match the norm_pred/norm_labels, so discard the filtered samples here too
        pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0]
        label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0]
        file_ids = [file_ids[i] for i in range(len(file_ids)) if len(norm_label_str[i]) > 0]
        # filtering step: only evaluate the samples that correspond to non-zero normalized references
        norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0]
        norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0]

        wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str)

        return {"wer": wer, "wer_ortho": wer_ortho}, pred_str, label_str, norm_pred_str, norm_label_str, file_ids

    # 10. Define the data collator
    per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)

    data_collator = DataCollatorSpeechSeq2SeqWithPadding(
        processor=processor,
        decoder_start_token_id=model.config.decoder_start_token_id,
        decoder_prev_token_id=tokenizer.all_special_ids[-3],  # assumed to resolve to the <|startofprev|> token id
        input_padding="longest",
        target_padding="max_length",
        max_target_length=max_label_length,
    )

    # 11. Configure the generation arguments
    num_beams = (
        training_args.generation_num_beams
        if training_args.generation_num_beams is not None
        else getattr(model.generation_config, "num_beams", 1)
    )

    gen_kwargs = {
        "max_length": max_label_length,
        "num_beams": num_beams,
        "return_timestamps": return_timestamps,
    }
    if hasattr(model.generation_config, "is_multilingual") and model.generation_config.is_multilingual:
        # forcing the language and task tokens helps multilingual models in their generations
        gen_kwargs.update(
            {
                "language": data_args.language,
                "task": data_args.task,
            }
        )

    # wrap the model with accelerate for distributed inference
    model = accelerator.prepare(model)

    def eval_step_with_save(split="eval"):
        # ======================== Evaluating ==============================
        eval_preds = []
        eval_labels = []
        eval_ids = []
        eval_start = time.time()

        eval_loader = DataLoader(
            vectorized_datasets[split],
            batch_size=per_device_eval_batch_size,
            collate_fn=data_collator,
            num_workers=dataloader_num_workers,
            pin_memory=True,
        )

        eval_loader = accelerator.prepare(eval_loader)
        batches = tqdm(eval_loader, desc=f"Evaluating {split}...", disable=not accelerator.is_local_main_process)

        # sanitize the split name for the output file (e.g. 'validation.clean' ->
        # 'validation-clean'), keeping the original `split` for dataset look-ups below
        file_split = split.replace(".", "-").split("/")[-1]
        output_csv = os.path.join(output_dir, f"{file_split}-transcription.csv")

        for step, batch in enumerate(batches):
            file_ids = batch.pop("file_ids")
            # generate in the autocast dtype; unwrap the DDP module first, falling back
            # to the bare model when running on a single process
            generate_fn = model.module.generate if hasattr(model, "module") else model.generate
            generated_ids = generate_fn(batch["input_features"].to(dtype=torch_dtype), **gen_kwargs)
            generated_ids = accelerator.pad_across_processes(generated_ids, dim=1, pad_index=tokenizer.pad_token_id)
            # gather the predictions, labels and file ids from all processes
            file_ids, generated_ids, labels = accelerator.gather_for_metrics(
                (file_ids, generated_ids, batch["labels"])
            )
            eval_preds.extend(generated_ids.cpu().numpy())
            eval_labels.extend(labels.cpu().numpy())
            file_ids = tokenizer.batch_decode(file_ids, skip_special_tokens=True)
            eval_ids.extend(file_ids)

            if step % training_args.logging_steps == 0 and step > 0:
                batches.write(f"Saving transcriptions for split {file_split} step {step}")
                accelerator.wait_for_everyone()
                # decode into a *local* variable: `eval_preds` must keep accumulating raw
                # token ids, otherwise the next intermediate save would try to re-decode
                # already-decoded strings
                if data_args.decode_token_ids:
                    interim_preds = tokenizer.batch_decode(
                        eval_preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps
                    )
                else:
                    interim_preds = eval_preds
                csv_data = [[eval_ids[i], interim_preds[i]] for i in range(len(interim_preds))]

                with open(output_csv, "w", encoding="UTF8", newline="") as f:
                    writer = csv.writer(f)
                    # write the csv header followed by all transcriptions seen so far
                    writer.writerow(["file_id", "whisper_transcript"])
                    writer.writerows(csv_data)

                if training_args.push_to_hub and accelerator.is_main_process:
                    repo.push_to_hub(
                        commit_message=f"Saving transcriptions for split {file_split} step {step}.",
                        blocking=False,
                    )

        accelerator.wait_for_everyone()
        eval_time = time.time() - eval_start

        # compute the WER metric for the eval splits only
        wer_desc = ""
        if "validation" in split or "test" in split:
            wer_metric, pred_str, label_str, norm_pred_str, norm_label_str, eval_ids = compute_metrics(
                eval_preds, eval_labels, eval_ids
            )
            wer_desc = " ".join([f"Eval {key}: {value} |" for key, value in wer_metric.items()])
            # save the metrics + predictions
            log_metric(
                accelerator,
                metrics=wer_metric,
                train_time=eval_time,
                prefix=file_split,
            )
            log_pred(
                accelerator,
                pred_str,
                label_str,
                norm_pred_str,
                norm_label_str,
                prefix=file_split,
            )
            if data_args.decode_token_ids:
                eval_preds = pred_str
        elif data_args.decode_token_ids:
            eval_preds = tokenizer.batch_decode(
                eval_preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps
            )

        batches.write(f"Saving final transcriptions for split {file_split}.")
        csv_data = [[eval_ids[i], eval_preds[i]] for i in range(len(eval_preds))]
        with open(output_csv, "w", encoding="UTF8", newline="") as f:
            writer = csv.writer(f)
            # write the csv header followed by the final transcriptions
            writer.writerow(["file_id", "whisper_transcript"])
            writer.writerows(csv_data)

        # print the metrics to stdout
        logger.info(wer_desc)

        if not data_args.streaming:
            raw_datasets[split] = raw_datasets[split].add_column("whisper_transcript", eval_preds)

    # 12. Run the pseudo-labelling loop over each of the given splits
    logger.info("***** Running Labelling *****")
    logger.info(f"  Instantaneous batch size per device = {training_args.per_device_eval_batch_size}")
    logger.info(
        f"  Total eval batch size (w. parallel & distributed) = {training_args.per_device_eval_batch_size * accelerator.num_processes}"
    )
    logger.info(f"  Predict labels with timestamps = {return_timestamps}")
    logger.info(f"  Decode labels to transcriptions = {data_args.decode_token_ids}")
    for split in data_splits:
        eval_step_with_save(split=split)
        accelerator.wait_for_everyone()
        if training_args.push_to_hub and accelerator.is_main_process:
            repo.push_to_hub(
                commit_message=f"Saving final transcriptions for split {split.replace('.', '-').split('/')[-1]}",
                blocking=False,
            )
    if not data_args.streaming and accelerator.is_main_process:
        raw_datasets.save_to_disk(output_dir, num_proc=num_workers)
        if training_args.push_to_hub:
            raw_datasets.push_to_hub(repo_name, config_name=data_args.dataset_config_name)
    accelerator.end_training()


if __name__ == "__main__":
    main()