| """ |
| 2025.10.1 |
| 2025.10.1 |
| 4.56.2 |
| 0.22.2 |
| __UNSLOTH_VERSIONING__ |
| """ |
|
|
|
|
| from torch import Tensor |
| import torch |
| import torch.nn as nn |
| from torch.nn import functional as F |
| from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable |
| from trl.trainer.online_dpo_trainer import ( |
|     Any, AutoModelForCausalLM, BaseImageProcessor, BasePairwiseJudge, Callable, |
|     DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalPrediction, F, |
|     FeatureExtractionMixin, GenerationConfig, IterableDataset, OnlineDPOConfig, |
|     OnlineDPOTrainer, OptimizerNames, Optional, Path, PreTrainedModel, |
|     PreTrainedTokenizerBase, ProcessorMixin, SIMPLE_CHAT_TEMPLATE, Trainer, |
|     TrainerCallback, Union, apply_chat_template, create_reference_model, datasets, |
|     disable_dropout_in_model, empty_cache, generate_model_card, get_comet_experiment_url, |
|     get_reward, is_conversational, is_wandb_available, jinja2, logger, logging, |
|     maybe_apply_chat_template, nn, os, prepare_deepspeed, prepare_peft_model, seed_worker, |
|     textwrap, torch, truncate_right, unwrap_model_for_generation, version, wandb, wraps, |
| ) |
|
|
|
|
| import os |
| from typing import * |
| from dataclasses import dataclass, field |
| from packaging.version import Version |
| import torch |
| import numpy as np |
| from contextlib import nullcontext |
| from torch.nn import functional as F |
| from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling |
| from transformers.training_args import ParallelMode |
|
|
| |
| import functools |
| from types import MethodType |
| def prepare_for_training_mode(f): |
|     """Wraps a Trainer method so the model is switched into training mode before the |
|     call and back into fast inference mode afterwards, when the model supports it.""" |
|     @functools.wraps(f) |
|     def wrapper(self, *args, **kwargs): |
|         # Enable training mode (gradients, dropout) on Unsloth fast models |
|         if hasattr(self, 'model') and hasattr(self.model, "for_training"): |
|             self.model.for_training() |
|         output = f(self, *args, **kwargs) |
|         # Restore fast inference mode once the wrapped method returns |
|         if hasattr(self, 'model') and hasattr(self.model, "for_inference"): |
|             self.model.for_inference() |
|         return output |
|     return wrapper |
| pass |
|
|
| torch_compile_options = { |
|     "epilogue_fusion"   : True,   # fuse pointwise epilogues into matmul kernels |
|     "max_autotune"      : False,  # skip exhaustive autotuning to keep compile times low |
|     "shape_padding"     : True,   # pad tensor shapes to GPU-friendly sizes |
|     "trace.enabled"     : False, |
|     "triton.cudagraphs" : False,  # CUDA graphs can misbehave with training loops |
| } |
|
|
| @torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,) |
| def chunked_selective_log_softmax(logits, index): |
|     """Computes the per-token log-probabilities log_softmax(logits) gathered at `index`, |
|     in float32, processing the flattened batch in 4 chunks to bound peak memory.""" |
| chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0) |
| chunked_index = torch.chunk(index.reshape(-1), chunks = 4, dim = 0) |
| all_per_token_logps = [] |
| |
| for chunk_logits, chunk_index in zip(chunked_logits, chunked_index): |
| chunk_logits = chunk_logits.to(torch.float32) |
| selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1) |
| logsumexp_values = torch.logsumexp(chunk_logits, dim = -1) |
| per_token_logps = selected_logits - logsumexp_values |
| all_per_token_logps.append(per_token_logps) |
| pass |
| all_per_token_logps = torch.concat(all_per_token_logps) |
| all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1])) |
| return all_per_token_logps |
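| # Illustrative sketch (not executed on import; the `_demo_*` name is hypothetical and |
| # exists only as documentation): the chunked helper above matches a plain float32 |
| # log-softmax followed by a gather. |
| def _demo_chunked_selective_log_softmax(): |
|     logits = torch.randn(2, 8, 32)        # (batch, seq_len, vocab_size) |
|     index = torch.randint(0, 32, (2, 8))  # token ids whose log-probs we want |
|     chunked = chunked_selective_log_softmax(logits, index) |
|     reference = torch.gather( |
|         logits.float().log_softmax(dim = -1), dim = -1, index = index.unsqueeze(-1) |
|     ).squeeze(-1) |
|     assert torch.allclose(chunked, reference, atol = 1e-5) |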
|
|
| def calculate_pad_tokens_in_prompt( |
| input_ids: torch.Tensor, |
| logits_to_keep: int, |
| pad_token_id: int |
| ) -> torch.Tensor: |
| """ |
| Given prompt tensor, it returns all the left padded tokens in that sequence. so [pad, pad, pad, cat] = 3 tokens |
| """ |
| if logits_to_keep >= input_ids.shape[1]: |
| raise ValueError("logits_to_keep must be smaller than the sequence length.") |
|
|
| prompt_section = input_ids[:, :-logits_to_keep] |
|
|
| padding_mask = (prompt_section == pad_token_id) |
|
|
| pad_token_counts = padding_mask.sum(dim=1) |
|
|
| return pad_token_counts |
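| # Illustrative sketch (not executed on import), assuming pad_token_id = 0: two prompts |
| # of length 4 followed by 2 completion positions each (logits_to_keep = 2). |
| def _demo_calculate_pad_tokens_in_prompt(): |
|     input_ids = torch.tensor([[0, 0, 5, 6, 7, 8],   # 2 left pads in the prompt |
|                               [0, 4, 5, 6, 7, 8]])  # 1 left pad in the prompt |
|     counts = calculate_pad_tokens_in_prompt(input_ids, logits_to_keep = 2, pad_token_id = 0) |
|     assert counts.tolist() == [2, 1] |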
|
|
| def create_completion_attention_mask( |
| completion_input_ids: torch.Tensor, |
| left_pad_tokens_per_prompt: torch.Tensor, |
| max_left_pad: int, |
| pad_token_id: int |
| ) -> torch.Tensor: |
| """ |
| Given that we have a sequence, [p,p,p,c,c,c,pad,pad,pad] |
| |
| Where p are extra prompt tokens we got from slicing the torch tensor, c is completion tokens |
| and pad are pad tokens, this function would make a completion mask that would 0 out the pad |
| and p tokens. so in this example [0,0,0,1,1,1,0,0,0] |
| """ |
| batch_size, completion_len = completion_input_ids.shape |
| device = completion_input_ids.device |
|
|
| num_tokens_to_mask = max_left_pad - left_pad_tokens_per_prompt |
|
|
| indices = torch.arange(completion_len, device=device).unsqueeze(0) |
| shift_mask = indices >= num_tokens_to_mask.unsqueeze(1) |
|
|
| non_padding_mask = (completion_input_ids != pad_token_id) |
|
|
| final_mask = shift_mask & non_padding_mask |
|
|
| return final_mask |
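| # Illustrative sketch (not executed on import), assuming pad_token_id = 0. Row 0 had no |
| # left padding, so its slice carries max_left_pad - 0 = 2 leftover prompt tokens (the 9s); |
| # row 1 had 2 left pads, so its completion starts immediately. |
| def _demo_create_completion_attention_mask(): |
|     completion_ids = torch.tensor([[9, 9, 5, 6, 0, 0], |
|                                    [5, 6, 7, 0, 0, 0]]) |
|     left_pads = torch.tensor([0, 2])  # left-pad counts per prompt |
|     mask = create_completion_attention_mask(completion_ids, left_pads, max_left_pad = 2, pad_token_id = 0) |
|     assert mask.int().tolist() == [[0, 0, 1, 1, 0, 0], [1, 1, 1, 0, 0, 0]] |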
|
|
| def left_pack_padding(tensor: torch.Tensor, pad_id: int) -> torch.Tensor: |
| """ |
| Moves all padding tokens in each sequence of a batch to the right. |
| """ |
| mask = (tensor != pad_id) |
| |
| sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True) |
| packed_tensor = torch.gather(tensor, 1, sorted_indices) |
| return packed_tensor |
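| # Illustrative sketch (not executed on import), assuming pad_id = 0. |
| def _demo_left_pack_padding(): |
|     batch = torch.tensor([[0, 0, 7, 8], |
|                           [0, 5, 0, 6]]) |
|     assert left_pack_padding(batch, pad_id = 0).tolist() == [[7, 8, 0, 0], [5, 6, 0, 0]] |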
| def vLLMSamplingParams(**kwargs): |
| from vllm import SamplingParams |
| sampling_params = SamplingParams(**kwargs) |
| sampling_params._set_kwargs = kwargs |
| return sampling_params |
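| # Example (assuming vLLM is installed): the kwargs are stashed on `_set_kwargs` so the |
| # trainer can merge them into its own SamplingParams at generation time. |
| # sampling_params = vLLMSamplingParams(top_p = 0.9, seed = 3407) |
| # config = UnslothOnlineDPOConfig(use_vllm = True, vllm_sampling_params = sampling_params) |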
| @dataclass |
| class UnslothOnlineDPOConfig(OnlineDPOConfig): |
| """ |
| |
| Configuration class for the [`OnlineDPOTrainer`]. |
| |
| This class includes only the parameters that are specific to Online DPO training. For a full list of training |
| arguments, please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this |
| class may differ from those in [`~transformers.TrainingArguments`]. |
| |
| Using [`~transformers.HfArgumentParser`] we can turn this class into |
| [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the |
| command line. |
| |
| Parameters: |
| reward_model_path (`str` or `None`, *optional*, defaults to `None`): |
| Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both. |
| judge (`str` or `None`, *optional*, defaults to `None`): |
| Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both. |
| max_new_tokens (`int`, *optional*, defaults to `64`): |
| Maximum number of tokens to generate per completion. |
| max_length (`int`, *optional*, defaults to `256`): |
| Maximum total length of the sequence (prompt + completion) used to compute log probabilities. If the |
| sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the completion as |
| possible. |
| temperature (`float`, *optional*, defaults to `0.9`): |
| Temperature for sampling. The higher the temperature, the more random the completions. |
| missing_eos_penalty (`float` or `None`, *optional*, defaults to `None`): |
| Penalty applied to the score when the model fails to generate an EOS token. This is useful to |
| encourage the model to generate completions shorter than the maximum length (`max_new_tokens`). |
| The penalty must be a positive value. |
| beta (`float` or `list[float]`, *optional*, defaults to `0.1`): |
| Parameter controlling the deviation from the reference model. Higher β means less deviation from the |
| reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in |
| the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided, a new β is |
| selected at each epoch, and the last β is reused for any remaining epochs. |
| loss_type (`str`, *optional*, defaults to `"sigmoid"`): |
| Type of loss to use. Possible values are: |
| |
| - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper. |
| - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper. |
| |
| dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): |
| Number of processes to use for processing the dataset. |
| disable_dropout (`bool`, *optional*, defaults to `True`): |
| Whether to disable dropout in the model and reference model. |
| use_vllm (`bool`, *optional*, defaults to `False`): |
| Whether to use vLLM for generating completions. Requires vLLM to be installed (`pip install vllm`). |
| vllm_model_impl (`str`, *optional*, defaults to `"vllm"`): |
| Model implementation to use for vLLM. Must be one of `"transformers"` or `"vllm"`. `"transformers"`: Use |
| the `transformers` backend for model implementation. `"vllm"`: Use the `vllm` library for model |
| implementation. |
| gpu_memory_utilization (`float`, *optional*, defaults to `0.55`): |
| The vLLM memory utilization. The default value is 0.55. |
| ds3_gather_for_generation (`bool`, *optional*, defaults to `True`): |
| This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation, |
| improving generation speed. However, disabling this option allows training models that exceed the VRAM |
| capacity of a single GPU, albeit at the cost of slower generation. |
| model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`): |
| Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a |
| string. |
| |
| """ |
| vllm_sampling_params: Optional[Any] = field( |
| default = None, |
| metadata = {'help': 'vLLM SamplingParams'}, |
| ) |
| unsloth_num_chunks : Optional[int] = field( |
| default = -1, |
| metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'}, |
| ) |
| max_seq_length : Optional[int] = field( |
| default = None, |
| metadata = {'help': 'Maximum sequence length to truncate to.'}, |
| ) |
| def __init__( |
| self, |
| output_dir = None, |
| overwrite_output_dir = None, |
| do_train = False, |
| do_eval = False, |
| do_predict = False, |
| eval_strategy = 'no', |
| prediction_loss_only = False, |
| per_device_train_batch_size = 4, |
| per_device_eval_batch_size = 4, |
| per_gpu_train_batch_size = None, |
| per_gpu_eval_batch_size = None, |
| gradient_accumulation_steps = 2, |
| eval_accumulation_steps = 2, |
| eval_delay = 0, |
| torch_empty_cache_steps = 250, |
| learning_rate = 5e-05, |
| weight_decay = 0.01, |
| adam_beta1 = 0.9, |
| adam_beta2 = 0.999, |
| adam_epsilon = 1e-08, |
| max_grad_norm = 1.0, |
| num_train_epochs = 3.0, |
| max_steps = -1, |
| lr_scheduler_type = 'linear', |
| warmup_ratio = 0.1, |
| warmup_steps = 0, |
| log_level = 'passive', |
| log_level_replica = 'warning', |
| log_on_each_node = True, |
| logging_dir = None, |
| logging_strategy = 'steps', |
| logging_first_step = False, |
| logging_steps = 1, |
| logging_nan_inf_filter = False, |
| save_strategy = 'steps', |
| save_steps = 500, |
| save_total_limit = None, |
| save_safetensors = True, |
| save_on_each_node = False, |
| save_only_model = False, |
| restore_callback_states_from_checkpoint = False, |
| no_cuda = False, |
| use_cpu = False, |
| use_mps_device = False, |
| seed = 3407, |
| data_seed = 3407, |
| jit_mode_eval = False, |
| use_ipex = False, |
| bf16 = False, |
| fp16 = False, |
| fp16_opt_level = 'O1', |
| half_precision_backend = 'auto', |
| bf16_full_eval = False, |
| fp16_full_eval = False, |
| tf32 = None, |
| local_rank = -1, |
| ddp_backend = None, |
| tpu_num_cores = None, |
| tpu_metrics_debug = False, |
| debug = '', |
| dataloader_drop_last = False, |
| eval_steps = None, |
| dataloader_num_workers = 0, |
| dataloader_prefetch_factor = None, |
| past_index = -1, |
| run_name = None, |
| disable_tqdm = None, |
| remove_unused_columns = True, |
| label_names = None, |
| load_best_model_at_end = False, |
| metric_for_best_model = None, |
| greater_is_better = None, |
| ignore_data_skip = False, |
| fsdp = '', |
| fsdp_min_num_params = 0, |
| fsdp_config = None, |
| fsdp_transformer_layer_cls_to_wrap = None, |
| accelerator_config = None, |
| parallelism_config = None, |
| deepspeed = None, |
| label_smoothing_factor = 0.0, |
| optim = 'adamw_8bit', |
| optim_args = None, |
| adafactor = False, |
| group_by_length = False, |
| length_column_name = 'length', |
| report_to = None, |
| ddp_find_unused_parameters = None, |
| ddp_bucket_cap_mb = None, |
| ddp_broadcast_buffers = None, |
| dataloader_pin_memory = True, |
| dataloader_persistent_workers = False, |
| skip_memory_metrics = True, |
| use_legacy_prediction_loop = False, |
| push_to_hub = False, |
| resume_from_checkpoint = None, |
| hub_model_id = None, |
| hub_strategy = 'every_save', |
| hub_token = None, |
| hub_private_repo = None, |
| hub_always_push = False, |
| hub_revision = None, |
| gradient_checkpointing = True, |
| gradient_checkpointing_kwargs = None, |
| include_inputs_for_metrics = False, |
| eval_do_concat_batches = True, |
| fp16_backend = 'auto', |
| push_to_hub_model_id = None, |
| push_to_hub_organization = None, |
| push_to_hub_token = None, |
| mp_parameters = '', |
| auto_find_batch_size = False, |
| full_determinism = False, |
| torchdynamo = None, |
| ray_scope = 'last', |
| ddp_timeout = 1800, |
| torch_compile = False, |
| torch_compile_backend = None, |
| torch_compile_mode = None, |
| include_tokens_per_second = False, |
| include_num_input_tokens_seen = False, |
| neftune_noise_alpha = None, |
| optim_target_modules = None, |
| batch_eval_metrics = False, |
| eval_on_start = False, |
| use_liger_kernel = False, |
| liger_kernel_config = None, |
| eval_use_gather_object = False, |
| average_tokens_across_devices = True, |
| reward_model_path = None, |
| judge = None, |
| max_new_tokens = 64, |
| max_length = 512, |
| temperature = 0.9, |
| missing_eos_penalty = None, |
| loss_type = 'sigmoid', |
| dataset_num_proc = None, |
| disable_dropout = True, |
| use_vllm = False, |
| vllm_model_impl = 'vllm', |
| gpu_memory_utilization = 0.55, |
| ds3_gather_for_generation = True, |
| model_init_kwargs = None, |
| vllm_sampling_params = None, |
| unsloth_num_chunks = -1, |
| max_seq_length = None, |
| **kwargs, |
| ): |
| if learning_rate < 1e-7: print(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!') |
| if learning_rate > 1: print(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!') |
| if output_dir is None and save_strategy == 'steps' and save_steps == 500: |
| output_dir = 'unsloth_training_checkpoints' |
| save_strategy = 'no' |
| if dataset_num_proc is None: |
| from multiprocessing import cpu_count |
| dataset_num_proc = max(cpu_count()+4, 2) |
| if temperature <= 0: |
|     raise ValueError('Unsloth: Please set a positive non-zero temperature since your results will be wrong.') |
| elif temperature >= 10: |
|     raise ValueError('Unsloth: Please set a positive non-zero temperature less than 10, since sampling will be quite erratic.') |
| |
| |
| super().__init__( |
| output_dir = output_dir, |
| overwrite_output_dir = overwrite_output_dir, |
| do_train = do_train, |
| do_eval = do_eval, |
| do_predict = do_predict, |
| eval_strategy = eval_strategy, |
| prediction_loss_only = prediction_loss_only, |
| per_device_train_batch_size = per_device_train_batch_size, |
| per_device_eval_batch_size = per_device_eval_batch_size, |
| per_gpu_train_batch_size = per_gpu_train_batch_size, |
| per_gpu_eval_batch_size = per_gpu_eval_batch_size, |
| gradient_accumulation_steps = gradient_accumulation_steps, |
| eval_accumulation_steps = eval_accumulation_steps, |
| eval_delay = eval_delay, |
| torch_empty_cache_steps = torch_empty_cache_steps, |
| learning_rate = learning_rate, |
| weight_decay = weight_decay, |
| adam_beta1 = adam_beta1, |
| adam_beta2 = adam_beta2, |
| adam_epsilon = adam_epsilon, |
| max_grad_norm = max_grad_norm, |
| num_train_epochs = num_train_epochs, |
| max_steps = max_steps, |
| lr_scheduler_type = lr_scheduler_type, |
| warmup_ratio = warmup_ratio, |
| warmup_steps = warmup_steps, |
| log_level = log_level, |
| log_level_replica = log_level_replica, |
| log_on_each_node = log_on_each_node, |
| logging_dir = logging_dir, |
| logging_strategy = logging_strategy, |
| logging_first_step = logging_first_step, |
| logging_steps = logging_steps, |
| logging_nan_inf_filter = logging_nan_inf_filter, |
| save_strategy = save_strategy, |
| save_steps = save_steps, |
| save_total_limit = save_total_limit, |
| save_safetensors = save_safetensors, |
| save_on_each_node = save_on_each_node, |
| save_only_model = save_only_model, |
| restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint, |
| no_cuda = no_cuda, |
| use_cpu = use_cpu, |
| use_mps_device = use_mps_device, |
| seed = seed, |
| data_seed = data_seed, |
| jit_mode_eval = jit_mode_eval, |
| use_ipex = use_ipex, |
| bf16 = bf16, |
| fp16 = fp16, |
| fp16_opt_level = fp16_opt_level, |
| half_precision_backend = half_precision_backend, |
| bf16_full_eval = bf16_full_eval, |
| fp16_full_eval = fp16_full_eval, |
| tf32 = tf32, |
| local_rank = local_rank, |
| ddp_backend = ddp_backend, |
| tpu_num_cores = tpu_num_cores, |
| tpu_metrics_debug = tpu_metrics_debug, |
| debug = debug, |
| dataloader_drop_last = dataloader_drop_last, |
| eval_steps = eval_steps, |
| dataloader_num_workers = dataloader_num_workers, |
| dataloader_prefetch_factor = dataloader_prefetch_factor, |
| past_index = past_index, |
| run_name = run_name, |
| disable_tqdm = disable_tqdm, |
| remove_unused_columns = remove_unused_columns, |
| label_names = label_names, |
| load_best_model_at_end = load_best_model_at_end, |
| metric_for_best_model = metric_for_best_model, |
| greater_is_better = greater_is_better, |
| ignore_data_skip = ignore_data_skip, |
| fsdp = fsdp, |
| fsdp_min_num_params = fsdp_min_num_params, |
| fsdp_config = fsdp_config, |
| fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap, |
| accelerator_config = accelerator_config, |
| parallelism_config = parallelism_config, |
| deepspeed = deepspeed, |
| label_smoothing_factor = label_smoothing_factor, |
| optim = optim, |
| optim_args = optim_args, |
| adafactor = adafactor, |
| group_by_length = group_by_length, |
| length_column_name = length_column_name, |
| report_to = report_to, |
| ddp_find_unused_parameters = ddp_find_unused_parameters, |
| ddp_bucket_cap_mb = ddp_bucket_cap_mb, |
| ddp_broadcast_buffers = ddp_broadcast_buffers, |
| dataloader_pin_memory = dataloader_pin_memory, |
| dataloader_persistent_workers = dataloader_persistent_workers, |
| skip_memory_metrics = skip_memory_metrics, |
| use_legacy_prediction_loop = use_legacy_prediction_loop, |
| push_to_hub = push_to_hub, |
| resume_from_checkpoint = resume_from_checkpoint, |
| hub_model_id = hub_model_id, |
| hub_strategy = hub_strategy, |
| hub_token = hub_token, |
| hub_private_repo = hub_private_repo, |
| hub_always_push = hub_always_push, |
| hub_revision = hub_revision, |
| gradient_checkpointing = gradient_checkpointing, |
| gradient_checkpointing_kwargs = gradient_checkpointing_kwargs, |
| include_inputs_for_metrics = include_inputs_for_metrics, |
| eval_do_concat_batches = eval_do_concat_batches, |
| fp16_backend = fp16_backend, |
| push_to_hub_model_id = push_to_hub_model_id, |
| push_to_hub_organization = push_to_hub_organization, |
| push_to_hub_token = push_to_hub_token, |
| mp_parameters = mp_parameters, |
| auto_find_batch_size = auto_find_batch_size, |
| full_determinism = full_determinism, |
| torchdynamo = torchdynamo, |
| ray_scope = ray_scope, |
| ddp_timeout = ddp_timeout, |
| torch_compile = torch_compile, |
| torch_compile_backend = torch_compile_backend, |
| torch_compile_mode = torch_compile_mode, |
| include_tokens_per_second = include_tokens_per_second, |
| include_num_input_tokens_seen = include_num_input_tokens_seen, |
| neftune_noise_alpha = neftune_noise_alpha, |
| optim_target_modules = optim_target_modules, |
| batch_eval_metrics = batch_eval_metrics, |
| eval_on_start = eval_on_start, |
| use_liger_kernel = use_liger_kernel, |
| liger_kernel_config = liger_kernel_config, |
| eval_use_gather_object = eval_use_gather_object, |
| average_tokens_across_devices = average_tokens_across_devices, |
| reward_model_path = reward_model_path, |
| judge = judge, |
| max_new_tokens = max_new_tokens, |
| max_length = max_length, |
| temperature = temperature, |
| missing_eos_penalty = missing_eos_penalty, |
| loss_type = loss_type, |
| dataset_num_proc = dataset_num_proc, |
| disable_dropout = disable_dropout, |
| use_vllm = use_vllm, |
| vllm_model_impl = vllm_model_impl, |
| gpu_memory_utilization = gpu_memory_utilization, |
| ds3_gather_for_generation = ds3_gather_for_generation, |
| model_init_kwargs = model_init_kwargs,**kwargs) |
| self.vllm_sampling_params = vllm_sampling_params |
| self.unsloth_num_chunks = unsloth_num_chunks |
| self.max_seq_length = max_seq_length |
| pass |
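| # Illustrative usage sketch (the values below are examples, not changed defaults): |
| # config = UnslothOnlineDPOConfig( |
| #     output_dir = "outputs", |
| #     per_device_train_batch_size = 2, |
| #     gradient_accumulation_steps = 4, |
| #     max_new_tokens = 64, |
| #     temperature = 0.9, |
| # ) |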
|
|
| class _UnslothOnlineDPOTrainer(Trainer): |
| r"""""" |
|
|
| _tag_names = ["trl", "online-dpo"] |
|
|
| def __init__( |
| self, |
| model: Union[PreTrainedModel, nn.Module, str], |
| ref_model: Union[PreTrainedModel, nn.Module, None] = None, |
| reward_model: Union[PreTrainedModel, nn.Module, None] = None, |
| judge: Optional[BasePairwiseJudge] = None, |
| args: Optional[OnlineDPOConfig] = None, |
| data_collator: Optional[DataCollator] = None, |
| train_dataset: Optional[Union[Dataset, IterableDataset, "datasets.Dataset"]] = None, |
| eval_dataset: Optional[Union[Dataset, dict[str, Dataset], "datasets.Dataset"]] = None, |
| processing_class: Optional[ |
| Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] |
| ] = None, |
| reward_processing_class: Optional[PreTrainedTokenizerBase] = None, |
| peft_config: Optional[dict] = None, |
| compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None, |
| callbacks: Optional[list[TrainerCallback]] = None, |
| optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), |
| preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, |
| ) -> None: |
|
|
| if hasattr(model, 'vllm_engine') and hasattr(args, 'use_vllm'): |
| if not getattr(args, 'use_vllm', False): |
| args.use_vllm = True |
| if ref_model is model: |
| raise ValueError( |
| "`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the " |
| "same as `model`, either omit the `ref_model` argument or pass `None`." |
| ) |
|
|
| self.ref_model = ref_model |
|
|
| if reward_model is not None and judge is not None: |
| logger.warning( |
| "Both `reward_model` and `judge` are provided. Please choose provide only one of them. " |
| "Ignoring `judge` and using `reward_model`.", |
| ) |
| judge = None |
| elif reward_model is None and judge is None: |
| raise ValueError("Either `reward_model` or `judge` must be provided.") |
|
|
| self.reward_model = reward_model |
| self.reward_processing_class = reward_processing_class |
| self.judge = judge |
|
|
| if args is None: |
|     raise ValueError("`args` must be provided.") |

| if args.missing_eos_penalty is not None and judge is not None: |
|     raise ValueError("`missing_eos_penalty` is not supported when `judge` is provided.") |

| if processing_class is None: |
|     raise ValueError("`processing_class` must be provided.") |
|
|
| model_init_kwargs = args.model_init_kwargs or {} |
| if isinstance(model, str): |
| model_id = model |
|
|
| |
| torch_dtype = model_init_kwargs.get("torch_dtype") |
| if isinstance(torch_dtype, torch.dtype) or torch_dtype == "auto" or torch_dtype is None: |
| pass |
| elif isinstance(torch_dtype, str): |
| torch_dtype = getattr(torch, torch_dtype) |
| model_init_kwargs["torch_dtype"] = torch_dtype |
| else: |
| raise ValueError( |
| "Invalid `torch_dtype` passed to `OnlineDPOConfig`. Expected either 'auto' or a string " |
| f"representing a `torch.dtype` (e.g., 'float32'), but got {torch_dtype}." |
| ) |
|
|
| model = AutoModelForCausalLM.from_pretrained(model_id, **model_init_kwargs) |
| else: |
| if args.model_init_kwargs is not None: |
| raise ValueError( |
| "You passed `model_init_kwargs` to the `OnlineDPOConfig`, but your model is already instantiated. " |
| "This argument can only be used when the `model` argument is a string." |
| ) |
| self.is_encoder_decoder = model.config.is_encoder_decoder |
|
|
| if False: |
| model = prepare_peft_model(model, peft_config, args) |
|
|
| |
| if args.disable_dropout: |
| disable_dropout_in_model(model) |
| if self.ref_model is not None: |
| disable_dropout_in_model(self.ref_model) |
|
|
| |
| |
| |
| |
| if ref_model is None: |
| if False: |
| self.ref_model = create_reference_model(model) |
| else: |
| self.ref_model = None |
| else: |
| self.ref_model = ref_model |
| self.ref_model.eval() |
|
|
| |
| if self.reward_model is not None: |
| self.reward_model.eval() |
|
|
| |
| if data_collator is None: |
| data_collator = DPODataCollatorWithPadding(pad_token_id=processing_class.pad_token_id) |
|
|
| self.max_length = args.max_length |
|
|
| self.stats = { |
| "objective/kl": [], |
| "objective/entropy": [], |
| "objective/non_score_reward": [], |
| "rewards/chosen": [], |
| "rewards/rejected": [], |
| "rewards/accuracies": [], |
| "rewards/margins": [], |
| "logps/chosen": [], |
| "logps/rejected": [], |
| "val/contain_eos_token": [], |
| "beta": [], |
| } |
| if self.reward_model is not None: |
| self.stats["objective/rlhf_reward"] = [] |
| self.stats["objective/scores_margin"] = [] |
| self.stats["objective/scores"] = [] |
|
|
| if args.use_vllm: |
| from vllm import SamplingParams  # local import keeps vLLM an optional dependency |
| self.llm = model.vllm_engine |
| self._last_loaded_step = 0 |
| self.generation_config = SamplingParams( |
|     n=2,  # two completions per prompt form the preference pair |
|     max_tokens=args.max_new_tokens, |
|     temperature=args.temperature, |
|     top_k=50, |
|     top_p=1.0, |
|     detokenize=False, |
|     **getattr(getattr(args, 'vllm_sampling_params', None), '_set_kwargs', {}), |
| ) |
| else: |
| self.generation_config = GenerationConfig( |
| max_new_tokens=args.max_new_tokens, |
| temperature=args.temperature, |
| top_k=50, |
| top_p=1.0, |
| do_sample=True, |
| use_cache=False if args.gradient_checkpointing else True, |
| ) |
|
|
| |
| |
| |
| |
| |
| |
| model.warnings_issued["estimate_tokens"] = True |
|
|
| super().__init__( |
| model=model, |
| args=args, |
| data_collator=data_collator, |
| train_dataset=train_dataset, |
| eval_dataset=eval_dataset, |
| processing_class=processing_class, |
| compute_metrics=compute_metrics, |
| callbacks=callbacks, |
| optimizers=optimizers, |
| preprocess_logits_for_metrics=preprocess_logits_for_metrics, |
| ) |
|
|
| |
| if hasattr(self.model, "add_model_tags"): |
| self.model.add_model_tags(self._tag_names) |
|
|
| self._beta = args.beta |
|
|
| |
| if self.is_deepspeed_enabled: |
| if self.reward_model is not None: |
| self.reward_model = prepare_deepspeed( |
| self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16 |
| ) |
| if self.ref_model is not None: |
| self.ref_model = prepare_deepspeed( |
| self.ref_model, args.per_device_train_batch_size, args.fp16, args.bf16 |
| ) |
| else: |
| if self.ref_model is not None: |
| self.ref_model = self.ref_model.to(self.accelerator.device) |
| if self.reward_model is not None: |
| self.reward_model = self.reward_model.to(self.accelerator.device) |
|
|
| @property |
| def beta(self): |
| if isinstance(self._beta, list): |
| # `state.epoch` is a float (e.g. 1.5 mid-epoch) and may be None before training starts |
| epoch = int(self.state.epoch or 0) |
| return self._beta[epoch] if epoch < len(self._beta) else self._beta[-1] |
| else: |
| return self._beta |
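| # Example: with `beta = [0.1, 0.05]`, epoch 0 uses 0.1, epoch 1 uses 0.05, and every |
| # later epoch falls back to the final value, 0.05. |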
|
|
| @staticmethod |
| def tokenize_row(feature, is_encoder_decoder: bool, tokenizer: PreTrainedTokenizerBase) -> dict[str, Any]: |
| """Tokenize a single row from a DPO specific dataset.""" |
| if not is_encoder_decoder: |
| batch = tokenizer(feature["prompt"], add_special_tokens=False) |
| |
| if tokenizer.bos_token_id is not None: |
| prompt_len_input_ids = len(batch["input_ids"]) |
| if prompt_len_input_ids == 0 or tokenizer.bos_token_id != batch["input_ids"][0]: |
| batch["input_ids"] = [tokenizer.bos_token_id] + batch["input_ids"] |
| batch["attention_mask"] = [1] + batch["attention_mask"] |
| else: |
| batch = tokenizer(feature["prompt"], add_special_tokens=True) |
| batch = {f"prompt_{key}": value for key, value in batch.items()} |
| return batch |
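| # Illustrative sketch (not executed; assumes a decoder-only tokenizer `tok` with |
| # bos_token_id = 1 that maps "Hi" to [72]): |
| # tokenize_row({"prompt": "Hi"}, is_encoder_decoder = False, tokenizer = tok) |
| # -> {"prompt_input_ids": [1, 72], "prompt_attention_mask": [1, 1]} |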
|
|
| |
| @wraps(Trainer.get_train_dataloader) |
| def get_train_dataloader(self) -> DataLoader: |
| if self.train_dataset is None: |
| raise ValueError("Trainer: training requires a train_dataset.") |
|
|
| train_dataset = self.train_dataset |
| data_collator = self.data_collator |
| dataloader_params = { |
| "batch_size": self._train_batch_size, |
| "collate_fn": data_collator, |
| "num_workers": self.args.dataloader_num_workers, |
| "pin_memory": self.args.dataloader_pin_memory, |
| "persistent_workers": self.args.dataloader_persistent_workers, |
| } |
|
|
| if not isinstance(train_dataset, torch.utils.data.IterableDataset): |
| dataloader_params["sampler"] = self._get_train_sampler() |
| dataloader_params["drop_last"] = self.args.dataloader_drop_last |
| dataloader_params["worker_init_fn"] = seed_worker |
| dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor |
|
|
| return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params)) |
|
|
| |
| @wraps(Trainer.get_eval_dataloader) |
| def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]] = None) -> DataLoader: |
| if eval_dataset is None and self.eval_dataset is None: |
| raise ValueError("Trainer: evaluation requires an eval_dataset.") |
|
|
| |
| |
| dataloader_key = eval_dataset if isinstance(eval_dataset, str) else "eval" |
| if ( |
| hasattr(self, "_eval_dataloaders") |
| and dataloader_key in self._eval_dataloaders |
| and self.args.dataloader_persistent_workers |
| ): |
| return self.accelerator.prepare(self._eval_dataloaders[dataloader_key]) |
|
|
| eval_dataset = ( |
| self.eval_dataset[eval_dataset] |
| if isinstance(eval_dataset, str) |
| else eval_dataset |
| if eval_dataset is not None |
| else self.eval_dataset |
| ) |
| data_collator = self.data_collator |
|
|
| dataloader_params = { |
| "batch_size": self.args.eval_batch_size, |
| "collate_fn": data_collator, |
| "num_workers": self.args.dataloader_num_workers, |
| "pin_memory": self.args.dataloader_pin_memory, |
| "persistent_workers": self.args.dataloader_persistent_workers, |
| } |
|
|
| if not isinstance(eval_dataset, torch.utils.data.IterableDataset): |
| dataloader_params["sampler"] = self._get_eval_sampler(eval_dataset) |
| dataloader_params["drop_last"] = self.args.dataloader_drop_last |
| dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor |
|
|
| |
| |
| eval_dataloader = DataLoader(eval_dataset, **dataloader_params) |
| if self.args.dataloader_persistent_workers: |
| if hasattr(self, "_eval_dataloaders"): |
| self._eval_dataloaders[dataloader_key] = eval_dataloader |
| else: |
| self._eval_dataloaders = {dataloader_key: eval_dataloader} |
|
|
| return self.accelerator.prepare(eval_dataloader) |
|
|
| def _generate_vllm(self, model, prompts): |
| eos_token_id = self.processing_class.eos_token_id |
| pad_token_id = self.processing_class.pad_token_id |
|
|
| |
|
|
|
|
| if is_conversational({"prompt": prompts[0]}): |
| outputs = self.llm.chat(prompts, self.generation_config, use_tqdm=False, lora_request = self.model.load_lora('online_dpo_trainer_lora_model', load_tensors = True)) |
| else: |
| outputs = self.llm.generate(prompts, self.generation_config, use_tqdm=False, lora_request = self.model.load_lora('online_dpo_trainer_lora_model', load_tensors = True)) |
|
|
| completion_ids = [list(output.outputs[i].token_ids) for i in range(2) for output in outputs] |
| prompt_ids = [list(output.prompt_token_ids) for _ in range(2) for output in outputs] |
|
|
| |
| max_prompt_length = max(len(ids) for ids in prompt_ids) |
| prompt_mask = [[0] * (max_prompt_length - len(ids)) + [1] * len(ids) for ids in prompt_ids] |
| prompt_ids = [[pad_token_id] * (max_prompt_length - len(ids)) + ids for ids in prompt_ids] |
| max_tokens = self.generation_config.max_tokens |
| completion_mask = [[1] * len(ids) + [0] * (max_tokens - len(ids)) for ids in completion_ids] |
| completion_ids = [ |
|     ids + [eos_token_id] if ids and ids[-1] != eos_token_id and len(ids) < max_tokens else ids |
|     for ids in completion_ids |
| ] |
| completion_ids = [ids + [pad_token_id] * (max_tokens - len(ids)) for ids in completion_ids] |
|
|
| |
| prompt_ids = torch.tensor(prompt_ids, device=self.accelerator.device) |
| prompt_mask = torch.tensor(prompt_mask, device=self.accelerator.device) |
| completion_ids = torch.tensor(completion_ids, device=self.accelerator.device) |
| completion_mask = torch.tensor(completion_mask, device=self.accelerator.device) |
|
|
| return prompt_ids, prompt_mask, completion_ids, completion_mask |
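| # Layout note for `_generate_vllm` above: with n=2 samples per prompt, rows are ordered |
| # as [completion 0 of every prompt, then completion 1 of every prompt], which matches |
| # the `first_half, second_half = scores.split(batch_size)` pairing in `training_step`. |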
|
|
| def _generate(self, model, prompts): |
| eos_token_id = self.processing_class.eos_token_id |
| pad_token_id = self.processing_class.pad_token_id |
|
|
| |
| |
| inputs = [{"prompt": prompt} for prompt in prompts] |
| inputs = [maybe_apply_chat_template(x, self.processing_class) for x in inputs] |
| inputs = [self.tokenize_row(x, self.is_encoder_decoder, self.processing_class) for x in inputs] |
| inputs = self.data_collator(inputs) |
|
|
| |
| inputs = self._prepare_inputs(inputs) |
| prompt_ids = inputs["prompt_input_ids"].repeat(2, 1) |
| prompt_mask = inputs["prompt_attention_mask"].repeat(2, 1) |
| with unwrap_model_for_generation( |
| model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation |
| ) as unwrapped_model: |
| output = unwrapped_model.generate( |
| input_ids=prompt_ids, |
| attention_mask=prompt_mask, |
| generation_config=self.generation_config, |
| ) |
|
|
| completion_ids = output[:, prompt_ids.size(1) :] |
| completion_ids, completion_mask = truncate_right(completion_ids, eos_token_id, pad_token_id) |
|
|
| return prompt_ids, prompt_mask, completion_ids, completion_mask |
|
|
| def _forward(self, model, prompt_ids, prompt_mask, completion_ids, completion_mask): |
| |
| num_tokens_to_truncate = max(prompt_ids.size(1) + completion_ids.size(1) - self.max_length, 0) |
|
|
| |
| prompt_ids = prompt_ids[:, num_tokens_to_truncate:] |
| prompt_mask = prompt_mask[:, num_tokens_to_truncate:] |
|
|
| |
| prompt_completion_ids = torch.cat((prompt_ids, completion_ids), dim=1) |
| prompt_completion_mask = torch.cat((prompt_mask, completion_mask), dim=1) |
|
|
| |
| output = model(prompt_completion_ids, attention_mask=prompt_completion_mask) |
|
|
| |
| prompt_len = prompt_ids.size(1) |
| start_idx = prompt_len - 1 if prompt_len > 0 else 0 |
| logits = output.logits[:, start_idx:-1] |
|
|
| |
| logprobs = torch.take_along_dim(logits.log_softmax(dim=-1), completion_ids.unsqueeze(-1), dim=2).squeeze(-1) |
| return logprobs |
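| # Shape note for `_forward` above: with prompt length P and completion length C, |
| # `output.logits` covers P + C positions and position t predicts token t + 1, so the |
| # slice logits[:, P - 1 : -1] lines up one-to-one with the C completion tokens gathered |
| # by `take_along_dim`. |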
|
|
| def training_step( |
| self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None |
| ) -> torch.Tensor: |
| model.train() |
|
|
| prompts = inputs["prompt"] |
| batch_size = len(prompts) |
|
|
| if self.args.use_vllm: |
| prompt_ids, prompt_mask, completion_ids, completion_mask = self._generate_vllm(model, prompts) |
| else: |
| prompt_ids, prompt_mask, completion_ids, completion_mask = self._generate(model, prompts) |
|
|
| contain_eos_token = torch.any(completion_ids == self.processing_class.eos_token_id, dim=-1) |
|
|
| logprobs = self._forward(model, prompt_ids, prompt_mask, completion_ids, completion_mask) |
| with torch.no_grad(): |
| if self.ref_model is not None: |
| ref_logprobs = self._forward(self.ref_model, prompt_ids, prompt_mask, completion_ids, completion_mask) |
| else: |
| with self.model.disable_adapter(): |
| ref_logprobs = self._forward(self.model, prompt_ids, prompt_mask, completion_ids, completion_mask) |
|
|
| |
| device = logprobs.device |
| completions = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True) |
| if is_conversational({"prompt": prompts[0]}): |
| completions = [[{"role": "assistant", "content": completion}] for completion in completions] |
|
|
| |
| if self.judge is not None: |
| |
| |
| |
| |
| if is_conversational({"prompt": prompts[0]}): |
| environment = jinja2.Environment() |
| template = environment.from_string(SIMPLE_CHAT_TEMPLATE) |
| prompts = [template.render(messages=prompt) for prompt in prompts] |
| completions = [template.render(messages=completion) for completion in completions] |
|
|
| ranks_of_first_completion = self.judge.judge( |
| prompts, list(zip(completions[:batch_size], completions[batch_size:])) |
| ) |
|
|
| |
| |
| |
| mask = torch.tensor([rank == 0 for rank in ranks_of_first_completion], device=device) |
| else: |
| |
| |
| prompts = 2 * prompts |
| if is_conversational({"prompt": prompts[0]}): |
| examples = [{"prompt": p, "completion": c} for p, c in zip(prompts, completions)] |
| examples = [apply_chat_template(example, self.reward_processing_class) for example in examples] |
| prompts = [example["prompt"] for example in examples] |
| completions = [example["completion"] for example in examples] |
|
|
| |
| prompts_ids = self.reward_processing_class( |
| prompts, padding=True, return_tensors="pt", padding_side="left" |
| )["input_ids"].to(device) |
| context_length = prompts_ids.shape[1] |
|
|
| |
| completions_ids = self.reward_processing_class( |
| completions, padding=True, return_tensors="pt", padding_side="right" |
| )["input_ids"].to(device) |
|
|
| |
| prompt_completion_ids = torch.cat((prompts_ids, completions_ids), dim=1) |
| with torch.inference_mode(): |
| _, scores, _ = get_reward( |
| self.reward_model, prompt_completion_ids, self.reward_processing_class.pad_token_id, context_length |
| ) |
|
|
| |
| |
| if self.args.missing_eos_penalty is not None: |
| scores[~contain_eos_token] -= self.args.missing_eos_penalty |
|
|
| |
| first_half, second_half = scores.split(batch_size) |
|
|
| |
| mask = first_half >= second_half |
|
|
| batch_range = torch.arange(batch_size, device=device) |
| chosen_indices = batch_range + (~mask * batch_size) |
| rejected_indices = batch_range + (mask * batch_size) |
|
|
| |
| cr_indices = torch.cat((chosen_indices, rejected_indices), dim=0) |
| cr_logprobs = logprobs[cr_indices] |
| cr_ref_logprobs = ref_logprobs[cr_indices] |
|
|
| |
| padding_mask = ~completion_mask.bool() |
| cr_padding_mask = padding_mask[cr_indices] |
|
|
| cr_logprobs_sum = (cr_logprobs * ~cr_padding_mask).sum(1) |
| cr_ref_logprobs_sum = (cr_ref_logprobs * ~cr_padding_mask).sum(1) |
|
|
| |
| chosen_logprobs_sum, rejected_logprobs_sum = torch.split(cr_logprobs_sum, batch_size) |
| chosen_ref_logprobs_sum, rejected_ref_logprobs_sum = torch.split(cr_ref_logprobs_sum, batch_size) |
| pi_logratios = chosen_logprobs_sum - rejected_logprobs_sum |
| ref_logratios = chosen_ref_logprobs_sum - rejected_ref_logprobs_sum |
|
|
| logits = pi_logratios - ref_logratios |
|
|
| if self.args.loss_type == "sigmoid": |
| losses = -F.logsigmoid(self.beta * logits) |
| elif self.args.loss_type == "ipo": |
| losses = (logits - 1 / (2 * self.beta)) ** 2 |
| else: |
| raise NotImplementedError(f"invalid loss type {self.args.loss_type}") |
|
|
| loss = losses.mean() |
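| # Worked example for the sigmoid loss above: if the policy favours the chosen over the |
| # rejected completion by 2.0 nats more than the reference does, then with beta = 0.1, |
| # logits = 2.0 and loss = -log(sigmoid(0.1 * 2.0)) = -log(sigmoid(0.2)) ≈ 0.598. |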
|
|
| |
| if self.reward_model is not None: |
| scores_margin = scores[chosen_indices] - scores[rejected_indices] |
| self.stats["objective/scores_margin"].append( |
| self.accelerator.gather_for_metrics(scores_margin.mean()).mean().item() |
| ) |
| self.stats["objective/scores"].append(self.accelerator.gather_for_metrics(scores.mean()).mean().item()) |
| self.stats["val/contain_eos_token"].append(contain_eos_token.float().mean().item()) |
| self.stats["logps/chosen"].append(self.accelerator.gather_for_metrics(chosen_logprobs_sum).mean().item()) |
| self.stats["logps/rejected"].append(self.accelerator.gather_for_metrics(rejected_logprobs_sum).mean().item()) |
|
|
| kl = logprobs - ref_logprobs |
| mean_kl = kl.sum(1).mean() |
| self.stats["objective/kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item()) |
| non_score_reward = (-self.beta * kl).sum(1) |
| mean_non_score_reward = non_score_reward.mean() |
| self.stats["objective/non_score_reward"].append( |
| self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item() |
| ) |
| if self.reward_model is not None: |
| rlhf_reward = scores + non_score_reward |
| self.stats["objective/rlhf_reward"].append(self.accelerator.gather_for_metrics(rlhf_reward).mean().item()) |
| mean_entropy = -logprobs.sum(1).mean() |
| self.stats["objective/entropy"].append(self.accelerator.gather_for_metrics(mean_entropy).mean().item()) |
| chosen_rewards = self.beta * (chosen_logprobs_sum - chosen_ref_logprobs_sum) |
| gathered_chosen_rewards = self.accelerator.gather_for_metrics(chosen_rewards) |
| self.stats["rewards/chosen"].append(gathered_chosen_rewards.mean().item()) |
| rejected_rewards = self.beta * (rejected_logprobs_sum - rejected_ref_logprobs_sum) |
| gathered_rejected_rewards = self.accelerator.gather_for_metrics(rejected_rewards) |
| self.stats["rewards/rejected"].append(gathered_rejected_rewards.mean().item()) |
| margin = gathered_chosen_rewards - gathered_rejected_rewards |
| self.stats["rewards/margins"].append(margin.mean().item()) |
| accuracy = margin > 0 |
| self.stats["rewards/accuracies"].append(accuracy.float().mean().item()) |
| self.stats["beta"].append(self.beta) |
|
|
| if ( |
| self.args.torch_empty_cache_steps is not None |
| and self.state.global_step % self.args.torch_empty_cache_steps == 0 |
| ): |
| empty_cache() |
|
|
| kwargs = {} |
|
|
| |
| if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: |
| kwargs["learning_rate"] = self._get_learning_rate() |
|
|
| if self.args.n_gpu > 1: |
| loss = loss.mean() |
|
|
| if self.use_apex: |
| with amp.scale_loss(loss, self.optimizer) as scaled_loss: |
| scaled_loss.backward() |
| else: |
| self.accelerator.backward(loss, **kwargs) |
|
|
| return loss.detach() / self.args.gradient_accumulation_steps |
|
|
| |
| def _maybe_log_save_evaluate( |
| self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=None |
| ): |
| if self.control.should_log and self.state.global_step > self._globalstep_last_logged: |
| logs: dict[str, float] = {} |
|
|
| |
| tr_loss_scalar = self._nested_gather(tr_loss).mean().item() |
|
|
| |
| tr_loss -= tr_loss |
|
|
| logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) |
| if grad_norm is not None: |
| logs["grad_norm"] = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm |
| if learning_rate is not None: |
| logs["learning_rate"] = learning_rate |
| else: |
| logs["learning_rate"] = self._get_learning_rate() |
|
|
| |
| for key, val in self.stats.items(): |
| logs[key] = sum(val) / len(val) |
| self.stats = {key: [] for key in self.stats} |
|
|
| self._total_loss_scalar += tr_loss_scalar |
| self._globalstep_last_logged = self.state.global_step |
| self.store_flos() |
| self.log(logs, start_time) |
|
|
| metrics = None |
| if self.control.should_evaluate: |
| metrics = self._evaluate(trial, ignore_keys_for_eval) |
| is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial) |
|
|
| if self.args.save_strategy == "best": |
| self.control.should_save = is_new_best_metric |
|
|
| if self.control.should_save: |
| self._save_checkpoint(model, trial) |
| self.control = self.callback_handler.on_save(self.args, self.state, self.control) |
|
|
| |
| def _save_checkpoint(self, model, trial): |
| if self.args.hub_model_id is None: |
| model_name = Path(self.args.output_dir).name |
| else: |
| model_name = self.args.hub_model_id.split("/")[-1] |
| self.create_model_card(model_name=model_name) |
| super()._save_checkpoint(model, trial) |
|
|
| def create_model_card( |
| self, |
| model_name: Optional[str] = None, |
| dataset_name: Optional[str] = None, |
| tags: Union[str, list[str], None] = None, |
| ): |
| """ |
| Creates a draft of a model card using the information available to the `Trainer`. |
| |
| Args: |
| model_name (`str` or `None`, *optional*, defaults to `None`): |
| Name of the model. |
| dataset_name (`str` or `None`, *optional*, defaults to `None`): |
| Name of the dataset used for training. |
| tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): |
| Tags to be associated with the model card. |
| """ |
| if not self.is_world_process_zero(): |
| return |
|
|
| if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): |
| base_model = self.model.config._name_or_path |
| else: |
| base_model = None |
|
|
| |
| if tags is None: |
| tags = set() |
| elif isinstance(tags, str): |
| tags = {tags} |
| else: |
| tags = set(tags) |
|
|
| if hasattr(self.model.config, "unsloth_version"): |
| tags.add("unsloth") |
|
|
| if "JOB_ID" in os.environ: |
| tags.add("hf_jobs") |
|
|
| tags.update(self._tag_names) |
|
|
| |
| citation = textwrap.dedent("""\ |
| @article{guo2024direct, |
| title = {{Direct Language Model Alignment from Online AI Feedback}}, |
| author = {Shangmin Guo and Biao Zhang and Tianlin Liu and Tianqi Liu and Misha Khalman and Felipe Llinares and Alexandre Ram{\'{e}} and Thomas Mesnard and Yao Zhao and Bilal Piot and Johan Ferret and Mathieu Blondel}, |
| year = 2024, |
| eprint = {arXiv:2402.04792} |
| }""") |
|
|
| model_card = generate_model_card( |
| base_model=base_model, |
| model_name=model_name, |
| hub_model_id=self.hub_model_id, |
| dataset_name=dataset_name, |
| tags=tags, |
| wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None, |
| comet_url=get_comet_experiment_url(), |
| trainer_name="Online DPO", |
| trainer_citation=citation, |
| paper_title="Direct Language Model Alignment from Online AI Feedback", |
| paper_id="2402.04792", |
| ) |
| model_card.save(os.path.join(self.args.output_dir, "README.md")) |
| class UnslothOnlineDPOTrainer(_UnslothOnlineDPOTrainer): |
| """ |
| |
| Initialize OnlineDPOTrainer. |
| |
| Args: |
| model (`Union[str, nn.Module, PreTrainedModel]`): |
| Model to be trained. Can be either: |
| |
| - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or a |
| path to a *directory* containing model weights saved using |
| [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is loaded |
| using [`~transformers.AutoModelForCausalLM.from_pretrained`] with the keyword arguments in |
| `args.model_init_kwargs`. |
| - A [`~transformers.PreTrainedModel`] object. Only causal language models are supported. |
| ref_model (`transformers.PreTrainedModel` or `torch.nn.Module` or `None`): |
| The reference model to use for training. If None is specified, the reference model will be created from the |
| model. |
| reward_model (`transformers.PreTrainedModel` or `torch.nn.Module` or `None`): |
| The reward model to score completions with, preferably an `AutoModelForSequenceClassification`. |
| judge (`BasePairwiseJudge`): |
| The judge to use for pairwise comparison of model completions. |
| args (`OnlineDPOConfig`): |
| The online DPO config arguments to use for training. |
| data_collator (`transformers.DataCollator`): |
| The data collator to use for training. If None is specified, the default data collator |
| (`DPODataCollatorWithPadding`) will be used which will pad the sequences to the maximum length of the |
| sequences in the batch, given a dataset of paired sequences. |
| train_dataset (`datasets.Dataset`): |
| The dataset to use for training. |
| eval_dataset (`datasets.Dataset`): |
| The dataset to use for evaluation. |
| processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*, defaults to `None`): |
| Processing class used to process the data. If provided, will be used to automatically process the inputs |
| for the model, and it will be saved along the model to make it easier to rerun an interrupted training or |
| reuse the fine-tuned model. |
| peft_config (`dict`): |
| The peft config to use for training. |
| compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*): |
| The function to use to compute the metrics. Must take a `EvalPrediction` and return a dictionary string to |
| metric values. |
| callbacks (`list[transformers.TrainerCallback]`): |
| The callbacks to use for training. |
| optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): |
| The optimizer and scheduler to use for training. |
| preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): |
| The function to use to preprocess the logits before computing the metrics. |
| |
| """ |
| def __init__( |
| self, |
| model, |
| ref_model = None, |
| reward_model = None, |
| judge = None, |
| args = None, |
| data_collator = None, |
| train_dataset = None, |
| eval_dataset = None, |
| processing_class = None, |
| reward_processing_class = None, |
| peft_config = None, |
| compute_metrics = None, |
| callbacks = None, |
| preprocess_logits_for_metrics = None, |
| **kwargs |
| ): |
| if args is None: args = UnslothOnlineDPOConfig() |
| use_bf16 = getattr(args, 'bf16', False) |
| if type(use_bf16) is not bool: use_bf16 = False |
| use_fp16 = getattr(args, 'fp16', False) |
| if type(use_fp16) is not bool: use_fp16 = False |
| force_float32 = False |
| full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1' |
| if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'): |
| print('Unsloth: Switching to float32 training since model cannot work with float16') |
| force_float32 = True |
| mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') |
| dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None) |
| if dtype is None: dtype = model.get_input_embeddings().dtype |
| from unsloth_zoo.utils import _get_dtype |
| dtype = _get_dtype(dtype) |
| float16 = dtype == torch.float16 |
| if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`') |
| if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`') |
| if force_float32: |
| |
| args.fp16 = False |
| args.bf16 = False |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'no' |
| elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32': |
| |
| args.fp16 = float16 |
| args.bf16 = not float16 |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16' |
| if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no': |
| args.eval_strategy = 'steps' |
| if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1 |
| ga_steps = getattr(args, 'gradient_accumulation_steps', None) |
| if ga_steps is not None and ga_steps > 1: |
| from transformers import __version__ as transformers_version |
| if Version(transformers_version) <= Version('4.45.2'): |
| print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n' |
| '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`') |
| if getattr(args, 'eval_strategy', 'no') != 'no': |
| eval_bsz = getattr(args, 'per_device_eval_batch_size', 8) |
| if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size |
| if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps |
| fp16_full_eval = getattr(args, 'fp16_full_eval', False) |
| if type(fp16_full_eval) is not bool: fp16_full_eval = False |
| bf16_full_eval = getattr(args, 'bf16_full_eval', False) |
| if type(bf16_full_eval) is not bool: bf16_full_eval = False |
| if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True |
| if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False |
| if force_float32: |
| args.bf16_full_eval = False |
| args.fp16_full_eval = False |
| elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16': |
| args.bf16_full_eval = True |
| args.fp16_full_eval = False |
| elif not bf16_full_eval and not fp16_full_eval: |
| args.bf16_full_eval = args.bf16 |
| args.fp16_full_eval = args.fp16 |
| _output_logits = False |
| if locals().get('compute_metrics', None) is not None: _output_logits = True |
| if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True |
| if _output_logits: |
| os.environ['UNSLOTH_RETURN_LOGITS'] = '1' |
| if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'): |
| pass |
| else: |
| model_max_seq_length = getattr(model, 'max_seq_length', None) |
| args_max_seq_length = getattr(args, 'max_seq_length', None) |
| if args_max_seq_length is None and model_max_seq_length is not None: |
| max_seq_length = model.max_seq_length |
| if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length |
| if model is not None and hasattr(model, 'for_training'): |
| model.for_training() |
| if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right' |
| if 'processing_class' in locals(): |
| if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right' |
| if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right' |
| __tokenizer = processing_class if 'processing_class' in locals() else tokenizer |
| from unsloth_zoo.vision_utils import UnslothVisionDataCollator |
| if not isinstance(data_collator, UnslothVisionDataCollator): |
| if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names: |
| data_collator = TransformersDataCollatorForLanguageModeling( |
| __tokenizer, |
| mlm = False, |
| mlm_probability = 0.0, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names: |
| data_collator = DataCollatorForSeq2Seq( |
| __tokenizer, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| else: |
| if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False |
| if hasattr(args, 'dataset_text_field'): args.dataset_text_field = '' |
| if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True} |
| if not isinstance(data_collator, UnslothVisionDataCollator): |
| if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'): |
| if isinstance(data_collator, DataCollatorForSeq2Seq): |
| data_collator = DataCollatorForSeq2Seq( |
| __tokenizer.tokenizer, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| else: |
| data_collator = TransformersDataCollatorForLanguageModeling( |
| __tokenizer.tokenizer, |
| mlm = False, |
| mlm_probability = 0.0, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| other_metrics = [] |
| |
| from unsloth_zoo.logging_utils import PatchRLStatistics |
| PatchRLStatistics('online_dpo_trainer', other_metrics) |
| |
| |
| |
| if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1: |
| if getattr(args, "_n_gpu", 1) != 1: |
| args._n_gpu = 1 |
| if "model" in locals() and hasattr(model, "for_training"): |
| model.for_training() |
| super().__init__( |
| model = model, |
| ref_model = ref_model, |
| reward_model = reward_model, |
| judge = judge, |
| args = args, |
| data_collator = data_collator, |
| train_dataset = train_dataset, |
| eval_dataset = eval_dataset, |
| processing_class = processing_class, |
| reward_processing_class = reward_processing_class, |
| peft_config = peft_config, |
| compute_metrics = compute_metrics, |
| callbacks = callbacks, |
| preprocess_logits_for_metrics = preprocess_logits_for_metrics,**kwargs) |
| if "model" in locals() and hasattr(model, "for_inference"): |
| model.for_inference() |
| if hasattr(self, 'neftune_hook_handle'): |
| self.neftune_hook_handle.remove() |
| if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle |
| if getattr(args, 'neftune_noise_alpha', None) is not None: |
| model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha |
| pass |
| if hasattr(self, 'accelerator'): |
| scaler = self.accelerator.scaler |
| current_model = model |
| while hasattr(current_model, 'model'): |
| current_model.accelerator_scaler = scaler |
| current_model = current_model.model |
| current_model.accelerator_scaler = scaler |
| pass |
| if hasattr(self, 'train'): |
| self.train = MethodType(prepare_for_training_mode(self.__class__.train), self) |
| pass |
| |
| pass |
|
|
|
|
| if hasattr(logger, "addFilter"): |
| import logging |
| class HideLoggingMessage(logging.Filter): |
|     def __init__(self, text): super().__init__(); self.text = text |
|     def filter(self, x): return self.text not in x.getMessage() |
| pass |
| logger.addFilter(HideLoggingMessage("`use_cache=True`")) |
|
|
|
|