| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| import itertools |
| from contextlib import contextmanager |
| from dataclasses import dataclass |
| from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union |
|
|
| from accelerate.utils import is_deepspeed_available |
| from transformers import PreTrainedModel, PreTrainedTokenizer |
|
|
| from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead |
|
|
|
|
# Value-head model wrappers from this package that these utilities are known to support.
SUPPORTED_ARCHITECTURES = (
    AutoModelForCausalLMWithValueHead,
    AutoModelForSeq2SeqLMWithValueHead,
)
|
|
| if is_deepspeed_available(): |
| import deepspeed |
|
|
| if TYPE_CHECKING: |
| from accelerate import Accelerator |
| from deepspeed.runtime.engine import DeepSpeedEngine |
| from torch.nn.parallel.distributed import DistributedDataParallel |
|
|
| from .modeling_base import PreTrainedModelWrapper |
|
|
|
|
| |
@dataclass
class ChatMlSpecialTokens:
    """Special tokens and the Jinja chat template for the ChatML conversation format."""

    # ChatML opens every turn with <|im_start|> and closes it with <|im_end|>;
    # the pad token deliberately reuses the end-of-turn token.
    bos_token: str = "<|im_start|>"
    eos_token: str = "<|im_end|>"
    pad_token: str = "<|im_end|>"

    @property
    def system(self):
        """Header that opens a system turn."""
        return self.bos_token + "system"

    @property
    def user(self):
        """Header that opens a user turn."""
        return self.bos_token + "user"

    @property
    def assistant(self):
        """Header that opens an assistant turn."""
        return self.bos_token + "assistant"

    @property
    def chat_template(self):
        """Jinja template rendering each message as ``<bos>role\\ncontent<eos>\\n``,
        optionally followed by an assistant generation prompt."""
        # Built by plain concatenation so the Jinja braces/quotes stay readable.
        per_message = (
            "{{'" + self.bos_token + "' + message['role'] + '\n' + message['content'] + '"
            + self.eos_token + "' + '\n'}}"
        )
        generation_prompt = "{{ '" + self.assistant + "\n' }}"
        return (
            "{% for message in messages %}"
            + per_message
            + "{% endfor %}"
            + "{% if add_generation_prompt %}"
            + generation_prompt
            + "{% endif %}"
        )
|
|
|
|
| FORMAT_MAPPING = {"chatml": ChatMlSpecialTokens} |
|
|
|
|
def setup_chat_format(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    format: Optional[Literal["chatml"]] = "chatml",
    resize_to_multiple_of: Optional[int] = None,
) -> Tuple[PreTrainedModel, PreTrainedTokenizer]:
    """
    Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the
    embedding layer of the model based on the new special tokens.

    If the tokenizer already has a chat template, this will throw an error. If you want to overwrite it, please set
    `tokenizer.chat_template` to `None`.

    Args:
        model (`~transformers.PreTrainedModel`): The model to be modified.
        tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified.
        format (`Optional[Literal["chatml"]]`): The format to be set. Defaults to "chatml".
        resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None.

    Returns:
        model (`~transformers.PreTrainedModel`): The modified model.
        tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer.

    Raises:
        ValueError: if the tokenizer already has a chat template, or if `format` is unknown.
    """
    # Refuse to clobber an existing template: silently overwriting it could change
    # how downstream prompts render without the user noticing.
    if tokenizer.chat_template is not None:
        raise ValueError(
            "Chat template is already added to the tokenizer. If you want to overwrite it, please set it to None"
        )

    if format not in FORMAT_MAPPING:
        raise ValueError(f"Format {format} not available. Please use one of {FORMAT_MAPPING.keys()}")

    chat_format = FORMAT_MAPPING[format]()

    # Install the format's special tokens and Jinja chat template on the tokenizer.
    tokenizer.eos_token = chat_format.eos_token
    tokenizer.pad_token = chat_format.pad_token
    tokenizer.bos_token = chat_format.bos_token
    tokenizer.add_special_tokens({"additional_special_tokens": [chat_format.bos_token, chat_format.eos_token]})
    tokenizer.chat_template = chat_format.chat_template

    # Grow the embedding matrix to cover the newly added tokens. Pass
    # `resize_to_multiple_of` straight through (None disables padding); the
    # original `x if x is not None else None` conditional was a no-op.
    model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=resize_to_multiple_of)

    # Keep the model config and generation config token ids in sync with the
    # tokenizer so generation pads and stops on the right tokens.
    if getattr(model, "config", None) is not None:
        model.config.pad_token_id = tokenizer.pad_token_id
        model.config.bos_token_id = tokenizer.bos_token_id
        model.config.eos_token_id = tokenizer.eos_token_id
    if getattr(model, "generation_config", None) is not None:
        model.generation_config.bos_token_id = tokenizer.bos_token_id
        model.generation_config.eos_token_id = tokenizer.eos_token_id
        model.generation_config.pad_token_id = tokenizer.pad_token_id

    return model, tokenizer
|
|
|
|
def remove_hooks(model: "DeepSpeedEngine") -> None:
    """Removes the optimizer hooks from a DeepSpeed ZeRO-3 model.

    Args:
        model (`DeepSpeedEngine`): engine whose ZeRO-3 parameter hooks should be detached.

    Raises:
        RuntimeError: if the engine has no optimizer attached.
    """
    # Fix: the original left `optimizer_offload` unbound when `model.optimizer`
    # was None and crashed later with a confusing NameError — fail loudly instead.
    if model.optimizer is None:
        raise RuntimeError("The model optimizer is None, which is not yet supported.")
    if hasattr(model.optimizer, "parameter_offload"):
        # Hook state lives on `parameter_offload` when present (newer DeepSpeed
        # layouts); otherwise fall back to the optimizer itself.
        optimizer_offload = model.optimizer.parameter_offload
    else:
        optimizer_offload = model.optimizer

    # Clear the per-parameter bookkeeping of active submodules.
    for param in iter_params(optimizer_offload.module, recurse=True):
        param.ds_active_sub_modules.clear()

    # Detach every registered forward/backward hook, then forget them.
    for hook in optimizer_offload.forward_hooks:
        hook.remove()
    for hook in optimizer_offload.backward_hooks:
        hook.remove()

    optimizer_offload.forward_hooks = []
    optimizer_offload.backward_hooks = []
|
|
|
|
def get_all_parameters(sub_module, recurse=False):
    """Chain a module's own named parameters with its DeepSpeed external parameters."""
    direct_params = sub_module.named_parameters(recurse=recurse)
    external_params = sub_module.ds_external_parameters()
    return itertools.chain(direct_params, external_params)
|
|
|
|
def iter_params(module, recurse=False):
    """Return a list of the parameter objects (values only) yielded by `get_all_parameters`."""
    collected = []
    for _name, parameter in get_all_parameters(module, recurse):
        collected.append(parameter)
    return collected
|
|
|
|
def add_hooks(model: "DeepSpeedEngine") -> None:
    """Adds the optimizer hooks from a DeepSpeed ZeRO-3 model.

    Counterpart of `remove_hooks`: re-registers the ZeRO-3 parameter hooks after
    they were removed for generation.

    Args:
        model (`DeepSpeedEngine`): engine whose ZeRO-3 parameter hooks should be re-registered.

    Raises:
        RuntimeError: if the engine has no optimizer attached.
    """
    # Fix: the original left `optimizer_offload` unbound when `model.optimizer`
    # was None and crashed with a confusing NameError — fail loudly instead.
    if model.optimizer is None:
        raise RuntimeError("The model optimizer is None, which is not yet supported.")
    if hasattr(model.optimizer, "parameter_offload"):
        # Hook machinery lives on `parameter_offload` when present; otherwise
        # the optimizer itself owns it.
        optimizer_offload = model.optimizer.parameter_offload
    else:
        optimizer_offload = model.optimizer
    optimizer_offload._register_hooks_recursively(optimizer_offload.module)
|
|
|
|
@contextmanager
def unwrap_model_for_generation(
    model: Union["DistributedDataParallel", "DeepSpeedEngine"], accelerator: "Accelerator", is_peft_model: bool = False
) -> Union["PreTrainedModelWrapper", "DeepSpeedEngine"]:
    """Context manager to unwrap a model for generation.

    For ZeRO-3 models, we gather the weights once to speed up generation.

    Args:
        model: the accelerate-wrapped model (DDP or DeepSpeed engine).
        accelerator: the `Accelerator` that wrapped `model`.
        is_peft_model: whether the underlying model is a PEFT model.

    Yields:
        The unwrapped model, ready for `.generate(...)`.
    """
    unwrapped_model = accelerator.unwrap_model(model)
    if is_peft_model:
        # NOTE(review): peft's `disable_adapter()` returns a context manager;
        # calling it bare like this looks like a no-op — confirm the intent
        # (it is never entered, so adapters presumably stay enabled).
        unwrapped_model.pretrained_model.disable_adapter()
    if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3:
        # Under ZeRO-3 the parameters are partitioned across ranks: gather them
        # once up front so generation does not re-gather on every forward.
        with deepspeed.zero.GatheredParameters(model.parameters()):
            # Temporarily detach the ZeRO-3 hooks so the gathered weights stay
            # materialized while the caller generates, then restore them so
            # training can resume normally after the `with` block.
            remove_hooks(model)
            yield accelerator.unwrap_model(model)
            add_hooks(model)
    else:
        yield unwrapped_model
|
|