# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import List, Literal, Optional, Union


@dataclass
class ModelConfig:
"""
Configuration class for the models.
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
model_name_or_path (`Optional[str]`, *optional*, defaults to `None`):
Model checkpoint for weights initialization.
model_revision (`str`, *optional*, defaults to `"main"`):
Specific model version to use. It can be a branch name, a tag name, or a commit id.
torch_dtype (`Optional[Literal["auto", "bfloat16", "float16", "float32"]]`, *optional*, defaults to `None`):
Override the default `torch.dtype` and load the model under this dtype. Possible values are
- `"bfloat16"`: `torch.bfloat16`
- `"float16"`: `torch.float16`
- `"float32"`: `torch.float32`
- `"auto"`: Automatically derive the dtype from the model's weights.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether to allow for custom models defined on the Hub in their own modeling files. This option should only
be set to `True` for repositories you trust and in which you have read the code, as it will execute code
present on the Hub on your local machine.
        attn_implementation (`Optional[str]`, *optional*, defaults to `None`):
            Which attention implementation to use. For example, passing `--attn_implementation=flash_attention_2`
            requires installing Flash Attention manually with `pip install flash-attn --no-build-isolation`.
use_peft (`bool`, *optional*, defaults to `False`):
Whether to use PEFT for training.
        lora_r (`int`, *optional*, defaults to `16`):
            LoRA rank (the `r` dimension of the low-rank update matrices).
        lora_alpha (`int`, *optional*, defaults to `32`):
            LoRA alpha (scaling factor for the adapter update).
        lora_dropout (`float`, *optional*, defaults to `0.05`):
            Dropout probability applied to the LoRA layers.
        lora_target_modules (`Optional[Union[str, List[str]]]`, *optional*, defaults to `None`):
            Modules to apply LoRA to: a list of module names, or a single string that PEFT treats as a regex
            pattern (or the special value `"all-linear"`).
lora_modules_to_save (`Optional[List[str]]`, *optional*, defaults to `None`):
Model layers to unfreeze & train.
lora_task_type (`str`, *optional*, defaults to `"CAUSAL_LM"`):
Task type to pass for LoRA (use `"SEQ_CLS"` for reward modeling).
use_rslora (`bool`, *optional*, defaults to `False`):
Whether to use Rank-Stabilized LoRA, which sets the adapter scaling factor to `lora_alpha/√r`, instead of
the original default value of `lora_alpha/r`.
        load_in_8bit (`bool`, *optional*, defaults to `False`):
            Whether to use 8-bit precision for the base model. Works only with LoRA.
        load_in_4bit (`bool`, *optional*, defaults to `False`):
            Whether to use 4-bit precision for the base model. Works only with LoRA.
bnb_4bit_quant_type (`str`, *optional*, defaults to `"nf4"`):
Quantization type (`"fp4"` or `"nf4"`).
use_bnb_nested_quant (`bool`, *optional*, defaults to `False`):
Whether to use nested quantization.
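
    Example:

    A minimal sketch of parsing this class from the command line, as described above. Only
    `transformers.HfArgumentParser` is assumed:

    ```python
    >>> from transformers import HfArgumentParser
    >>> parser = HfArgumentParser(ModelConfig)
    >>> (model_config,) = parser.parse_args_into_dataclasses()  # e.g. --use_peft --lora_r 8
    ```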
"""
model_name_or_path: Optional[str] = None
model_revision: str = "main"
torch_dtype: Optional[Literal["auto", "bfloat16", "float16", "float32"]] = None
trust_remote_code: bool = False
attn_implementation: Optional[str] = None
use_peft: bool = False
lora_r: int = 16
lora_alpha: int = 32
lora_dropout: float = 0.05
    lora_target_modules: Optional[Union[str, List[str]]] = None
lora_modules_to_save: Optional[List[str]] = None
lora_task_type: str = "CAUSAL_LM"
use_rslora: bool = False
load_in_8bit: bool = False
load_in_4bit: bool = False
bnb_4bit_quant_type: Literal["fp4", "nf4"] = "nf4"
use_bnb_nested_quant: bool = False

    def __post_init__(self):
        # bitsandbytes supports either 8-bit or 4-bit loading, not both at once.
        if self.load_in_8bit and self.load_in_4bit:
            raise ValueError("You can't use 8-bit and 4-bit precision at the same time")

        # `HfArgumentParser` parses this field as a list; collapse a single entry to a bare string
        # so values such as `"all-linear"` or a regex pattern reach PEFT as a string.
        if isinstance(self.lora_target_modules, list) and len(self.lora_target_modules) == 1:
            self.lora_target_modules = self.lora_target_modules[0]
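

if __name__ == "__main__":
    # A minimal usage sketch: how these fields typically map onto a model load, `peft.LoraConfig`,
    # and `transformers.BitsAndBytesConfig`. This assumes `peft` (and, for quantization,
    # `bitsandbytes`) is installed; the exact wiring in the surrounding library may differ.
    import torch
    from peft import LoraConfig
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig, HfArgumentParser

    (config,) = HfArgumentParser(ModelConfig).parse_args_into_dataclasses()

    # Resolve the dtype string to a torch.dtype; "auto" and None are passed through unchanged.
    torch_dtype = (
        config.torch_dtype if config.torch_dtype in (None, "auto") else getattr(torch, config.torch_dtype)
    )

    peft_config = None
    if config.use_peft:
        peft_config = LoraConfig(
            r=config.lora_r,
            lora_alpha=config.lora_alpha,
            lora_dropout=config.lora_dropout,
            target_modules=config.lora_target_modules,
            modules_to_save=config.lora_modules_to_save,
            task_type=config.lora_task_type,
            use_rslora=config.use_rslora,
        )

    quantization_config = None
    if config.load_in_4bit:
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type=config.bnb_4bit_quant_type,
            bnb_4bit_use_double_quant=config.use_bnb_nested_quant,
        )
    elif config.load_in_8bit:
        quantization_config = BitsAndBytesConfig(load_in_8bit=True)

    if config.model_name_or_path is not None:
        model = AutoModelForCausalLM.from_pretrained(
            config.model_name_or_path,
            revision=config.model_revision,
            trust_remote_code=config.trust_remote_code,
            attn_implementation=config.attn_implementation,
            torch_dtype=torch_dtype,
            quantization_config=quantization_config,
        )
        print(model.config, peft_config)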