import os

import torch
from accelerate import Accelerator
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
)
from trl import DPOConfig, DPOTrainer

from data_adapter import load_preference_dataset
from utils import (
    DEFINE_PAD_TOKEN,
    ScriptArguments,
    create_peft,
    format_prompt,
    resolve_system_prompt,
)

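# Route all training logs to a fixed Weights & Biases project and run name.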
os.environ["WANDB_PROJECT"] = "ma-rlhf"
os.environ["WANDB_RUN_NAME"] = "dpo"

parser = HfArgumentParser(ScriptArguments)
train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]

dataset_name = train_args.dataset_name
dataset_sub_name = train_args.dataset_sub_name
dataset_split = train_args.dataset_split
model_name = train_args.model_name
deepspeed_config_name = train_args.deepspeed_config_name
output_max_length = train_args.output_max_length
seq_length = train_args.seq_length
batch_size = train_args.batch_size
output_name = train_args.output_name
is_peft = train_args.use_QLora
is_use_flash_attention2 = train_args.use_flash_attention_2
num_train_epochs = train_args.num_train_epochs
beta = 0.1  # DPO KL-penalty coefficient, forwarded to DPOConfig below
gradient_accumulation_steps = train_args.gradient_accumulation_steps
learning_rate = train_args.learning_rate
use_qlora_double_quant = train_args.use_qlora_double_quant
default_system_prompt = resolve_system_prompt(train_args.system_prompt)


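# Load the policy model (4-bit quantized when QLoRA is enabled) and its tokenizer.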
def create_model_tokenizer(name):
    # 4-bit NF4 quantization config; only applied below when QLoRA is enabled.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=use_qlora_double_quant,
    )

    # Pin each process to its own GPU so multi-process training shards correctly.
    device_map = {"": Accelerator().local_process_index}
    print("device map: ", device_map)

    model = AutoModelForCausalLM.from_pretrained(
        name,
        quantization_config=bnb_config if is_peft else None,
        device_map=device_map,
        trust_remote_code=True,
        # use_flash_attention_2 is deprecated; attn_implementation is the current switch.
        attn_implementation="flash_attention_2" if is_use_flash_attention2 else None,
    )

    tokenizer = AutoTokenizer.from_pretrained(
        name,
        use_fast=True,
        model_max_length=seq_length,
        trust_remote_code=True,
    )

    # Register a dedicated pad token, grow the embedding matrix to match, and
    # record the id on the model config so padding and generation agree.
    tokenizer.add_special_tokens({"pad_token": DEFINE_PAD_TOKEN})
    model.resize_token_embeddings(len(tokenizer))
    model.config.pad_token_id = tokenizer.pad_token_id

    return model, tokenizer


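# Build the preference dataset in the prompt/chosen/rejected format DPOTrainer expects.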
def create_dpo_datasets(dataset_name, dataset_sub_name):
    train_dataset = load_preference_dataset(
        dataset_name,
        dataset_sub_name=dataset_sub_name,
        split=dataset_split,
        default_system_prompt=default_system_prompt,
    )
    # Fold each example's system prompt into the formatted prompt string and
    # drop the now-redundant "system" column.
    train_dataset = train_dataset.map(
        lambda example: {
            "prompt": format_prompt(example["prompt"], system_prompt=example["system"]),
            "chosen": example["chosen"],
            "rejected": example["rejected"],
        },
        remove_columns=["system"],
    )

    return train_dataset, None  # no held-out eval split


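# Full DPO run: load model and data, configure the trainer, train, and save.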
def train():
    model, tokenizer = create_model_tokenizer(model_name)
    train_dataset, _ = create_dpo_datasets(dataset_name, dataset_sub_name)

    # create_peft is assumed to return a LoRA config when QLoRA is enabled
    # and None otherwise (full-parameter fine-tuning).
    peft_config = create_peft(is_peft)

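    # Token budgets: prompt and completion are each capped at output_max_length,
    # and the combined sequence at seq_length.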
    training_args = DPOConfig(
        output_dir=output_name,
        save_strategy="epoch",
        logging_steps=1,
        num_train_epochs=num_train_epochs,
        gradient_checkpointing=True,
        bf16=True,
        learning_rate=learning_rate,
        warmup_ratio=0.05,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        deepspeed=deepspeed_config_name,
        report_to="wandb",
        lr_scheduler_type="cosine",
        beta=beta,  # was defined above but never passed through
        loss_type="sigmoid",  # the standard DPO loss
        dataset_num_proc=64,
        max_completion_length=output_max_length,
        max_prompt_length=output_max_length,
        max_length=seq_length,
    )

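    # ref_model=None lets TRL derive the frozen reference model itself
    # (with a PEFT config it disables the adapter instead of copying the model).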
    trainer = DPOTrainer(
        model=model,
        ref_model=None,
        args=training_args,
        train_dataset=train_dataset,
        peft_config=peft_config,
        processing_class=tokenizer,
    )

    trainer.train()
    trainer.save_model(output_name)


if __name__ == "__main__":
    train()