'''
This script requires trl==0.11.0 and supports multi-adapter LoRA training.
'''
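
# Multi-adapter setup: the policy LoRA adapter and the reward adapter are
# attached to one quantized base model, so no separate reward or reference
# model copy has to be kept in GPU memory.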

import re
import torch
import os
from trl import AutoModelForCausalLMWithValueHead, PPOTrainer, PPOConfig
from trl.core import LengthSampler
from transformers import AutoTokenizer, BitsAndBytesConfig, HfArgumentParser
from accelerate import Accelerator
from utils import (
    create_peft,
    is_main_process,
    ScriptArguments,
    DEFINE_EOS_TOKEN,
    DEFINE_PAD_TOKEN,
    format_prompt,
    resolve_system_prompt,
)
import time
from ma_ppo_config import MultiAdapterPPOConfig
from ma_ppo_trainer import MultiAdapterPPOTrainer
from data_adapter import load_prompt_dataset


os.environ["WANDB_PROJECT"] = "ma-rlhf"
os.environ["WANDB_NAME"] = "ppo"


parser = HfArgumentParser(ScriptArguments)
train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]

dataset_name = train_args.dataset_name
dataset_sub_name = train_args.dataset_sub_name
dataset_split = train_args.dataset_split
model_name = train_args.model_name
rm_model_name = train_args.reward_model_name
deepspeed_config_name = train_args.deepspeed_config_name
batch_size = train_args.batch_size
mini_batch_size = train_args.mini_batch_size
ppo_epochs = train_args.ppo_epochs
output_max_length = train_args.output_max_length
seq_length = train_args.seq_length
output_name = train_args.output_name
is_peft = train_args.use_QLora
is_use_flash_attention2 = train_args.use_flash_attention_2

gradient_accumulation_steps = train_args.gradient_accumulation_steps
default_system_prompt = resolve_system_prompt(train_args.system_prompt)

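# Build the quantized base model with a value head, attach the trainable LoRA
# adapter (peft_config) and load the reward model as an extra adapter
# (reward_adapter) on the same backbone.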
def create_model_tokenizer(name, rm_model_name, peft_config):
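    # Load the base weights in 4-bit NF4 with bfloat16 compute to cut memory use.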
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )

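    # Map the whole model onto this rank's local GPU; each process keeps its own
    # quantized replica under data-parallel training.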
    device_map = {"": Accelerator().local_process_index}
    print('device map: ', device_map)

    model = AutoModelForCausalLMWithValueHead.from_pretrained(
        name,
        quantization_config=bnb_config,
        peft_config=peft_config,
        reward_adapter=rm_model_name,
        device_map=device_map,
        use_flash_attention_2=is_use_flash_attention2,
        trust_remote_code=True,
    )

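    # The base tokenizer may lack a pad token; register one and mirror its id on
    # the value-head wrapper so padded generation works.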
    tokenizer = AutoTokenizer.from_pretrained(
        name,
        trust_remote_code=True,
    )
    tokenizer.add_special_tokens({'pad_token': DEFINE_PAD_TOKEN})
    model.pad_token = tokenizer.pad_token
    model.pad_token_id = tokenizer.pad_token_id

    return model, tokenizer

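# Format every prompt with its system prompt and pre-tokenize it so PPO rollouts
# can consume the dataset directly.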
def create_dataset(dataset_name, tokenizer):
    datasets = load_prompt_dataset(
        dataset_name,
        dataset_sub_name=dataset_sub_name,
        split=dataset_split,
        default_system_prompt=default_system_prompt,
    )
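    # Keep both the formatted prompt string ("query", used for logging and reward
    # scoring) and its token ids ("input_ids", used for generation).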
    datasets = datasets.map(
        lambda examples: {
            "query": [
                format_prompt(question, system_prompt=system_prompt)
                for system_prompt, question in zip(examples["system"], examples["prompt"])
            ],
            "input_ids": [
                tokenizer(
                    format_prompt(question, system_prompt=system_prompt),
                    return_tensors="pt",
                )["input_ids"][0]
                for system_prompt, question in zip(examples["system"], examples["prompt"])
            ],
        },
        batched=True,
        remove_columns=datasets.column_names,
    )

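    # Drop prompts that are already longer than seq_length tokens.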
    datasets = datasets.filter(lambda x: len(x["input_ids"]) < seq_length, batched=False)
    datasets.set_format(type="torch")
    return datasets

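# PPOTrainer expects list-style batches: keep the query strings and the
# variable-length input_ids tensors as lists instead of padding them here.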
def collator(examples):
    batch = {'query': [], 'input_ids': []}
    for example in examples:
        batch['query'].append(example['query'])
        batch['input_ids'].append(torch.tensor(example['input_ids'], dtype=torch.long))
    return batch


def train():
    peft_config = create_peft(is_peft)
    model, tokenizer = create_model_tokenizer(
        model_name, rm_model_name, peft_config
    )

    dataset = create_dataset(dataset_name, tokenizer)
    print(dataset)

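    # Sampling settings for rollouts: top_k=0 and top_p=1.0 sample from the
    # policy's full, unmodified distribution.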
    generation_kwargs = {
        "min_length": -1,
        "max_new_tokens": output_max_length,
        "top_k": 0,
        "top_p": 1.0,
        "do_sample": True,
        "pad_token_id": tokenizer.pad_token_id,
        "eos_token_id": tokenizer.eos_token_id,
        "forced_eos_token_id": tokenizer.eos_token_id,
    }
    output_length_sampler = LengthSampler(128, output_max_length)

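    # PPO hyperparameters: adaptive KL control tunes the KL penalty against the
    # reference policy, and early stopping aborts the inner optimization loop
    # once the per-step KL exceeds target_kl.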
    config = MultiAdapterPPOConfig(
        log_with='wandb',
        learning_rate=1e-5,
        batch_size=batch_size,
        mini_batch_size=mini_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        optimize_cuda_cache=True,
        early_stopping=True,
        target_kl=0.1,
        ppo_epochs=ppo_epochs,
        seed=0,
        init_kl_coef=0.2,
        adap_kl_ctrl=True,
        max_grad_norm=1.0,
    )

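    # ref_model=None: TRL's PPO trainer can recover reference logits from a PEFT
    # policy by disabling the LoRA adapter, so no separate frozen copy is needed.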
    trainer = MultiAdapterPPOTrainer(
        config,
        model,
        ref_model=None,
        tokenizer=tokenizer,
        dataset=dataset,
        data_collator=collator,
    )

    reward_baseline = 0.0
    save_freq = 50

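    # Rollout / optimization loop: generate responses, score them with the
    # reward adapter, then take one PPO step per batch.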
    for epoch, batch in enumerate(trainer.dataloader):
        start_time = time.time()

        if epoch >= config.total_ppo_epochs:
            break

        question_tensors = batch["input_ids"]
        response_tensors = trainer.generate(
            question_tensors,
            return_prompt=False,
            **generation_kwargs,
        )
        batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)

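        # Score prompt + response with the reward adapter hosted on the same
        # unwrapped policy model.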
        texts = [q + r for q, r in zip(batch["query"], batch["response"])]
        rm_model = trainer.accelerator.unwrap_model(trainer.model)
        raw_rewards = []
        for text in texts:
            inputs = tokenizer(text, return_tensors='pt').to(trainer.accelerator.device)
            score = rm_model.compute_reward_score(**inputs)[0, -1, 0] - reward_baseline
            raw_rewards.append(score)
        rewards = raw_rewards

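        # One PPO optimization step over the batch, then log KL, loss and reward stats.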
        stats = trainer.step(question_tensors, response_tensors, rewards)
        trainer.log_stats(stats, batch, rewards)

        if is_main_process():
            for text, reward in zip(texts, rewards):
                print('-----------------------------------')
                print(text)
                print(reward.item())
                print('-----------------------------------')
            print(f"step:{epoch}/all:{len(trainer.dataloader)}, loss:{stats['ppo/loss/total']}, mean_scores:{stats['ppo/mean_scores']}")

        if save_freq and epoch and epoch % save_freq == 0:
            trainer.save_pretrained(f'{output_name}_{epoch}')
            print(f'{output_name}_{epoch}')

    trainer.save_pretrained(output_name)


if __name__ == "__main__":
    train()