# ba-agent-posttrain-scripts / reward_model.py
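"""Reward-model training script for the BA agent post-training pipeline.

Loads a 4-bit (QLoRA) sequence-classification backbone with a single-logit
reward head, tokenizes chosen/rejected preference pairs, and fine-tunes the
model with TRL's RewardTrainer, optionally under DeepSpeed, logging to
Weights & Biases.
"""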
import os
import deepspeed
from trl import RewardTrainer, RewardConfig
import torch
from accelerate import Accelerator
from utils import (
    ScriptArguments,
    DEFINE_PAD_TOKEN,
    create_peft_reward_model,  # assumed to live in utils with the other shared helpers
    format_prompt_answer,
    maybe_distributed_barrier,
    resolve_system_prompt,
)
from transformers import (
AutoTokenizer,
BitsAndBytesConfig,
HfArgumentParser,
AutoModelForSequenceClassification,
)
from data_adapter import load_preference_dataset
# Weights & Biases project and run name (wandb reads the run name from WANDB_NAME)
os.environ["WANDB_PROJECT"] = "ma-rlhf"
os.environ["WANDB_NAME"] = "reward_model"
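# Parse command-line flags into the ScriptArguments dataclass; unrecognized
# flags are returned separately and ignored here.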
parser = HfArgumentParser(ScriptArguments)
train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]
dataset_name = train_args.dataset_name
dataset_sub_name = train_args.dataset_sub_name
dataset_split = train_args.dataset_split
model_name = train_args.model_name
deepspeed_config_name = train_args.deepspeed_config_name
seq_length = train_args.seq_length
batch_size = train_args.batch_size
output_name = train_args.output_name
is_peft = train_args.use_QLora
is_use_flash_attention2 = train_args.use_flash_attention_2
num_train_epochs = train_args.num_train_epochs
gradient_accumulation_steps = train_args.gradient_accumulation_steps
learning_rate = train_args.learning_rate
default_system_prompt = resolve_system_prompt(train_args.system_prompt)
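# Model/tokenizer factory: a 4-bit NF4-quantized backbone with a single-logit
# classification head used as the scalar reward, plus a tokenizer with an
# explicit pad token for batched pairwise training.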
def create_model_tokenizer(name):
    # QLoRA: load the backbone in 4-bit NF4 with bfloat16 compute
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=True,
    )
    # Pin each process to its own GPU under accelerate/DeepSpeed
    device_map = {"": Accelerator().local_process_index}
    print('device map: ', device_map)
    model = AutoModelForSequenceClassification.from_pretrained(
        name,
        quantization_config=bnb_config,
        device_map=device_map,  # for 70B-scale models, device_map="auto" may be preferable
        num_labels=1,  # single scalar reward head
        use_cache=False,
        trust_remote_code=True,
        attn_implementation="flash_attention_2" if is_use_flash_attention2 else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(name, use_fast=True)
    tokenizer.add_special_tokens({'pad_token': DEFINE_PAD_TOKEN})
    # Resize embeddings in case the pad token is new, then record it on the config
    model.resize_token_embeddings(len(tokenizer))
    model.config.pad_token_id = tokenizer.pad_token_id
    model.config.pad_token = tokenizer.pad_token
    return model, tokenizer
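# Dataset pipeline: load the preference dataset, tokenize the chosen/rejected
# pairs, and drop pairs whose tokenized length exceeds seq_length. The barriers
# keep distributed workers in sync around the cached map/filter steps.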
def create_reward_model_datasets(dataset_name, dataset_sub_name, tokenizer):
    train_dataset = load_preference_dataset(
        dataset_name,
dataset_sub_name=dataset_sub_name,
split=dataset_split,
default_system_prompt=default_system_prompt,
)
train_dataset = train_dataset.map(
lambda examples: tokenize_reward_batch(examples, tokenizer),
batched=True,
)
maybe_distributed_barrier()
train_dataset = train_dataset.filter(
lambda x: len(x["input_ids_chosen"]) <= seq_length
and len(x["input_ids_rejected"]) <= seq_length
)
maybe_distributed_barrier()
# eval_dataset = eval_dataset.map(
# preprocess_function_hhrlhf,
# batched=True,
# num_proc=8,
# )
# torch.distributed.barrier()
# eval_dataset = eval_dataset.filter(
# lambda x: len(x["input_ids_chosen"]) <= seq_length
# and len(x["input_ids_rejected"]) <= seq_length
# )
# torch.distributed.barrier()
# return train_dataset, eval_dataset
return train_dataset, None
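# Batched tokenization of preference pairs: each example is rendered with the
# shared prompt template (system prompt + prompt + answer) and tokenized twice,
# once for the chosen and once for the rejected response, producing the
# input_ids_/attention_mask_{chosen,rejected} columns RewardTrainer expects.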
def tokenize_reward_batch(examples, tokenizer):
new_examples = {
"input_ids_chosen": [],
"attention_mask_chosen": [],
"input_ids_rejected": [],
"attention_mask_rejected": [],
}
for system_prompt, prompt, response_chosen, response_rejected in zip(
examples["system"], examples["prompt"], examples["chosen"], examples["rejected"]
):
chosen_text = format_prompt_answer(prompt, response_chosen, system_prompt=system_prompt)
rejected_text = format_prompt_answer(prompt, response_rejected, system_prompt=system_prompt)
        # Truncation falls back to the tokenizer's model_max_length; the length
        # filter in create_reward_model_datasets then drops over-long pairs.
        tokenized_chosen = tokenizer(chosen_text, truncation=True)
        tokenized_rejected = tokenizer(rejected_text, truncation=True)
new_examples["input_ids_chosen"].append(tokenized_chosen["input_ids"])
new_examples["attention_mask_chosen"].append(tokenized_chosen["attention_mask"])
new_examples["input_ids_rejected"].append(tokenized_rejected["input_ids"])
new_examples["attention_mask_rejected"].append(tokenized_rejected["attention_mask"])
return new_examples
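# Training entry point: build the model/tokenizer, prepare the dataset, and
# fine-tune with TRL's RewardTrainer.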
def train():
    model, tokenizer = create_model_tokenizer(model_name)  # sequence-classification model with a scalar reward head
    train_datasets, test_datasets = create_reward_model_datasets(dataset_name, dataset_sub_name, tokenizer)
    # PEFT (LoRA) configuration; presumably returns None when QLoRA is disabled
    peft_config = create_peft_reward_model(is_peft)
    # For ZeRO stage 3, use a DeepSpeed config along the lines of https://github.com/huggingface/trl/issues/835
reward_config = RewardConfig(
output_dir=output_name,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
num_train_epochs=num_train_epochs,
gradient_accumulation_steps=gradient_accumulation_steps,
gradient_checkpointing=True,
learning_rate=learning_rate,
report_to="wandb",
warmup_ratio=0.01,
remove_unused_columns=True,
optim="adamw_torch",
logging_steps=1,
max_length=seq_length,
deepspeed=deepspeed_config_name,
bf16=True,
lr_scheduler_type='cosine',
# evaluation_strategy="steps",
# eval_steps=100,
# max_steps=10,
)
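    # RewardTrainer optimizes the pairwise preference loss
    # -log(sigmoid(r_chosen - r_rejected)) over each (chosen, rejected) pair.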
trainer = RewardTrainer(
model,
args=reward_config,
train_dataset=train_datasets,
processing_class=tokenizer,
peft_config=peft_config,
)
trainer.train()
trainer.save_model(output_name)
if __name__ == "__main__":
train()
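# Example launch (illustrative; model, dataset, and config names are placeholders):
#   accelerate launch reward_model.py \
#       --model_name meta-llama/Llama-2-7b-hf \
#       --dataset_name <preference_dataset> \
#       --deepspeed_config_name ds_config.json \
#       --output_name ./reward_model_out \
#       --seq_length 2048 --batch_size 2 --num_train_epochs 1 \
#       --gradient_accumulation_steps 8 --learning_rate 1e-5 --use_QLora True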