#!/usr/bin/env bash
set -euo pipefail

# Resolve the script's directory and the experiment root one level up.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
exp_root="$(cd "${script_dir}/.." && pwd)"

# Defaults, all overridable via environment variables.
role_name="${ROLE_NAME:-writer}"
deepspeed_config_name="${DEEPSPEED_CONFIG_NAME:-${exp_root}/config/ds.json}"
dataset_sub_name="${DATASET_SUB_NAME:-}"
dataset_split="${DATASET_SPLIT:-train}"
system_prompt="${SYSTEM_PROMPT:-}"
output_root="${OUTPUT_ROOT:-${exp_root}/output/${role_name}}"
reward_base_model_path="${REWARD_BASE_MODEL_PATH:-${output_root}/sft_full}"
preference_dataset_name="${PREFERENCE_DATASET_NAME:-${exp_root}/data/${role_name}_preference.jsonl}"
reward_model_lora_path="${output_root}/reward_model_lora"

mkdir -p "${output_root}"

# The preference data may be a single JSONL file or a dataset directory.
if [ ! -f "${preference_dataset_name}" ] && [ ! -d "${preference_dataset_name}" ]; then
  echo "Missing preference dataset: ${preference_dataset_name}"
  echo "See ${exp_root}/README.md and the sample JSONL files."
  exit 1
fi

# Train the reward-model LoRA adapter on top of the SFT checkpoint with DeepSpeed.
deepspeed "${exp_root}/reward_model.py" \
  --dataset_name="${preference_dataset_name}" \
  --dataset_sub_name="${dataset_sub_name}" \
  --dataset_split="${dataset_split}" \
  --system_prompt="${system_prompt}" \
  --model_name="${reward_base_model_path}" \
  --seq_length="${SEQ_LENGTH:-1024}" \
  --batch_size="${BATCH_SIZE:-8}" \
  --output_name="${reward_model_lora_path}" \
  --use_QLora="${USE_QLORA:-True}" \
  --use_flash_attention_2="${USE_FLASH_ATTENTION_2:-True}" \
  --deepspeed_config_name="${deepspeed_config_name}" \
  --num_train_epochs="${NUM_TRAIN_EPOCHS:-2}" \
  --gradient_accumulation_steps="${GRADIENT_ACCUMULATION_STEPS:-4}" \
  --learning_rate="${LEARNING_RATE:-2e-5}"
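
# Illustrative invocation (not part of the original script): the role name, batch
# size, and other values below are example overrides, not repo defaults. Any of the
# environment variables read above can be set inline when launching this script:
#
#   ROLE_NAME=poet BATCH_SIZE=4 NUM_TRAIN_EPOCHS=1 LEARNING_RATE=1e-5 \
#     bash path/to/this_script.sh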