| #!/usr/bin/env bash |
| set -euo pipefail |
|
|
# Absolute path of the directory holding this script, and the experiment
# root one level above it — both independent of the caller's CWD.
script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
exp_root=$(cd "${script_dir}/.." && pwd)
|
|
# Tunables — every one can be overridden from the environment.
role_name=${ROLE_NAME:-writer}                # persona/role being trained
deepspeed_config_name=${DEEPSPEED_CONFIG_NAME:-${exp_root}/config/ds.json}
dataset_sub_name=${DATASET_SUB_NAME:-}        # optional dataset sub-config
dataset_split=${DATASET_SPLIT:-train}         # which split to train on
system_prompt=${SYSTEM_PROMPT:-}              # optional system prompt text
|
|
# Derived locations — all artifacts land under ${output_root}.
output_root=${OUTPUT_ROOT:-${exp_root}/output/${role_name}}
model_sft_full_path=${MODEL_SFT_FULL_PATH:-${output_root}/sft_full}    # DPO starts from this SFT checkpoint
preference_dataset_name=${PREFERENCE_DATASET_NAME:-${exp_root}/data/${role_name}_preference.jsonl}
model_dpo_lora_path=${output_root}/dpo_lora   # LoRA adapter written by dpo.py
model_dpo_full_path=${output_root}/dpo_full   # merged full model written afterwards
|
|
| mkdir -p "${output_root}" |
|
|
# Refuse to run without the preference data (a JSONL file or a dataset
# directory); diagnostics go to stderr so stdout stays pipeline-clean.
if [[ ! -f "${preference_dataset_name}" && ! -d "${preference_dataset_name}" ]]; then
  echo "Missing preference dataset: ${preference_dataset_name}" >&2
  echo "See ${exp_root}/README.md and the sample JSONL files." >&2
  exit 1
fi
|
|
# DPO training: fine-tune the SFT checkpoint on the preference pairs,
# producing a LoRA adapter. Flags are gathered in an array so the
# invocation line stays short and each option is easy to scan.
dpo_args=(
  --dataset_name="${preference_dataset_name}"
  --dataset_sub_name="${dataset_sub_name}"
  --dataset_split="${dataset_split}"
  --system_prompt="${system_prompt}"
  --model_name="${model_sft_full_path}"
  --output_name="${model_dpo_lora_path}"
  --use_QLora="${USE_QLORA:-True}"
  --use_flash_attention_2="${USE_FLASH_ATTENTION_2:-True}"
  --deepspeed_config_name="${deepspeed_config_name}"
  --batch_size="${BATCH_SIZE:-8}"
  --num_train_epochs="${NUM_TRAIN_EPOCHS:-2}"
  --seq_length="${SEQ_LENGTH:-1024}"
  --output_max_length="${OUTPUT_MAX_LENGTH:-512}"
  --gradient_accumulation_steps="${GRADIENT_ACCUMULATION_STEPS:-4}"
  --learning_rate="${LEARNING_RATE:-2e-5}"
)
deepspeed "${exp_root}/dpo.py" "${dpo_args[@]}"
|
|
# Merge the LoRA adapter back into the base SFT model, yielding a
# standalone full checkpoint ready for inference.
merge_args=(
  --base_model_name="${model_sft_full_path}"
  --model_name="${model_dpo_lora_path}"
  --merged_model_name="${model_dpo_full_path}"
)
python "${exp_root}/merge_adapter.py" "${merge_args[@]}"
|
|