### model
model_name_or_path: /data/wyt/codes/checkpoints/Qwen2.5-7B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
deepspeed: /data/wyt/codes/DocDPO/sft/checkpoints_multilang/ds_z3_config.json  # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]

### dataset
dataset_dir: /data/wyt/codes/DocDPO/sft/data_multilang/red_multilang_base_balanced_en_zhdefr_320
dataset: sft_en-zh_tool,sft_en-zh_trans_base_sample,sft_en-de_tool,sft_en-de_trans_base_sample,sft_en-fr_tool,sft_en-fr_trans_base_sample
template: qwen
cutoff_len: 2560
# max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4
mask_history: true

### output
output_dir: /data/wyt/codes/DocDPO/sft/checkpoints_multilang/ted_base_balanced_en_zhdefr_320/sft
logging_steps: 5
# save_steps: 300
save_strategy: epoch
plot_loss: true
overwrite_output_dir: true
save_only_model: true
report_to: tensorboard  # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 4
gradient_accumulation_steps: 2
# learning_rate: 2.0e-5
# learning_rate: 8.0e-6
learning_rate: 1.0e-5
num_train_epochs: 1.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
resume_from_checkpoint: null

### eval
# eval_dataset: alpaca_en_demo
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
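
### launch
# Sketch of a typical launch for a LLaMA-Factory config like this one; the
# filename below is hypothetical, so substitute the actual path of this file.
# FORCE_TORCHRUN=1 forces a torchrun launch across all visible GPUs, which a
# DeepSpeed ZeRO-3 run like the one configured above requires:
#   FORCE_TORCHRUN=1 llamafactory-cli train qwen2_5_7b_full_sft.yaml
# Note: effective global batch size = per_device_train_batch_size (4)
#   * gradient_accumulation_steps (2) * number of GPUs.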