cutoff_len: 1024
dataset: Reseacrhed_data
dataset_dir: data
do_train: true
finetuning_type: lora
flash_attn: auto
fp16: true
gradient_accumulation_steps: 8
learning_rate: 2.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: q_proj,v_proj
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 10000
model_name_or_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
num_train_epochs: 2.0
optim: adamw_torch
output_dir: saves/LLaMA-tiny/lora/train_2024-05-05-10-57-50
packing: true
per_device_train_batch_size: 2
quantization_bit: 8
report_to: none
save_steps: 100
stage: pt
template: default
warmup_steps: 0
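
These key/value pairs match the YAML that LLaMA-Factory's web UI (LLaMA Board) exports for a training run, judging by the `saves/.../train_<timestamp>` output path. Two derived quantities are worth noting. With `per_device_train_batch_size: 2` and `gradient_accumulation_steps: 8`, the effective batch size is 2 × 8 = 16 sequences per device per optimizer step; over `max_samples: 10000` and `num_train_epochs: 2.0` that works out to roughly 10000 × 2 / 16 = 1250 optimizer steps on a single device, and `packing: true` will reduce that further by merging short samples into 1024-token sequences. Assuming the block is saved to a file, say `train_config.yaml` (a hypothetical name), recent LLaMA-Factory releases can launch it with `llamafactory-cli train train_config.yaml`.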
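To make the adapter settings concrete, the sketch below expresses the same LoRA hyperparameters (`lora_rank: 8`, `lora_alpha: 16`, `lora_dropout: 0`, `lora_target: q_proj,v_proj`) with Hugging Face PEFT. This is a minimal illustration assuming `transformers` and `peft` are installed, not the code path LLaMA-Factory itself executes.

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Load the base model named in the config. (quantization_bit: 8 would map to
# 8-bit loading via bitsandbytes, e.g. load_in_8bit=True; omitted here to keep
# the sketch runnable without a GPU.)
model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")

lora_config = LoraConfig(
    r=8,                                  # lora_rank
    lora_alpha=16,                        # lora_alpha
    lora_dropout=0.0,                     # lora_dropout
    target_modules=["q_proj", "v_proj"],  # lora_target
    bias="none",
    task_type="CAUSAL_LM",
)

# Wrap the model so only the injected LoRA matrices are trainable.
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```

With rank 8 applied only to the attention query and value projections, the trainable parameters are a tiny fraction of the 1.1B base model, roughly on the order of 0.1%, which is what makes this configuration feasible at `per_device_train_batch_size: 2` with 8-bit quantization.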