base_model: mistralai/Mistral-Small-3.2-24B-Instruct-2506
processor_type: AutoProcessor
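# QLoRA quantization: the 24B base model is loaded in 4-bit via bitsandbytes, so only
# the LoRA adapter weights configured below are trained in higher precision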
load_in_8bit: false
load_in_4bit: true
# these 3 lines are needed for now to handle vision chat templates w images
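# (tokenization is deferred to the multimodal collator, the raw message/image columns
# are kept for the processor, and sample packing is off since it does not support
# image inputs)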
skip_prepare_dataset: true
remove_unused_columns: false
sample_packing: false
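# format the `messages` field with the Mistral v7 Tekken chat template; 5% of the
# dataset is held out for evaluation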
chat_template: mistral_v7_tekken
datasets:
  - path: hardlyworking/HardlyRPv2-10k
    type: chat_template
    split: train
    field_messages: messages
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./outputs/out
adapter: qlora
lora_model_dir:

sequence_len: 8192
pad_to_sequence_len: false
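# rank-64 adapters with alpha 32 (scale alpha/r = 0.5); the regex below targets only
# the language model's attention/MLP projections, leaving the vision tower untouched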
lora_r: 64
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
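# effective batch size of 16 per device (micro batch 16 x 1 accumulation step);
# 8-bit AdamW keeps optimizer-state memory low, cosine decay from a 2e-4 peak LR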
gradient_accumulation_steps: 1
micro_batch_size: 16
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
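# train in bf16 with TF32 matmuls enabled (Ampere or newer GPUs)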
bf16: true
fp16:
tf32: true
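# Unsloth-style gradient checkpointing trades recompute for a large VRAM saving;
# SDPA is used in place of Flash Attention 2 (unsupported by the vision model, see below)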
gradient_checkpointing: unsloth
logging_steps: 1
# flash_attention: false # PixtralVisionModel does not support Flash Attention 2.0 yet.
sdp_attention: true
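# warm up over the first 10% of steps; evaluate 4 times and checkpoint once per epoch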
warmup_ratio: 0.1
evals_per_epoch: 4
saves_per_epoch: 1
weight_decay: 0.0
special_tokens:
# save_first_step: true # uncomment this to validate checkpoint saving works with your config