architecture:
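    # Backbone weights are loaded in bfloat16; gradient checkpointing trades
    # extra forward-pass compute for a large cut in activation memory.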
    backbone_dtype: bfloat16
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
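# Augmentation is effectively disabled: all probabilities and the NEFTune
# noise alpha are set to 0.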
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
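# Preference-data layout: `prompt` holds the input, `chosen_response` the
# preferred answer, `rejected_response` the dispreferred one. In H2O LLM
# Studio exports, the string 'None' marks optional columns that are unset.
# With validation_strategy: automatic, 1% of the train dataframe is held
# out for validation.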
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    add_eos_token_to_system: true
    answer_column: chosen_response
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    id_column: None
    limit_chained_samples: true
    mask_prompt_labels: true
    only_last_answer: false
    parent_id_column: None
    personalize: false
    prompt_column:
    - prompt
    prompt_column_separator: \n\n
    rejected_answer_column: rejected_response
    rejected_prompt_column: None
    system_column: None
    text_answer_separator: ''
    text_prompt_start: ''
    text_system_start: <|system|>
    train_dataframe: /root/h2o-llmstudio/data/user/
    validation_dataframe: None
    validation_size: 0.01
    validation_strategy: automatic
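# Training runs on 4 GPUs. The deepspeed_* bucket sizes only take effect
# when use_deepspeed is true; since it is false here, training falls back
# to plain DDP with bfloat16 mixed precision.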
environment:
    compile_model: false
    deepspeed_allgather_bucket_size: 1000000
    deepspeed_method: ZeRO2
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    - '1'
    - '2'
    - '3'
    huggingface_branch: main
    mixed_precision: true
    mixed_precision_dtype: bfloat16
    number_of_workers: 8
    seed: 69
    trust_remote_code: true
    use_deepspeed: false
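# experiment_name and llm_backbone are left empty in this template and
# should be set per run.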
experiment_name:
llm_backbone:
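# Metrics are sent to Neptune; the Weights & Biases fields are unused.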
logging:
    log_all_ranks: false
    log_step_size: absolute
    logger: Neptune
    neptune_project: nischaychai/nis-exp
    wandb_entity: ''
    wandb_project: ''
output_directory: /root/h2o-llmstudio/output/user/
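# Validation generations use greedy decoding (do_sample: false, num_beams: 1,
# temperature 0) and are scored by a GPT judge using the mt-bench template.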
prediction:
    batch_size_inference: 0
    do_sample: false
    max_length_inference: 256
    max_time: 0.0
    metric: GPT
    metric_gpt_model: gpt-3.5-turbo-0301
    metric_gpt_template: mt-bench
    min_length_inference: 2
    num_beams: 1
    num_history: 4
    repetition_penalty: 1.0
    stop_tokens: ''
    temperature: 0.0
    top_k: 0
    top_p: 1.0
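# text_dpo_modeling is H2O LLM Studio's preference-optimization problem type;
# the concrete preference loss is chosen via training.loss_function below.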
problem_type: text_dpo_modeling
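# Sequences are capped at 1280 tokens (prompt plus answer); a padding
# quantile of 1.0 pads each batch up to its longest sample.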
tokenizer:
    add_prompt_answer_tokens: false
    max_length: 1280
    padding_quantile: 1.0
    tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
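# SimPO is a reference-free preference loss that length-normalizes the policy
# log-probabilities; roughly, following the SimPO paper:
#   loss = -log(sigmoid(beta/|y_w| * log p(y_w|x)
#                       - beta/|y_l| * log p(y_l|x) - simpo_gamma))
# Effective batch size = batch_size (2) x grad_accumulation (4) x 4 GPUs = 32.
# LoRA scaling is lora_alpha / lora_r = 96 / 32 = 3; an empty
# lora_target_modules falls back to the framework's default target layers.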
training:
    attention_implementation: auto
    batch_size: 2
    beta: 0.34
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 3
    evaluate_before_training: false
    evaluation_epochs: 1.0
    freeze_layers: []
    grad_accumulation: 4
    gradient_clip: 2.0
    learning_rate: 4.0e-06
    lora: true
    lora_alpha: 96
    lora_dropout: 0.0
    lora_r: 32
    lora_target_modules: ''
    lora_unfreeze_layers: []
    loss_function: SimPOLoss
    min_learning_rate_ratio: 0.0
    optimizer: AdamW
    save_checkpoint: last
    schedule: Cosine
    simpo_gamma: 1.0
    train_validation_data: false
    use_dora: false
    use_rslora: false
    warmup_epochs: 0.0
    weight_decay: 1.0e-05