adam_beta1: '0.9'
adam_beta2: '0.999'
assistant_tag: gpt
bf16: 'True'
content_tag: value
cutoff_len: '2048'
dataset: mlfoundations-dev/oh-dcft-v3.1-gpt-4o-mini
dataset_dir: ONLINE
ddp_timeout: '180000000'
deepspeed: /opt/ml/code/zero3.json
do_train: 'True'
enable_liger_kernel: 'False'
eval_strategy: epoch
finetuning_type: full
formatting: sharegpt
global_batch_size: '512'
gradient_accumulation_steps: '16'
gradient_checkpointing: 'True'
hub_model_id: mlfoundations-dev/hp_ablations_gemma_scheduler_cosine_warmup0.05_minlr5e-7
learning_rate: 5e-06
logging_steps: '10'
lr_scheduler_kwargs: min_lr=5e-07
lr_scheduler_type: cosine_with_min_lr
max_grad_norm: '1'
messages: conversations
model_name_or_path: google/gemma-2-9b
neat_packing: 'True'
num_train_epochs: '3.0'
output_dir: /opt/ml/model
overwrite_cache: 'True'
overwrite_output_dir: 'True'
packing: 'True'
per_device_eval_batch_size: '4'
per_device_train_batch_size: '4'
plot_loss: 'True'
preprocessing_num_workers: '16'
push_to_db: 'True'
push_to_hub: 'True'
report_to: wandb
role_tag: from
run_name: hp_ablations_gemma_scheduler_cosine_warmup0.05_minlr5e-7
save_strategy: epoch
stage: sft
template: gemma
user_tag: human
val_size: '0.05'
warmup_ratio: '0.05'
weight_decay: '0.1'
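As the run name suggests, the scheduler settings are the variable under ablation here: `lr_scheduler_type: cosine_with_min_lr` with `warmup_ratio: 0.05` and `min_lr=5e-07` means the learning rate warms up linearly over the first 5% of steps, then decays on a cosine curve from the peak of 5e-06 down to a floor of 5e-07 rather than to zero. Below is a minimal sketch of the schedule these three settings imply; `lr_at` is a hypothetical helper for illustration, not part of the training stack, and the exact step bookkeeping in the real scheduler may differ slightly.

```python
import math

def lr_at(step: int, total_steps: int, peak_lr: float = 5e-06,
          min_lr: float = 5e-07, warmup_ratio: float = 0.05) -> float:
    """Cosine-with-min-lr schedule implied by this config: linear warmup
    over the first warmup_ratio of steps, then cosine decay from peak_lr
    that bottoms out at min_lr instead of zero."""
    warmup_steps = int(total_steps * warmup_ratio)
    if step < warmup_steps:
        # Linear warmup from 0 to peak_lr.
        return peak_lr * step / max(1, warmup_steps)
    # Cosine decay over the remaining steps, floored at min_lr.
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))
    return min_lr + (peak_lr - min_lr) * cosine
```

Note also that the batch-size settings are consistent only under an implied world size of 8 devices: `global_batch_size` 512 = `per_device_train_batch_size` 4 × `gradient_accumulation_steps` 16 × 8 GPUs.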