adam_beta1: 0.9
adam_beta2: 0.999
bf16: true
cutoff_len: 2048
dataset: mlfoundations-dev/sky_t1_original
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
enable_liger_kernel: false
eval_strategy: epoch
finetuning_type: full
global_batch_size: 512
gradient_accumulation_steps: 8
gradient_checkpointing: true
hub_model_id: mlfoundations-dev/sky-t1-original-llama-instruct
include_hp: dcft/train/hp_settings/hritik_no_model.yaml
learning_rate: 5.0e-06
logging_steps: 10
lr_scheduler_type: constant
max_grad_norm: 1
model_name_or_path: meta-llama/Llama-3.1-8B-Instruct
neat_packing: true
num_train_epochs: 3.0
output_dir: ./experiments/train/checkpoints/sky_t1_original_llama_instruct
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_train_batch_size: 8
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
run_name: sky-t1-original-llama-instruct
save_strategy: epoch
stage: sft
template: llama3
val_size: 0.05
weight_decay: 0.1
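
Most keys here match the LLaMA-Factory argument schema (`stage`, `finetuning_type`, `template`, `neat_packing`), so a config like this would typically be launched with `llamafactory-cli train <config>.yaml`; `global_batch_size`, `include_hp`, and `push_to_db` appear to be custom keys from the surrounding dcft tooling rather than stock LLaMA-Factory options. The batch-size settings also pin down the world size. Below is a minimal sketch of that arithmetic, assuming the usual per-device x grad-accum x world-size relation; the GPU count is inferred, not stated in the config.

```python
# Batch-size sanity check for this config. Assumption: global_batch_size equals
# per_device_train_batch_size * gradient_accumulation_steps * world_size.
# The world size (GPU count) derived below is inferred, not stated in the config.
per_device_train_batch_size = 8
gradient_accumulation_steps = 8
global_batch_size = 512

world_size = global_batch_size // (
    per_device_train_batch_size * gradient_accumulation_steps
)
assert (
    per_device_train_batch_size * gradient_accumulation_steps * world_size
    == global_batch_size
)
print(f"implied world size: {world_size} GPUs")  # -> 8
```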