bf16: true
cutoff_len: 512
dataset: llamafactory/alpaca_en
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero1.json
do_train: true
enable_liger_kernel: true
eval_strategy: epoch
finetuning_type: full
formatting: alpaca
gradient_accumulation_steps: 4
gradient_checkpointing: true
hub_model_id: marianna13/meta-llama-3.1-8b-alpaca-sft-sample
learning_rate: 2.0e-05
logging_steps: 10
lr_scheduler_type: cosine
model_name_or_path: meta-llama/Meta-Llama-3.1-8B
neat_packing: true
num_train_epochs: 1.0
output_dir: experiments/train/checkpoints/mistral_alpaca_sft_sample
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_train_batch_size: 16
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
run_name: llama3_alpaca_sft_sample
save_strategy: epoch
stage: sft
template: alpaca
val_size: 0.05
warmup_ratio: 0.1
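
# Usage sketch: the keys above (stage, finetuning_type, template, neat_packing)
# match LLaMA-Factory's trainer config format; assuming that CLI, a config like
# this one is launched with:
#   llamafactory-cli train path/to/this_config.yaml
# Effective batch size per device: per_device_train_batch_size (16) x
# gradient_accumulation_steps (4) = 64 sequences per optimizer step, scaled
# further by the DeepSpeed world size.
# The referenced dcft/train/zero1.json is not shown here; a minimal DeepSpeed
# ZeRO stage-1 JSON typically looks like this (a hypothetical sketch, not the
# actual file):
#   {
#     "train_batch_size": "auto",
#     "train_micro_batch_size_per_gpu": "auto",
#     "gradient_accumulation_steps": "auto",
#     "bf16": { "enabled": "auto" },
#     "zero_optimization": { "stage": 1 }
#   }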