adam_beta1: '0.9'
adam_beta2: '0.999'
bf16: 'True'
cutoff_len: '2048'
dataset: mlfoundations-dev/oh-dcft-v1.2_no-curation_gpt-4o-mini
dataset_dir: ONLINE
ddp_timeout: '180000000'
deepspeed: /opt/ml/code/zero3.json
do_train: 'True'
enable_liger_kernel: 'False'
eval_strategy: epoch
finetuning_type: full
formatting: sharegpt
global_batch_size: '512'
gradient_accumulation_steps: '1'
gradient_checkpointing: 'True'
hub_model_id: mlfoundations-dev/oh-dcft-v1.2_no-curation_gpt-4o-mini
learning_rate: 5e-06
logging_steps: '10'
lr_scheduler_type: constant
max_grad_norm: '1'
messages: conversations
model_name_or_path: meta-llama/Meta-Llama-3.1-8B
neat_packing: 'True'
num_train_epochs: '3.0'
output_dir: /opt/ml/checkpoints
overwrite_cache: 'True'
overwrite_output_dir: 'True'
packing: 'True'
per_device_train_batch_size: '8'
plot_loss: 'True'
preprocessing_num_workers: '16'
push_to_db: 'True'
push_to_hub: 'True'
report_to: wandb
run_name: oh-dcft-v1.2_no-curation_gpt-4o-mini
save_strategy: epoch
stage: sft
template: llama3
val_size: '0.05'
warmup_ratio: '0.1'
warmup_steps: '1738'
weight_decay: '0.1'