#!/bin/bash
# bf16_vs_fp8/scripts/train_vicuna_7b.sh
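
# Fine-tune LLaMA-7B on the cleaned ShareGPT split (the Vicuna-7B recipe)
# across 4 GPUs with FSDP full sharding, bf16 mixed precision, and gradient
# checkpointing, via FastChat's memory-optimized trainer.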
torchrun --nproc_per_node=4 --master_port=20001 fastchat/train/train_mem.py \
--model_name_or_path ~/model_weights/llama-7b \
--data_path ~/datasets/sharegpt_20230422_clean_lang_split_identity.json \
--bf16 True \
--output_dir output_vicuna_7b \
--num_train_epochs 3 \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 16 \
--gradient_accumulation_steps 16 \
--evaluation_strategy "steps" \
--eval_steps 1500 \
--save_strategy "steps" \
--save_steps 1500 \
--save_total_limit 8 \
--learning_rate 2e-5 \
--weight_decay 0. \
--warmup_ratio 0.04 \
--lr_scheduler_type "cosine" \
--logging_steps 1 \
--fsdp "full_shard auto_wrap" \
--fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
--tf32 True \
--model_max_length 2048 \
--gradient_checkpointing True \
--lazy_preprocess True
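
# Sanity-check sketch (not part of the original script; the variable names
# below are illustrative assumptions): the flags above imply an effective
# global batch size of per-device batch x gradient accumulation x GPU count.
NPROC_PER_NODE=4
PER_DEVICE_TRAIN_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=16
echo "Effective global batch size: $((NPROC_PER_NODE * PER_DEVICE_TRAIN_BATCH_SIZE * GRADIENT_ACCUMULATION_STEPS))"  # 4 x 2 x 16 = 128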