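# Axolotl config: LoRA fine-tune of EleutherAI/pythia-1.4b-deduped on
# teknium/GPT4-LLM-Cleaned (Alpaca-format prompts).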
base_model: EleutherAI/pythia-1.4b-deduped
load_in_8bit: true
datasets:
- path: teknium/GPT4-LLM-Cleaned
type: alpaca
dataset_prepared_path:
val_set_size: 0.05
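# LoRA adapter settings (the 8-bit base model stays frozen; only adapter weights are trained)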
adapter: lora
lora_model_dir:
sequence_len: 512
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
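# GPT-NeoX models use a single fused query_key_value projection, so LoRA targets that module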
lora_target_modules:
- query_key_value
lora_target_linear:
lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
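# Weights & Biases logging (left blank here, which disables wandb)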
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
output_dir: ./lora-alpaca-pythia
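# effective batch size = micro_batch_size * gradient_accumulation_steps * number of GPUs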
gradient_accumulation_steps: 1
micro_batch_size: 4
num_epochs: 4
learning_rate: 0.00001
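# compute loss only on response tokens, not on the prompt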
train_on_inputs: false
group_by_length: false
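# bf16 compute and TF32 matmuls; both assume an Ampere-class or newer NVIDIA GPU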
bf16: true
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
weight_decay: 0.1
evals_per_epoch: 4
logging_steps: 1
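# To launch a run with this config (assuming a recent axolotl install; the path below is illustrative):
#   accelerate launch -m axolotl.cli.train path/to/this-config.yml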